
HUE-8925 [fb] Support user credentials from IDBroker for S3

Jean-Francois Desjeans Gauthier, 6 years ago
commit e1d0e9f78b

+ 3 - 4
apps/filebrowser/src/filebrowser/api.py

@@ -18,7 +18,7 @@
 import logging
 
 from desktop.lib.django_util import JsonResponse
-from desktop.lib.fsmanager import FS_GETTERS
+from desktop.lib import fsmanager
 from desktop.lib.i18n import smart_unicode
 
 from aws.conf import has_s3_access
@@ -45,9 +45,8 @@ def get_filesystems(request):
   response = {}
 
   filesystems = {}
-  for k, v in list(FS_GETTERS.items()):
-    if not k.startswith('s3') or has_s3_access(request.user):
-      filesystems[k] = v is not None
+  for k in fsmanager.get_filesystems(request.user):
+    filesystems[k] = True
 
   response['status'] = 0
   response['filesystems'] = filesystems
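
Note: with this change the get_filesystems endpoint reports only the schemes that are both enabled in the configuration and permitted for the requesting user. A purely illustrative response for a user with HDFS and S3 access (scheme names come from fsmanager.SUPPORTED_FS; the values shown are hypothetical):

    # Illustrative only, not captured from a live server
    {'status': 0, 'filesystems': {'hdfs': True, 's3a': True}}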

+ 4 - 3
apps/filebrowser/src/filebrowser/settings.py

@@ -21,11 +21,12 @@ REQUIRES_HADOOP = False
 ICON = "filebrowser/art/icon_filebrowser_48.png"
 MENU_INDEX = 20
 
-from aws.s3.s3fs import PERMISSION_ACTION_S3
-from azure.adls.webhdfs import PERMISSION_ACTION_ADLS
+from aws.conf import PERMISSION_ACTION_S3
+from azure.conf import PERMISSION_ACTION_ADLS, PERMISSION_ACTION_ABFS
 
 
 PERMISSION_ACTIONS = (
   (PERMISSION_ACTION_S3, "Access to S3 from filebrowser and filepicker."),
-  (PERMISSION_ACTION_ADLS, "Access to ADLS from filebrowser and filepicker.")
+  (PERMISSION_ACTION_ADLS, "Access to ADLS from filebrowser and filepicker."),
+  (PERMISSION_ACTION_ABFS, "Access to ABFS from filebrowser and filepicker.")
 )

+ 3 - 3
apps/filebrowser/src/filebrowser/templates/fb_components.mako

@@ -21,7 +21,7 @@ from desktop.lib.paths import SAFE_CHARACTERS_URI_COMPONENTS
 from django.template.defaultfilters import urlencode, stringformat, date, filesizeformat, time
 from django.utils.translation import ugettext as _
 
-from aws import get_client
+from aws.conf import get_default_region
 %>
 
 <%def name="breadcrumbs(path, breadcrumbs, from_listdir=False)">
@@ -29,8 +29,8 @@ from aws import get_client
       <ul class="nav nav-pills hue-breadcrumbs-bar">
         %if path.lower().find('s3a://') == 0:
           <li style="padding-top: 10px">
-            <span class="breadcrumb-link homeLink" title="${ _('S3 region %s') % get_client()._region }">
-              <i class="fa fa-fw fa-cubes"></i> ${ get_client()._region }
+            <span class="breadcrumb-link homeLink" title="${ _('S3 region %s') % get_default_region() }">
+              <i class="fa fa-fw fa-cubes"></i> ${ get_default_region() }
             </span>
           </li>
         %elif path.lower().find('adl:/') == 0:

+ 1 - 0
apps/useradmin/src/useradmin/models.py

@@ -281,6 +281,7 @@ def update_app_permissions(**kwargs):
           not (new_dp.app == 'security' and new_dp.action == 'impersonate') and \
           not (new_dp.app == 'filebrowser' and new_dp.action == 's3_access') and \
           not (new_dp.app == 'filebrowser' and new_dp.action == 'adls_access') and \
+           not (new_dp.app == 'filebrowser' and new_dp.action == 'abfs_access') and \
           not (new_dp.app == 'oozie' and new_dp.action == 'disable_editor_access'):
          GroupPermission.objects.create(group=default_group, hue_permission=new_dp)
 

+ 26 - 16
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -28,15 +28,16 @@ LOG = logging.getLogger(__name__)
 
 class ProxyFS(object):
 
-  def __init__(self, filesystems_dict, default_scheme):
+  def __init__(self, filesystems_dict, default_scheme, name='default'):
     if default_scheme not in filesystems_dict:
       raise ValueError(
         'Default scheme "%s" is not a member of provided schemes: %s' % (default_scheme, filesystems_dict.keys()))
 
+    self._name = name
     self._fs_dict = filesystems_dict
-    self._fs_set = set(self._fs_dict.values())
+    self._user = {'user': None} # wrapping in an object to avoid triggering __getattr__ / __setattr__
     self._default_scheme = default_scheme
-    self._default_fs = self._fs_dict[self._default_scheme]
+    self._default_fs = filesystems_dict[self._default_scheme](name, default_scheme)
 
   def __getattr__(self, item):
     return getattr(object.__getattribute__(self, "_default_fs"), item)
@@ -48,8 +49,11 @@ class ProxyFS(object):
       object.__setattr__(self, key, value)
 
   def _get_scheme(self, path):
-    split = urlparse(path)
-    return split.scheme if split.scheme else self._default_scheme
+    scheme = None
+    if path:
+      split = urlparse(path)
+      scheme = split.scheme if split.scheme else None
+    return scheme or self._default_scheme
 
   def _has_access(self, fs):
     from desktop.auth.backend import rewrite_user  # Avoid cyclic loop
@@ -58,7 +62,7 @@ class ProxyFS(object):
       #if not filebrowser_action (hdfs) then handle permission via doas else check permission in hue
       if not filebrowser_action:
         return True
-      user = rewrite_user(User.objects.get(username=self.user))
+      user = rewrite_user(User.objects.get(username=self.getuser()))
       return user.is_authenticated() and user.is_active and (is_admin(user) or not filebrowser_action or user.has_hue_permission(action=filebrowser_action, app="filebrowser"))
     except User.DoesNotExist:
       LOG.exception('proxyfs.has_access()')
@@ -69,11 +73,12 @@ class ProxyFS(object):
     if not scheme:
       raise IOError('Can not figure out scheme for path "%s"' % path)
     try:
-      fs = self._fs_dict[scheme]
-      if (self._has_access(fs)):
+      fs = self._fs_dict[scheme](self._name, self.getuser())
+      if self._has_access(fs):
+        fs.setuser(self.getuser())
         return fs
       else:
-        raise IOError("Missing permissions for %s on %s" % (self.user, path))
+        raise IOError("Missing permissions for %s on %s" % (self.getuser(), path))
     except KeyError:
       raise IOError('Unknown scheme %s, available schemes: %s' % (scheme, self._fs_dict.keys()))
 
@@ -91,13 +96,18 @@ class ProxyFS(object):
 
   def setuser(self, user):
     """Set a new user. Return the past current user."""
-    curr = self.user
-    for fs in self._fs_set:
-      fs.setuser(user)
+    curr = self.getuser()
+    if hasattr(user, 'username'):
+      self._user['user'] = user.username
+    else:
+      self._user['user'] = user
     return curr
 
+  def getuser(self):
+    return self._user['user']
+
   def do_as_user(self, username, fn, *args, **kwargs):
-    prev = self.user
+    prev = self.getuser()
     try:
       self.setuser(username)
       return fn(*args, **kwargs)
@@ -185,9 +195,9 @@ class ProxyFS(object):
     return fs.mktemp(subdir=subdir, prefix=prefix, basedir=basedir)
 
   def purge_trash(self):
-    for fs in self._fs_set:
-      if hasattr(fs, 'purge_trash'):
-        fs.purge_trash()
+    fs = self._get_fs() # Only webhdfs supports trash.
+    if fs and hasattr(fs, 'purge_trash'):
+      fs.purge_trash()
 
   # Handle file systems interactions
   # --------------------------------
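
Note the contract change above: the filesystems_dict passed to ProxyFS now maps each scheme to a client getter (a callable taking (name, user)) instead of a pre-built filesystem, so clients are resolved lazily per user. A minimal sketch under that assumption, mirroring the patching style of the tests below (FakeFs and the username are hypothetical):

    from mock import patch
    from desktop.lib.fs.proxyfs import ProxyFS

    class FakeFs(object):
      def setuser(self, user): pass
      def isdir(self, path): return True

    def get_fake_fs(name, user):  # getter: resolved lazily per (name, user)
      return FakeFs()

    # Bypass the Django permission lookup, as the unit tests do
    with patch('desktop.lib.fs.ProxyFS._has_access', return_value=True):
      proxy = ProxyFS({'hdfs': get_fake_fs, 's3a': get_fake_fs}, 'hdfs')
      proxy.setuser('alice')           # stored internally as a plain username
      proxy.isdir('s3a://bucket/key')  # resolves the 's3a' getter on demand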

+ 55 - 106
desktop/core/src/desktop/lib/fs/proxyfs_test.py

@@ -16,7 +16,7 @@
 
 from __future__ import absolute_import
 
-
+from mock import MagicMock, patch
 from nose.plugins.attrib import attr
 from nose.tools import assert_raises, assert_false, eq_
 from nose import SkipTest
@@ -29,136 +29,79 @@ from desktop.lib.django_test_util import make_logged_in_client
 from desktop.lib.test_utils import add_permission, remove_from_group
 
 
-@attr('integration')
 def test_fs_selection():
-  try:
-    from mock import MagicMock
-  except ImportError:
-    raise SkipTest("Skips until HUE-2947 is resolved")
-
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
-  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
-  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
-
-  s3fs, adls, hdfs = MagicMock("s3_access"), MagicMock("adls_access"), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
-  proxy_fs.setuser(user)
-
-  proxy_fs.isdir('s3a://bucket/key')
-  s3fs.isdir.assert_called_once_with('s3a://bucket/key')
-  assert_false(hdfs.isdir.called)
-
-  proxy_fs.isfile('hdfs://localhost:42/user/alice/file')
-  hdfs.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
-  assert_false(s3fs.isfile.called)
-
-  proxy_fs.isdir('adl://net/key')
-  s3fs.isdir.assert_called_once_with('adl://net/key')
-  assert_false(hdfs.isdir.called)
+  with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
+    _has_access.return_value = True
 
-  proxy_fs.isdir('adl:/key')
-  s3fs.isdir.assert_called_once_with('adl:/key')
-  assert_false(hdfs.isdir.called)
-
-  proxy_fs.open('/user/alice/file')
-  hdfs.open.assert_called_once_with('/user/alice/file')
-  assert_false(s3fs.open.called)
-
-  assert_raises(IOError, proxy_fs.stats, 'ftp://host')
+    s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    proxy_fs.setuser(user)
 
+    proxy_fs.isdir('s3a://bucket/key')
+    s3fs.isdir.assert_called_once_with('s3a://bucket/key')
+    assert_false(hdfs.isdir.called)
 
-# TODO: remove after HUE-2947 is resolved
-def test__get_fs():
-  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
-  user = User.objects.get(username='test')
-  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
-  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
+    proxy_fs.isfile('hdfs://localhost:42/user/alice/file')
+    hdfs.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
+    assert_false(s3fs.isfile.called)
 
-  s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
-  proxy_fs.setuser(user)
+    proxy_fs.isdir('adl://net/key')
+    adls.isdir.assert_called_once_with('adl://net/key')
+    assert_false(hdfs.isdir.called)
 
-  f = proxy_fs._get_fs
+    proxy_fs.isdir('abfs://net/key')
+    abfs.isdir.assert_called_once_with('abfs://net/key')
+    assert_false(hdfs.isdir.called)
 
-  eq_(f('s3a://bucket'), s3fs)
-  eq_(f('S3A://bucket/key'), s3fs)
-  eq_(f('adl:/path'), adls)
-  eq_(f('adl://net/path'), adls)
-  eq_(f('hdfs:/path'), hdfs)
-  eq_(f('hdfs://net/path'), hdfs)
-  eq_(f('/tmp'), hdfs)
+    assert_raises(IOError, proxy_fs.stats, 'ftp://host')
 
-  assert_raises(IOError, f, 'ftp://host')
+def wrapper(mock):
+  def tmp(*args, **kwargs):
+    return mock
+  return tmp
 
 
-@attr('integration')
 def test_multi_fs_selection():
-  try:
-    from mock import MagicMock
-  except ImportError:
-    raise SkipTest("Skips until HUE-2947 is resolved")
-
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
-  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
-  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
-
-  s3fs, adls, hdfs = MagicMock("s3_access"), MagicMock("adls_access"), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
-  proxy_fs.setuser(user)
-
-  proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
-  s3fs.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
-  assert_false(hdfs.copy.called)
-
-  proxy_fs.copyfile('s3a://bucket/key', 'key2')
-  s3fs.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
-  assert_false(hdfs.copyfile.called)
-
-  proxy_fs.copyfile('adl://net/key', 'key2')
-  s3fs.copyfile.assert_called_once_with('adl://net/key', 'key2')
-  assert_false(hdfs.copyfile.called)
 
-  proxy_fs.copyfile('adl:/key', 'key2')
-  s3fs.copyfile.assert_called_once_with('adl:/key', 'key2')
-  assert_false(hdfs.copyfile.called)
+  with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
+    _has_access.return_value = True
 
-  proxy_fs.rename('/tmp/file', 'shmile')
-  hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
-  assert_false(s3fs.rename.called)
+    s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    proxy_fs.setuser(user)
 
-  # Will be addressed in HUE-2934
-  assert_raises(NotImplementedError, proxy_fs.copy_remote_dir, 's3a://bucket/key', '/tmp/dir')
+    proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
+    s3fs.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
+    assert_false(hdfs.copy.called)
 
+    proxy_fs.copyfile('s3a://bucket/key', 'key2')
+    s3fs.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
+    assert_false(hdfs.copyfile.called)
 
-# TODO: remove after HUE-2947 is resolved
-def test__get_fs_pair():
-  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
-  user = User.objects.get(username='test')
-  add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
-  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
+    proxy_fs.copyfile('adl://net/key', 'key2')
+    adls.copyfile.assert_called_once_with('adl://net/key', 'key2')
+    assert_false(hdfs.copyfile.called)
 
-  s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
-  proxy_fs.setuser(user)
+    proxy_fs.copyfile('abfs:/key', 'key2')
+    abfs.copyfile.assert_called_once_with('abfs:/key', 'key2')
+    assert_false(hdfs.copyfile.called)
 
-  f = proxy_fs._get_fs_pair
+    proxy_fs.rename('/tmp/file', 'shmile')
+    hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
+    assert_false(s3fs.rename.called)
 
-  eq_(f('s3a://bucket1/key', 's3a://bucket2/key'), (s3fs, s3fs))
-  eq_(f('s3a://bucket/key', 'key2'), (s3fs, s3fs))
-  eq_(f('adl://net/key', 'key2'), (adls, adls))
-  eq_(f('adl:/key', 'key2'), (adls, adls))
-  eq_(f('/tmp/file', 'shmile'), (hdfs, hdfs))
-
-  assert_raises(IOError, f, 'ftp://host', 'key2')
+    # Will be addressed in HUE-2934
+    assert_raises(NotImplementedError, proxy_fs.copy_remote_dir, 's3a://bucket/key', 'adl://tmp/dir') # Exception can only be thrown if scheme is specified, else default to 1st scheme
 
 
 def test_constructor_given_invalid_arguments():
   assert_raises(ValueError, ProxyFS, {'s3a': {}}, 'hdfs')
 
 
-
 class MockFs():
   def __init__(self, filebrowser_action=None):
     self.user = None
@@ -178,20 +121,22 @@ class TestFsPermissions(object):
     user_client = make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
     user = User.objects.get(username='test')
 
-    s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
-    proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
+    s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
 
     remove_from_group(user.username, 'has_s3')
     remove_from_group(user.username, 'has_adls')
+    remove_from_group(user.username, 'has_abfs')
 
     # No perms by default
     assert_raises(Exception, f, 's3a://bucket')
     assert_raises(Exception, f, 'S3A://bucket/key')
     assert_raises(Exception, f, 'adl://net/key')
     assert_raises(Exception, f, 'adl:/key')
+    assert_raises(Exception, f, 'abfs:/key')
     f('hdfs://path')
     f('/tmp')
 
@@ -199,23 +144,26 @@ class TestFsPermissions(object):
       # Add perm
       add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
       add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
+      add_permission('test', 'has_abfs', permname='abfs_access', appname='filebrowser')
 
       f('s3a://bucket')
       f('S3A://bucket/key')
       f('adl://net/key')
       f('adl:/key')
+      f('abfs:/key')
       f('hdfs://path')
       f('/tmp')
     finally:
       remove_from_group(user.username, 'has_s3')
       remove_from_group(user.username, 'has_adls')
+      remove_from_group(user.username, 'has_abfs')
 
   def test_fs_permissions_admin_user(self):
     user_client = make_logged_in_client(username='admin', groupname='default', recreate=True, is_superuser=True)
     user = User.objects.get(username='admin')
 
-    s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
-    proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
+    s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
@@ -224,5 +172,6 @@ class TestFsPermissions(object):
     f('S3A://bucket/key')
     f('adl://net/key')
     f('adl:/key')
+    f('abfs:/key')
     f('hdfs://path')
     f('/tmp')
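
Note: the wrapper helper adapts each bare mock to the new getter contract (ProxyFS now expects a callable taking (name, user) for every scheme), and patching ProxyFS._has_access lets these tests run without the integration-only permission setup that the removed @attr('integration') variants required.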

+ 65 - 72
desktop/core/src/desktop/lib/fsmanager.py

@@ -17,42 +17,68 @@
 
 from __future__ import absolute_import
 
-import sys
 import logging
 
-import aws
+import aws.client
 import azure.client
 
-from aws.conf import is_enabled as is_s3_enabled
-from azure.conf import is_adls_enabled, is_abfs_enabled
+from aws.conf import is_enabled as is_s3_enabled, has_s3_access
+from azure.conf import is_adls_enabled, is_abfs_enabled, has_adls_access, has_abfs_access
 from hadoop.cluster import get_hdfs
 from hadoop.conf import has_hdfs_enabled
+from desktop.lib.fs.proxyfs import ProxyFS
+
+SUPPORTED_FS = ['hdfs', 's3a', 'adl', 'abfs']
+
+
+def has_access(fs=None, user=None):
+  if fs == 'hdfs':
+    return True
+  elif fs == 'adl':
+    return has_adls_access(user)
+  elif fs == 's3a':
+    return has_s3_access(user)
+  elif fs == 'abfs':
+    return has_abfs_access(user)
+
+
+def is_enabled(fs=None):
+  if fs == 'hdfs':
+    return has_hdfs_enabled()
+  elif fs == 'adl':
+    return is_adls_enabled()
+  elif fs == 's3a':
+    return is_s3_enabled()
+  elif fs == 'abfs':
+    return is_abfs_enabled()
+
+def is_enabled_and_has_access(fs=None, user=None):
+  return is_enabled(fs) and has_access(fs, user)
+
+def _get_client(fs=None):
+  if fs == 'hdfs':
+    return get_hdfs
+  elif fs == 's3a':
+    return aws.client.get_client
+  elif fs == 'adl':
+    return azure.client.get_client
+  elif fs == 'abfs':
+    return azure.client.get_client_abfs
+  return None
+
+
+def get_client(name='default', fs=None, user=None):
+  fs_getter = _get_client(fs)
+  if fs_getter:
+    return fs_getter(name, user)
+  else:
+    logging.warn('Can not get filesystem called "%s" for "%s" schema' % (name, fs))
+    return None
 
-from desktop.lib.fs import ProxyFS
-
-
-FS_CACHE = {}
-
-DEFAULT_SCHEMA = None
-
-FS_GETTERS = {
-}
 
-if has_hdfs_enabled():
-  FS_GETTERS['hdfs'] = get_hdfs
-  DEFAULT_SCHEMA = 'hdfs'
-if is_s3_enabled():
-  FS_GETTERS['s3a'] = aws.get_s3fs
-  if DEFAULT_SCHEMA is None:
-    DEFAULT_SCHEMA = 's3a'
-if is_adls_enabled():
-  FS_GETTERS['adl'] = azure.client.get_client
-  if DEFAULT_SCHEMA is None:
-      DEFAULT_SCHEMA = 'adl'
-if is_abfs_enabled():
-  FS_GETTERS['abfs'] = azure.client.get_client_abfs
-  if DEFAULT_SCHEMA is None:
-      DEFAULT_SCHEMA = 'abfs'
+def get_default_schema():
+  fs = [fs for fs in SUPPORTED_FS if is_enabled(fs)]
+  return fs[0] if fs else None
 
 
 def get_filesystem(name='default'):
@@ -60,51 +86,18 @@ def get_filesystem(name='default'):
   Return the filesystem with the given name.
   If the filesystem is not defined, raises KeyError
   """
-  if name not in FS_CACHE:
-    FS_CACHE[name] = _make_fs(name)
-  return FS_CACHE[name]
-
-
-def _make_fs(name):
-  fs_dict = {}
-
-  for schema, getter in FS_GETTERS.iteritems():
-    try:
-      if getter is not None:
-        fs = getter(name)
-        fs_dict[schema] = fs
-      else:
-        raise Exception('Filesystem not configured for %s' % schema)
-    except KeyError:
-      if DEFAULT_SCHEMA == schema:
-        logging.error('Can not get filesystem called "%s" for default schema "%s"' % (name, schema))
-        exc_class, exc, tb = sys.exc_info()
-        raise exc_class, exc, tb
-      else:
-        logging.warn('Can not get filesystem called "%s" for "%s" schema' % (name, schema))
-    except Exception, e:
-      logging.error('Failed to get filesystem called "%s" for "%s" schema: %s' % (name, schema, e))
-
-  if fs_dict:
-    return ProxyFS(fs_dict, DEFAULT_SCHEMA)
-  else:
-    return None
+  # Instead of taking a list of cached client, ProxyFS will now resolve the client based on scheme
+  # The method to resolve clients returns a cached results if possible.
+  pdict = {}
+  for fs in SUPPORTED_FS:
+    if is_enabled(fs):
+      pdict[fs] = _get_client(fs)
+  return ProxyFS(pdict, get_default_schema(), name)
+
+
+def get_filesystems(user):
+  return [fs for fs in SUPPORTED_FS if is_enabled(fs) and has_access(fs, user)]
 
 
-def clear_cache():
-  """
-  Clears internal cache.  Returns
-  something that can be given back to restore_cache.
-  """
-  global FS_CACHE
-  old = FS_CACHE
-  FS_CACHE = {}
-  return old
 
 
-def restore_cache(old_cache):
-  """
-  Restores cache from the result of a previous clear_cache call.
-  """
-  global FS_CACHE
-  FS_CACHE = old_cache
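
Note: FS_CACHE and its clear/restore helpers are gone; caching moved into the per-scheme clients (see aws/client.py below). Typical calls after this change, as a hedged sketch (user stands for a Django User object):

    from desktop.lib import fsmanager

    fs = fsmanager.get_filesystem('default')          # ProxyFS over all enabled schemes
    fsmanager.get_filesystems(user)                   # e.g. ['hdfs', 's3a'] for this user
    fsmanager.is_enabled_and_has_access('s3a', user)  # combined check used by the views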

+ 15 - 0
desktop/core/src/desktop/lib/idbroker/__init__.py

@@ -0,0 +1,15 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 78 - 0
desktop/core/src/desktop/lib/idbroker/client.py

@@ -0,0 +1,78 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+
+from desktop.lib.idbroker import conf
+from desktop.lib.rest import http_client, resource
+from hadoop.core_site import is_kerberos_enabled
+
+LOG = logging.getLogger(__name__)
+
+_KNOX_TOKEN_API = '/knoxtoken/api/v1/token'
+_CAB_API_CREDENTIALS_GLOBAL = '/cab/api/v1/credentials'
+
+
+class IDBroker(object):
+  @classmethod
+  def from_core_site(cls, fs=None, user=None):
+    security = {'type': None}
+    if is_kerberos_enabled():
+      security['type'] = 'kerberos'
+    elif conf.get_cab_username(fs):
+      security['type'] = 'basic'
+      security['params'] = {'username': conf.get_cab_username(fs), 'password': conf.get_cab_password(fs)}
+    return cls(
+      user,
+      conf.get_cab_address(fs),
+      conf.get_cab_dt_path(fs),
+      conf.get_cab_path(fs),
+      security
+    )
+
+  def __init__(self, user=None, address=None, dt_path=None, path=None, security=None):
+    self.user=user
+    self.address=address
+    self.dt_path = dt_path
+    self.path = path
+    self.security = security
+    self._client = http_client.HttpClient(self.address, logger=LOG)
+    self._root = resource.Resource(self._client)
+
+
+  def _knox_token_params(self):
+    if self.user:
+      if self.security['type'] == 'kerberos':
+        return { 'doAs': self.user }
+      else:
+        return { 'user.name': self.user }
+    else:
+      return None
+
+
+  def get_auth_token(self):
+    if self.security['type'] == 'kerberos':
+      self._client.set_kerberos_auth()
+    elif self.security['type'] == 'basic':
+      self._client.set_basic_auth(self.security['params']['username'], self.security['params']['password'])
+    res = self._root.invoke("GET", self.dt_path + _KNOX_TOKEN_API, self._knox_token_params(), allow_redirects=True, log_response=False) # Can't log response because returns credentials
+    return res.get('access_token')
+
+
+  def get_cab(self):
+    self._client.set_bearer_auth(self.get_auth_token())
+    return self._root.invoke("GET", self.path + _CAB_API_CREDENTIALS_GLOBAL, allow_redirects=True, log_response=False) # Can't log response because returns credentials
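
Note: the client performs two round trips: it first fetches a Knox token from <address><dt_path>/knoxtoken/api/v1/token (Kerberos or basic auth, depending on core-site), then presents it as a bearer token to <address><path>/cab/api/v1/credentials. A hedged usage sketch (the username is a placeholder):

    from desktop.lib.idbroker.client import IDBroker

    client = IDBroker.from_core_site('s3a', 'alice')
    cab = client.get_cab()          # token request + credentials request
    creds = cab.get('Credentials')  # AccessKeyId, SecretAccessKey, SessionToken, Expiration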

+ 76 - 0
desktop/core/src/desktop/lib/idbroker/conf.py

@@ -0,0 +1,76 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+
+from hadoop.core_site import get_conf
+
+from django.utils.translation import ugettext as _
+
+LOG = logging.getLogger(__name__)
+
+_CNF_CAB_ADDRESS='fs.%s.ext.cab.address' # http://host:8444/gateway
+_CNF_CAB_ADDRESS_DT_PATH='fs.%s.ext.cab.dt.path' # dt
+_CNF_CAB_ADDRESS_PATH='fs.%s.ext.cab.path' # aws-cab
+_CNF_CAB_USERNAME='fs.%s.ext.cab.username' # when not using kerberos
+_CNF_CAB_PASSWORD='fs.%s.ext.cab.password'
+SUPPORTED_FS = ['s3a', 'azure', 'gs']
+
+def validate_fs(fs=None):
+  if fs in SUPPORTED_FS:
+    return True
+  else:
+    raise ValueError('Selected FS %s is not supported by Hue IDBroker client' % fs)
+
+def get_cab_address(fs=None):
+  validate_fs(fs)
+  return get_conf().get(_CNF_CAB_ADDRESS % fs)
+
+def get_cab_dt_path(fs=None):
+  validate_fs(fs)
+  return get_conf().get(_CNF_CAB_ADDRESS_DT_PATH % fs)
+
+def get_cab_path(fs=None):
+  validate_fs(fs)
+  return get_conf().get(_CNF_CAB_ADDRESS_PATH % fs)
+
+def get_cab_username(fs=None):
+  validate_fs(fs)
+  return get_conf().get(_CNF_CAB_USERNAME % fs)
+
+def get_cab_password(fs=None):
+  validate_fs(fs)
+  return get_conf().get(_CNF_CAB_PASSWORD % fs)
+
+def is_idbroker_enabled(fs=None):
+  return get_cab_address(fs) is not None
+
+def config_validator():
+  res = []
+  from desktop.lib.idbroker.client import IDBroker # Circular dependency
+  if is_idbroker_enabled():
+    try:
+      for fs in SUPPORTED_FS:
+        client = IDBroker.from_core_site(fs)
+      token = client.get_auth_token()
+      if not token:
+        raise ValueError('Failed to obtain IDBroker Token')
+    except Exception as e:
+      LOG.exception('Failed to obtain IDBroker Token')
+      res.append(('idbroker', _('Failed to obtain IDBroker Token, check your IDBroker configuration.')))
+
+  return res
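
Note: IDBroker is considered enabled for a scheme as soon as the address key is present in core-site. The keys read here, shown as the dict that get_conf() would return (placeholder values, mirroring the unit tests below):

    {
      'fs.s3a.ext.cab.address': 'https://idbroker.example.com:8444/gateway',
      'fs.s3a.ext.cab.dt.path': 'dt',
      'fs.s3a.ext.cab.path': 'aws-cab',
      'fs.s3a.ext.cab.username': 'hue',    # only when not using kerberos
      'fs.s3a.ext.cab.password': 'secret',
    }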

+ 70 - 0
desktop/core/src/desktop/lib/idbroker/tests.py

@@ -0,0 +1,70 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+import unittest
+
+from mock import patch, Mock
+from nose.tools import assert_equal, assert_true
+
+from desktop.lib.idbroker.client import IDBroker
+
+LOG = logging.getLogger(__name__)
+
+class TestIDBroker(unittest.TestCase):
+  def test_username_authentication(self):
+    with patch('desktop.lib.idbroker.conf.get_conf') as conf:
+      with patch('desktop.lib.idbroker.client.resource.Resource.invoke') as invoke:
+        with patch('desktop.lib.idbroker.client.http_client.HttpClient.set_basic_auth') as set_basic_auth:
+          conf.return_value = {
+            'fs.s3a.ext.cab.address': 'address',
+            'fs.s3a.ext.cab.dt.path': 'dt_path',
+            'fs.s3a.ext.cab.path': 'path',
+            'fs.s3a.ext.cab.username': 'username',
+            'fs.s3a.ext.cab.password': 'password'
+          }
+          invoke.return_value = {
+             'Credentials': 'Credentials'
+          }
+          client = IDBroker.from_core_site('s3a', 'test')
+
+          cab = client.get_cab()
+          assert_equal(invoke.call_count, 2) # get_cab calls twice
+          assert_equal(cab.get('Credentials'), 'Credentials')
+          assert_equal(set_basic_auth.call_count, 1)
+
+  def test_kerberos_authentication(self):
+    with patch('desktop.lib.idbroker.conf.get_conf') as conf:
+      with patch('desktop.lib.idbroker.client.is_kerberos_enabled') as is_kerberos_enabled:
+        with patch('desktop.lib.idbroker.client.resource.Resource.invoke') as invoke:
+          with patch('desktop.lib.idbroker.client.http_client.HttpClient.set_kerberos_auth') as set_kerberos_auth:
+            is_kerberos_enabled.return_value = True
+            conf.return_value = {
+              'fs.s3a.ext.cab.address': 'address',
+              'fs.s3a.ext.cab.dt.path': 'dt_path',
+              'fs.s3a.ext.cab.path': 'path',
+              'hadoop.security.authentication': 'kerberos',
+            }
+            invoke.return_value = {
+              'Credentials': 'Credentials'
+            }
+            client = IDBroker.from_core_site('s3a', 'test')
+
+            cab = client.get_cab()
+            assert_equal(invoke.call_count, 2) # get_cab calls twice
+            assert_equal(cab.get('Credentials'), 'Credentials')
+            assert_equal(set_kerberos_auth.call_count, 1)

+ 21 - 1
desktop/core/src/desktop/lib/rest/http_client.py

@@ -19,6 +19,7 @@ import posixpath
 import requests
 import threading
 import urllib
+
 from urlparse import urlparse
 
 from django.utils.encoding import iri_to_uri, smart_str
@@ -27,7 +28,7 @@ from django.utils.http import urlencode
 from desktop import conf
 
 from requests import exceptions
-from requests.auth import HTTPBasicAuth, HTTPDigestAuth
+from requests.auth import AuthBase, HTTPBasicAuth, HTTPDigestAuth
 from requests_kerberos import HTTPKerberosAuth, REQUIRED, OPTIONAL, DISABLED
 from urllib3.contrib import pyopenssl
 
@@ -133,6 +134,9 @@ class HttpClient(object):
     self._session.auth = HTTPDigestAuth(username, password)
     return self
 
+  def set_bearer_auth(self, token):
+    self._session.auth = HTTPBearerAuth(token)
+
   def set_headers(self, headers):
     """
     Add headers to the request
@@ -222,3 +226,19 @@ class HttpClient(object):
       param_str = urlencode(params)
       res += '?' + param_str
     return iri_to_uri(res)
+
+class HTTPBearerAuth(AuthBase):
+    """Attaches HTTP Basic Authentication to the given Request object."""
+
+    def __init__(self, token):
+        self.token = token
+
+    def __eq__(self, other):
+        return self.token == getattr(other, 'token', None)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __call__(self, r):
+        r.headers['Authorization'] = 'Bearer %s' % self.token
+        return r
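
Note: HTTPBearerAuth plugs into the standard requests auth hook: the session invokes the object for each prepared request and it stamps the Authorization header. Equivalent standalone usage (URL and token are placeholders):

    import requests
    from desktop.lib.rest.http_client import HTTPBearerAuth

    session = requests.Session()
    session.auth = HTTPBearerAuth('my-token')
    session.get('https://idbroker.example.com/gateway/aws-cab/cab/api/v1/credentials')
    # every request now carries: Authorization: Bearer my-token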

+ 16 - 14
desktop/core/src/desktop/lib/rest/resource.py

@@ -88,29 +88,31 @@ class Resource(object):
     """
     """
     path = self._join_uri(relpath)
     path = self._join_uri(relpath)
     start_time = time.time()
     start_time = time.time()
-    resp = self._client.execute(method,
-                                path,
-                                params=params,
-                                data=data,
-                                headers=headers,
-                                files=files,
-                                allow_redirects=allow_redirects,
-                                urlencode=self._urlencode,
-                                clear_cookies=clear_cookies)
-
-    if log_response:
-      log_length = conf.REST_RESPONSE_SIZE.get() != -1 and conf.REST_RESPONSE_SIZE.get()
+    resp = None
+    try:
+      resp = self._client.execute(method,
+                                  path,
+                                  params=params,
+                                  data=data,
+                                  headers=headers,
+                                  files=files,
+                                  allow_redirects=allow_redirects,
+                                  urlencode=self._urlencode,
+                                  clear_cookies=clear_cookies)
+    finally: # Print the response time even when there's an exception
+      log_length = conf.REST_RESPONSE_SIZE.get() != -1 and conf.REST_RESPONSE_SIZE.get() if log_response else 0 # We want to output duration without content
       duration = time.time() - start_time
       message = "%s %s Got response%s: %s%s" % (
           method,
           smart_unicode(path, errors='ignore'),
           ' in %dms' % (duration * 1000),
-          smart_unicode(resp.content[:log_length or None], errors='replace'),
-          log_length and len(resp.content) > log_length and "..." or ""
+          smart_unicode(resp.content[:log_length], errors='replace') if resp else "",
+          log_length and len(resp.content) > log_length and "..." or "" if resp else ""
       )
       self._client.logger.disabled = 0
       log_if_slow_call(duration=duration, message=message, logger=self._client.logger)
 
+
     return resp
 
 

+ 1 - 2
desktop/core/src/desktop/lib/test_utils.py

@@ -88,9 +88,8 @@ def reformat_xml(xml_obj):
 
 
 def clear_sys_caches():
-  return cluster.clear_caches(), fsmanager.clear_cache()
+  return [cluster.clear_caches()]
 
 
 def restore_sys_caches(old_caches):
   cluster.restore_caches(old_caches[0])
-  fsmanager.restore_cache(old_caches[1])

+ 4 - 6
desktop/core/src/desktop/models.py

@@ -38,15 +38,13 @@ from django.utils.translation import ugettext as _, ugettext_lazy as _t
 
 from settings import HUE_DESKTOP_VERSION
 
-from aws.conf import is_enabled as is_s3_enabled, has_s3_access
-from azure.conf import is_adls_enabled, has_adls_access
 from dashboard.conf import get_engines, HAS_REPORT_ENABLED
-from hadoop.conf import has_hdfs_enabled
 from kafka.conf import has_kafka
 from notebook.conf import SHOW_NOTEBOOKS, get_ordered_interpreters
 
 from desktop import appmanager
 from desktop.conf import get_clusters, CLUSTER_ID, IS_MULTICLUSTER_ONLY, IS_EMBEDDED, IS_K8S_ONLY
+from desktop.lib import fsmanager
 from desktop.lib.i18n import force_unicode
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.paths import get_run_root, SAFE_CHARACTERS_URI_COMPONENTS
@@ -1761,7 +1759,7 @@ class ClusterConfig():
   def _get_browser(self):
     interpreters = []
 
-    if has_hdfs_enabled() and 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type:
+    if 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type and fsmanager.is_enabled_and_has_access('hdfs', self.user):
       interpreters.append({
         'type': 'hdfs',
         'displayName': _('Files'),
@@ -1770,7 +1768,7 @@ class ClusterConfig():
         'page': '/filebrowser/' + (not self.user.is_anonymous() and 'view=' + urllib.quote(self.user.get_home_directory().encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS) or '')
       })
 
-    if is_s3_enabled() and 'filebrowser' in self.apps and has_s3_access(self.user) and not IS_EMBEDDED.get():
+    if 'filebrowser' in self.apps and not IS_EMBEDDED.get() and fsmanager.is_enabled_and_has_access('s3a', self.user):
       interpreters.append({
         'type': 's3',
         'displayName': _('S3'),
@@ -1779,7 +1777,7 @@ class ClusterConfig():
         'page': '/filebrowser/view=' + urllib.quote('S3A://'.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
       })
 
-    if is_adls_enabled() and 'filebrowser' in self.apps and has_adls_access(self.user) and ANALYTIC_DB not in self.cluster_type:
+    if 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type and fsmanager.is_enabled_and_has_access('adl', self.user):
       interpreters.append({
         'type': 'adls',
         'displayName': _('ADLS'),

+ 6 - 9
desktop/core/src/desktop/views.py

@@ -43,9 +43,6 @@ from configobj import ConfigObj, get_extra_values, ConfigObjError
 
 import django.views.debug
 
-from aws.conf import is_enabled as is_s3_enabled, has_s3_access
-from azure.conf import is_adls_enabled, has_adls_access
-
 import desktop.conf
 import desktop.log.log_buffer
 
@@ -53,7 +50,7 @@ from desktop import appmanager
 from desktop.api import massaged_tags_for_json, massaged_documents_for_json, _get_docs
 from desktop.auth.backend import is_admin
 from desktop.conf import USE_NEW_EDITOR, HUE_LOAD_BALANCER, get_clusters
-from desktop.lib import django_mako
+from desktop.lib import django_mako, fsmanager
 from desktop.lib.conf import GLOBAL_CONFIG, BoundConfig, _configs_from_dir
 from desktop.lib.config_spec_dump import ConfigSpec
 from desktop.lib.django_util import JsonResponse, login_notrequired, render
@@ -86,8 +83,8 @@ def hue(request):
   return render('hue.mako', request, {
     'apps': apps_list,
     'other_apps': other_apps,
-    'is_s3_enabled': is_s3_enabled() and has_s3_access(request.user),
-    'is_adls_enabled': is_adls_enabled() and has_adls_access(request.user),
+    'is_s3_enabled': fsmanager.is_enabled('s3a') and fsmanager.has_access('s3a', request.user),
+    'is_adls_enabled': fsmanager.is_enabled('adl') and fsmanager.has_access('adl', request.user),
     'is_ldap_setup': 'desktop.auth.backend.LdapBackend' in desktop.conf.AUTH.BACKEND.get(),
     'leaflet': {
       'layer': desktop.conf.LEAFLET_TILE_LAYER.get(),
@@ -362,7 +359,7 @@ def memory(request):
 
 def global_js_constants(request):
   return HttpResponse(render('global_js_constants.mako', request, {
-    'is_s3_enabled': is_s3_enabled() and has_s3_access(request.user),
+    'is_s3_enabled': fsmanager.is_enabled('s3a') and fsmanager.has_access('s3a', request.user),
     'leaflet': {
       'layer': desktop.conf.LEAFLET_TILE_LAYER.get(),
       'attribution': desktop.conf.LEAFLET_TILE_LAYER_ATTRIBUTION.get(),
@@ -495,8 +492,8 @@ def commonheader(title, section, user, request=None, padding="90px", skip_topbar
     },
     'is_demo': desktop.conf.DEMO_ENABLED.get(),
     'is_ldap_setup': 'desktop.auth.backend.LdapBackend' in desktop.conf.AUTH.BACKEND.get(),
-    'is_s3_enabled': is_s3_enabled() and has_s3_access(user),
-    'is_adls_enabled': is_adls_enabled() and has_adls_access(request.user),
+    'is_s3_enabled': fsmanager.is_enabled('s3a') and fsmanager.has_access('s3a', request.user),
+    'is_adls_enabled': fsmanager.is_enabled('adl') and fsmanager.has_access('adl', request.user),
     'banner_message': get_banner_message(request)
   })
 

+ 0 - 36
desktop/libs/aws/src/aws/__init__.py

@@ -13,40 +13,4 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
 
-from aws import conf
-from aws.client import Client
-from aws.s3.s3fs import S3FileSystem
-
-CLIENT_CACHE = None
-
-
-def get_client(identifier='default'):
-  global CLIENT_CACHE
-  _init_clients()
-  if identifier not in CLIENT_CACHE:
-    raise ValueError('Unknown AWS client: %s, check your configuration' % identifier)
-  return CLIENT_CACHE[identifier]
-
-
-def _init_clients():
-  global CLIENT_CACHE
-  if CLIENT_CACHE is not None:
-    return
-  CLIENT_CACHE = {}
-  for identifier in list(conf.AWS_ACCOUNTS.keys()):
-    CLIENT_CACHE[identifier] = _make_client(identifier)
-  # If default configuration not initialized, initialize client connection with IAM metadata
-  if 'default' not in CLIENT_CACHE and conf.has_iam_metadata():
-    CLIENT_CACHE['default'] = Client()
-
-
-def _make_client(identifier):
-  client_conf = conf.AWS_ACCOUNTS[identifier]
-  return Client.from_config(client_conf)
-
-
-def get_s3fs(identifier='default'):
-  connection = get_client(identifier).get_s3_connection()
-  return S3FileSystem(connection)

+ 127 - 27
desktop/libs/aws/src/aws/client.py

@@ -17,6 +17,7 @@ from __future__ import absolute_import
 
 from builtins import str
 from builtins import object
+import datetime
 import logging
 import os
 
@@ -25,20 +26,115 @@ import boto.s3
 import boto.s3.connection
 import boto.utils
 
-from aws.conf import get_default_region, has_iam_metadata, DEFAULT_CALLING_FORMAT, AWS_ACCOUNT_REGION_DEFAULT
+from aws import conf as aws_conf
 from aws.s3.s3fs import S3FileSystemException
+from aws.s3.s3fs import S3FileSystem
 
+from desktop.lib.idbroker import conf as conf_idbroker
+from desktop.lib.idbroker.client import IDBroker
 
 LOG = logging.getLogger(__name__)
 
-
 HTTP_SOCKET_TIMEOUT_S = 60
 
+CLIENT_CACHE = None
+
+
+# FIXME: Should we check hue principal for the default user?
+def _get_cache_key(identifier='default', user='HUE'): # FIXME: Caching via username has issues when users get deleted. Need to switch to userid, but bigger change
+  return identifier + ':' + user
+
+
+def clear_cache():
+  global CLIENT_CACHE
+  CLIENT_CACHE = None
+
+
+def current_ms_from_utc():
+  return (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000
+
+
+def get_client(identifier='default', user=None):
+  global CLIENT_CACHE
+  _init_clients()
+
+  cache_key = _get_cache_key(identifier, user) if conf_idbroker.is_idbroker_enabled('s3a') else _get_cache_key(identifier) # We don't want to cache by username when IDBroker not enabled
+  client = CLIENT_CACHE.get(cache_key)
+
+  if client and (client.expiration is None or client.expiration > int(current_ms_from_utc())): # expiration from IDBroker returns java timestamp in MS
+    return client
+  else:
+    client = _make_client(identifier, user)
+    CLIENT_CACHE[cache_key] = client
+    return client
+
+def get_credential_provider(identifier='default', user=None):
+  client_conf = aws_conf.AWS_ACCOUNTS[identifier] if identifier in aws_conf.AWS_ACCOUNTS else None
+  return CredentialProviderIDBroker(IDBroker.from_core_site('s3a', user)) if conf_idbroker.is_idbroker_enabled('s3a') else CredentialProviderConf(client_conf)
+
+
+def _init_clients():
+  global CLIENT_CACHE
+  if CLIENT_CACHE is not None:
+    return
+  CLIENT_CACHE = {} # Can't convert this to django cache, because S3FileSystem is not pickable
+  for identifier in list(aws_conf.AWS_ACCOUNTS.keys()):
+    CLIENT_CACHE[_get_cache_key(identifier)] = _make_client(identifier)
+  # If default configuration not initialized, initialize client connection with IAM metadata
+  if not CLIENT_CACHE.has_key(_get_cache_key()) and aws_conf.has_iam_metadata():
+    CLIENT_CACHE[_get_cache_key()] = _make_client('default')
+
+
+def _make_client(identifier, user=None):
+  client_conf = aws_conf.AWS_ACCOUNTS[identifier] if identifier in aws_conf.AWS_ACCOUNTS else None
+
+  client = Client.from_config(client_conf, get_credential_provider(identifier, user))
+  return S3FileSystem(client.get_s3_connection(), client.expiration) # It would be nice if the connection is lazy loaded
+
+
+class CredentialProviderConf(object):
+  def __init__(self, conf):
+    self._conf=conf
+
+  def validate(self):
+    credentials = self.get_credentials()
+    if None in (credentials.get('AccessKeyId'), credentials.get('SecretAccessKey')) and not credentials.get('AllowEnvironmentCredentials') and not aws_conf.has_iam_metadata():
+      raise ValueError('Can\'t create AWS client, credential is not configured')
+    return True
+
+  def get_credentials(self):
+    if self._conf:
+      return {
+         'AccessKeyId': self._conf.ACCESS_KEY_ID.get(),
+         'SecretAccessKey': self._conf.SECRET_ACCESS_KEY.get(),
+         'SessionToken': self._conf.SECURITY_TOKEN.get(),
+         'AllowEnvironmentCredentials': self._conf.ALLOW_ENVIRONMENT_CREDENTIALS.get()
+      }
+    else:
+      return {
+        'AccessKeyId': self._conf.ACCESS_KEY_ID.get(),
+        'SecretAccessKey':self._conf.get_default_secret_key(),
+        'SessionToken': self._conf.get_default_session_token(),
+        'AllowEnvironmentCredentials': True
+      }
+
+
+class CredentialProviderIDBroker(object):
+  def __init__(self, idbroker):
+    self.idbroker=idbroker
+    self.credentials = None
+
+  def validate(self):
+    return True # Already been validated in config
+
+  def get_credentials(self):
+    return self.idbroker.get_cab().get('Credentials')
+
 
 class Client(object):
-  def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_security_token=None, region=AWS_ACCOUNT_REGION_DEFAULT,
+  def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_security_token=None, region=aws_conf.AWS_ACCOUNT_REGION_DEFAULT,
                timeout=HTTP_SOCKET_TIMEOUT_S, host=None, proxy_address=None, proxy_port=None, proxy_user=None,
-               proxy_pass=None, calling_format=None, is_secure=True):
+               proxy_pass=None, calling_format=None, is_secure=True, expiration=None):
     self._access_key_id = aws_access_key_id
     self._secret_access_key = aws_secret_access_key
     self._security_token = aws_security_token
@@ -49,8 +145,9 @@ class Client(object):
     self._proxy_port = proxy_port
     self._proxy_user = proxy_user
     self._proxy_pass = proxy_pass
-    self._calling_format = DEFAULT_CALLING_FORMAT if calling_format is None else calling_format
+    self._calling_format = aws_conf.DEFAULT_CALLING_FORMAT if calling_format is None else calling_format
     self._is_secure = is_secure
+    self.expiration = expiration
 
     if not boto.config.has_section('Boto'):
       boto.config.add_section('Boto')
@@ -59,28 +156,31 @@ class Client(object):
       boto.config.set('Boto', 'http_socket_timeout', str(self._timeout))
       boto.config.set('Boto', 'http_socket_timeout', str(self._timeout))
 
 
   @classmethod
   @classmethod
-  def from_config(cls, conf):
-    access_key_id = conf.ACCESS_KEY_ID.get()
-    secret_access_key = conf.SECRET_ACCESS_KEY.get()
-    security_token = conf.SECURITY_TOKEN.get()
-    env_cred_allowed = conf.ALLOW_ENVIRONMENT_CREDENTIALS.get()
-
-    if None in (access_key_id, secret_access_key) and not env_cred_allowed and not has_iam_metadata():
-      raise ValueError('Can\'t create AWS client, credential is not configured')
-
-    return cls(
-      aws_access_key_id=access_key_id,
-      aws_secret_access_key=secret_access_key,
-      aws_security_token=security_token,
-      region=get_default_region(),
-      host=conf.HOST.get(),
-      proxy_address=conf.PROXY_ADDRESS.get(),
-      proxy_port=conf.PROXY_PORT.get(),
-      proxy_user=conf.PROXY_USER.get(),
-      proxy_pass=conf.PROXY_PASS.get(),
-      calling_format=conf.CALLING_FORMAT.get(),
-      is_secure=conf.IS_SECURE.get()
-    )
+  def from_config(cls, conf, credential_provider):
+    credential_provider.validate()
+    credentials = credential_provider.get_credentials()
+
+    if conf:
+      return cls(
+        aws_access_key_id=credentials.get('AccessKeyId'),
+        aws_secret_access_key=credentials.get('SecretAccessKey'),
+        aws_security_token=credentials.get('SessionToken'),
+        region=aws_conf.get_default_region(),
+        host=conf.HOST.get(),
+        proxy_address=conf.PROXY_ADDRESS.get(),
+        proxy_port=conf.PROXY_PORT.get(),
+        proxy_user=conf.PROXY_USER.get(),
+        proxy_pass=conf.PROXY_PASS.get(),
+        calling_format=conf.CALLING_FORMAT.get(),
+        is_secure=conf.IS_SECURE.get(),
+        expiration=credentials.get('Expiration')
+      )
+    else:
+      return cls(
+        aws_access_key_id=credentials.get('AccessKeyId'),
+        aws_secret_access_key=credentials.get('SecretAccessKey'),
+        aws_security_token=credentials.get('SessionToken')
+      )

   def get_s3_connection(self):

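The expiration value threaded through Client and S3FileSystem above is what lets the client cache cope with short-lived IDBroker credentials: _init_clients() pre-populates the cache per account, and get_client() can then decide whether a cached filesystem is still usable. The committed get_client() is not part of this hunk, so the following is only a minimal sketch of that interaction; it reuses _get_cache_key(), _make_client() and current_ms_from_utc() from aws/client.py (the tests further down exercise all three), and the exact shape of the cache key is our assumption.

import time

CLIENT_CACHE = None

def current_ms_from_utc():
  # Milliseconds since the UTC epoch; IDBroker Expiration values use the same unit.
  return time.time() * 1000

def _get_cache_key(identifier='default', user=None):
  # Assumption: one cached S3FileSystem per (account, user) pair.
  return identifier + ':' + (user or '')

def get_client(identifier='default', user=None):
  global CLIENT_CACHE
  _init_clients()
  cache_key = _get_cache_key(identifier, user)
  client = CLIENT_CACHE.get(cache_key)
  # Rebuild the client once its IDBroker credentials expire; clients built from
  # static credentials carry expiration None and are reused for the process lifetime.
  if client is None or (client.expiration is not None and client.expiration <= int(current_ms_from_utc())):
    client = _make_client(identifier, user)
    CLIENT_CACHE[cache_key] = client
  return client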
+ 12 - 6
desktop/libs/aws/src/aws/conf.py

@@ -19,14 +19,10 @@ import logging
 import os
 import re
 
-import boto.utils
-from boto.s3.connection import Location
-
 from django.utils.translation import ugettext_lazy as _, ugettext as _t
 
-import aws
 from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool, coerce_password_from_script
-from hadoop.core_site import get_s3a_access_key, get_s3a_secret_key
+from hadoop.core_site import get_s3a_access_key, get_s3a_secret_key, get_s3a_session_token
 
 LOG = logging.getLogger(__name__)
 
@@ -36,6 +32,7 @@ SUBDOMAIN_ENDPOINT_RE = 's3.(?P<region>[a-z0-9-]+).amazonaws.com'
 HYPHEN_ENDPOINT_RE = 's3-(?P<region>[a-z0-9-]+).amazonaws.com'
 DUALSTACK_ENDPOINT_RE = 's3.dualstack.(?P<region>[a-z0-9-]+).amazonaws.com'
 AWS_ACCOUNT_REGION_DEFAULT = 'us-east-1' # Location.USEast
+PERMISSION_ACTION_S3 = "s3_access"
 
 
 def get_locations():
@@ -76,6 +73,13 @@ def get_default_secret_key():
   return secret_access_key_script or get_s3a_secret_key()
 
 
+def get_default_session_token():
+  """
+  Attempt to get the AWS session token from core-site, else None
+  """
+  return get_s3a_session_token()
+
+
 def get_default_region():
   region = ''
 
@@ -140,6 +144,7 @@ AWS_ACCOUNTS = UnspecifiedConfigSection(
         key='security_token',
         type=str,
         private=True,
+        dynamic_default=get_default_session_token
       ),
       ALLOW_ENVIRONMENT_CREDENTIALS=Config(
         help=_('Allow to use environment sources of credentials (environment variables, EC2 profile).'),
@@ -209,6 +214,7 @@ def is_enabled():

 def has_iam_metadata():
   try:
+    import boto.utils
     # To avoid unnecessary network call, check if Hue is running on EC2 instance
     # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
     if os.path.exists('/sys/hypervisor/uuid') and open('/sys/hypervisor/uuid', 'read').read()[:3] == 'ec2':
@@ -226,7 +232,7 @@ def has_s3_access(user):

 def config_validator(user):
   res = []
-
+  import aws # Circular dependency
   if is_enabled():
     try:
       conn = aws.get_client('default').get_s3_connection()

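With dynamic_default=get_default_session_token on security_token, the resolution order mirrors the existing access/secret key handling: an explicit value in hue.ini wins, otherwise Hue falls back to fs.s3a.session.token from core-site. A small illustration of the fallback (a sketch, not committed code):

from aws import conf as aws_conf

# With no security_token set in hue.ini, the Config's dynamic default kicks in:
token = aws_conf.AWS_ACCOUNTS['default'].SECURITY_TOKEN.get()
# ...which is exactly what the new helper computes from core-site (or None):
assert token == aws_conf.get_default_session_token()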
+ 3 - 3
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -33,13 +33,12 @@ from boto.s3.prefix import Prefix
 from django.utils.translation import ugettext as _
 
 from aws import s3
-from aws.conf import get_default_region, get_locations
+from aws.conf import get_default_region, get_locations, PERMISSION_ACTION_S3
 from aws.s3 import normpath, s3file, translate_s3_error, S3A_ROOT
 from aws.s3.s3stat import S3Stat
 
 
 DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
-PERMISSION_ACTION_S3 = "s3_access"
 BUCKET_NAME_PATTERN = re.compile("^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
 
 LOG = logging.getLogger(__name__)
@@ -75,13 +74,14 @@ def auth_error_handler(view_fn):


 class S3FileSystem(object):
-  def __init__(self, s3_connection):
+  def __init__(self, s3_connection, expiration=None):
     self._s3_connection = s3_connection
     self._filebrowser_action = PERMISSION_ACTION_S3
     self.user = None
     self.is_sentry_managed = lambda path: False
     self.superuser = None
     self.supergroup = None
+    self.expiration = expiration
 
   def _get_bucket(self, name):
     return self._s3_connection.get_bucket(name)

+ 3 - 3
desktop/libs/aws/src/aws/s3/upload.py

@@ -35,7 +35,7 @@ from django.core.files.uploadedfile import SimpleUploadedFile
 from django.core.files.uploadhandler import FileUploadHandler, SkipFile, StopFutureHandlers, StopUpload, UploadFileException
 from django.utils.translation import ugettext as _
 
-from aws import get_s3fs
+from desktop.lib.fsmanager import get_client
 from aws.s3 import parse_uri
 from aws.s3.s3fs import S3FileSystemException
 
@@ -62,7 +62,7 @@ class S3FileUploadHandler(FileUploadHandler):
     self.target_path = None
     self.file = None
     self._request = request
-    self._fs = self._get_s3fs(request)
+    self._fs = get_client(fs='s3a', user=request.user.username)
     self._mp = None
     self._part_num = 1
 
@@ -122,7 +122,7 @@ class S3FileUploadHandler(FileUploadHandler):


   def _get_s3fs(self, request):
-    fs = get_s3fs() # Pre 6.0 request.fs did not exist, now it does. The logic for assigning request.fs is not correct for FileUploadHandler.
+    fs = get_client(user=request.user.username) # Pre 6.0 request.fs did not exist, now it does. The logic for assigning request.fs is not correct for FileUploadHandler.
 
     if not fs:
       raise S3FileUploadError(_("No S3 filesystem found."))

+ 74 - 0
desktop/libs/aws/src/aws/tests.py

@@ -0,0 +1,74 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+import unittest
+
+from mock import patch, Mock
+from nose.tools import assert_equal, assert_true, assert_not_equal
+
+from aws import conf
+from aws.client import clear_cache, get_client, get_credential_provider, current_ms_from_utc
+
+LOG = logging.getLogger(__name__)
+
+class TestAWS(unittest.TestCase):
+  def test_with_credentials(self):
+    try:
+      finish = conf.AWS_ACCOUNTS.set_for_testing({'default': {'access_key_id':'access_key_id', 'secret_access_key': 'secret_access_key'}})
+      with patch('aws.client.conf_idbroker.get_conf') as get_conf:
+        with patch('aws.client.Client.get_s3_connection'):
+          get_conf.return_value = {}
+          client1 = get_client('default')
+          client2 = get_client('default', 'test')
+
+          provider = get_credential_provider()
+          assert_equal(provider.get_credentials().get('AccessKeyId'), conf.AWS_ACCOUNTS['default'].ACCESS_KEY_ID.get())
+          assert_equal(client1, client2) # Should be the same: credential-based clients are not per-user and have no Expiration
+    finally:
+      finish()
+      clear_cache()
+
+  def test_with_idbroker(self):
+    try:
+      finish = conf.AWS_ACCOUNTS.set_for_testing({'default': {}})
+      with patch('aws.client.conf_idbroker.get_conf') as get_conf:
+        with patch('aws.client.Client.get_s3_connection'):
+          with patch('aws.client.IDBroker.get_cab') as get_cab:
+            get_conf.return_value = {
+              'fs.s3a.ext.cab.address': 'address'
+            }
+            get_cab.return_value = {
+              'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': 0}
+            }
+            provider = get_credential_provider()
+            assert_equal(provider.get_credentials().get('AccessKeyId'), 'AccessKeyId')
+            client1 = get_client('default', 'HUE')
+            client2 = get_client('default', 'HUE')
+            assert_not_equal(client1, client2) # With Expiration 0 the cached client is already stale, so each call builds a new one
+
+            get_cab.return_value = {
+              'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': int(current_ms_from_utc()) + 10*1000}
+            }
+            client3 = get_client('default', 'HUE')
+            client4 = get_client('default', 'HUE')
+            client5 = get_client('default', 'test')
+            assert_equal(client3, client4) # With a 10-second expiration the cached client is reused
+            assert_not_equal(client4, client5) # Different users get different clients
+    finally:
+      finish()
+      clear_cache()

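The mocked get_cab values above mirror the shape of the Knox IDBroker Credentials (CAB) response as this patch consumes it: everything CredentialProviderIDBroker.get_credentials() needs sits under the Credentials key. A fuller, purely illustrative payload (field values invented):

cab = {
  'Credentials': {
    'AccessKeyId': 'ASIAEXAMPLE',
    'SecretAccessKey': 'secret',
    'SessionToken': 'token',
    'Expiration': 1554754620000  # ms since the UTC epoch, compared against current_ms_from_utc()
  }
}
credentials = cab.get('Credentials')  # what CredentialProviderIDBroker.get_credentials() returns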
+ 1 - 1
desktop/libs/azure/src/azure/abfs/abfs.py

@@ -25,6 +25,7 @@ import logging
 import threading
 
 from urllib.parse import urlparse
+from azure.conf import PERMISSION_ACTION_ABFS
 from hadoop.hdfs_site import get_umask_mode
 
 from hadoop.fs.exceptions import WebHdfsException
@@ -36,7 +37,6 @@ LOG = logging.getLogger(__name__)

 #Azure has a 30MB block limit on upload.
 UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000
-PERMISSION_ACTION_ABFS = "abfs_access"
 
 
 class ABFS(object):

+ 1 - 2
desktop/libs/azure/src/azure/adls/webhdfs.py

@@ -30,14 +30,13 @@ from hadoop.fs.exceptions import WebHdfsException
 from hadoop.hdfs_site import get_umask_mode
 
 from desktop.lib.rest import http_client, resource
-from azure.conf import get_default_adls_url, get_default_adls_fs
+from azure.conf import get_default_adls_url, get_default_adls_fs, PERMISSION_ACTION_ADLS
 
 
 LOG = logging.getLogger(__name__)
 
 #Azure has a 30MB block limit on upload.
 UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000
-PERMISSION_ACTION_ADLS = "adls_access"
 
 
 class WebHdfs(HadoopWebHdfs):

+ 2 - 2
desktop/libs/azure/src/azure/client.py

@@ -27,14 +27,14 @@ LOG = logging.getLogger(__name__)

 CLIENT_CACHE = None
 
-def get_client(identifier='default'):
+def get_client(identifier='default', user=None):
   global CLIENT_CACHE
   _init_clients()
   if identifier not in CLIENT_CACHE["adls"]:
     raise ValueError('Unknown azure client: %s, check your configuration' % identifier)
   return CLIENT_CACHE["adls"][identifier]
 
-def get_client_abfs(identifier='default'):
+def get_client_abfs(identifier='default', user=None):
   global CLIENT_CACHE
   _init_clients()
   if identifier not in CLIENT_CACHE["abfs"]:

+ 2 - 0
desktop/libs/azure/src/azure/conf.py

@@ -25,6 +25,8 @@ from hadoop.core_site import get_adls_client_id, get_adls_authentication_code, g
 
 
 LOG = logging.getLogger(__name__)
 
 
+PERMISSION_ACTION_ABFS = "abfs_access"
+PERMISSION_ACTION_ADLS = "adls_access"
 REFRESH_URL = 'https://login.microsoftonline.com/<tenant_id>/oauth2/<version>token'
 
 
 
 

+ 1 - 1
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -58,7 +58,7 @@ def rm_ha(funct):
   return wraps(funct)(decorate)
 
 
 
 
-def get_hdfs(identifier="default"):
+def get_hdfs(identifier="default", user=None):
   global FS_CACHE
   get_all_hdfs()
   return FS_CACHE[identifier]

+ 10 - 1
desktop/libs/hadoop/src/hadoop/core_site.py

@@ -32,6 +32,7 @@ _CORE_SITE_DICT = None                  # A dictionary of name/value config opti
 _CNF_TRASH_INTERVAL = 'fs.trash.interval'
 _CNF_S3A_ACCESS_KEY = 'fs.s3a.access.key'
 _CNF_S3A_SECRET_KEY = 'fs.s3a.secret.key'
+_CNF_S3A_SESSION_TOKEN = 'fs.s3a.session.token'
 
 
 _CNF_ADLS_CLIENT_ID = 'dfs.adls.oauth2.client.id'
 _CNF_ADLS_AUTHENTICATION_CODE = 'dfs.adls.oauth2.credential'
@@ -39,6 +40,8 @@ _CNF_ADLS_SECRET_KEY = 'dfs.adls.oauth2.credential'
 _CNF_ADLS_REFRESH_URL = 'dfs.adls.oauth2.refresh.url'
 _CNF_ADLS_GRANT_TYPE = 'dfs.adls.oauth2.access.token.provider.type'
 
 
+_CNF_SECURITY = 'hadoop.security.authentication'
+
 def reset():
   """Reset the cached conf"""
   """Reset the cached conf"""
   global _CORE_SITE_DICT
@@ -98,6 +101,9 @@ def get_s3a_secret_key():
   """
   """
   return get_conf().get(_CNF_S3A_SECRET_KEY)
 
 
+def get_s3a_session_token():
+  return get_conf().get(_CNF_S3A_SESSION_TOKEN)
+
 def get_adls_client_id():
   """
   """
   Get ADLS client id
@@ -124,4 +130,7 @@ def get_adls_grant_type():
   Get ADLS provider type
   https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
   """
   """
-  return get_conf().get(_CNF_ADLS_GRANT_TYPE)
+  return get_conf().get(_CNF_ADLS_GRANT_TYPE)
+
+def is_kerberos_enabled():
+  return get_conf().get(_CNF_SECURITY) == 'kerberos'
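For reference, the new helpers read plain properties from core-site.xml. The sketch below shows hypothetical entries (as comments, values invented) that would make get_s3a_session_token() return a token and is_kerberos_enabled() return True:

from hadoop import core_site

# Hypothetical core-site.xml entries backing these helpers:
#   <property><name>fs.s3a.session.token</name><value>FQoGZXIvYXdzEXAMPLE</value></property>
#   <property><name>hadoop.security.authentication</name><value>kerberos</value></property>
if core_site.is_kerberos_enabled():
  token = core_site.get_s3a_session_token()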