HUE-7248 [adls] fix test test__get_fs & test__get_fs_pair
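
The S3-only permission check that lived in ProxyFS._get_scheme moves into a generic ProxyFS._has_access, which asks each filesystem for its filebrowser_action() permission name. ADLS paths (adl://host/path and adl:/path) are now gated the same way as S3, plain HDFS stays unrestricted, S3-specific exceptions in the proxy become plain IOError, and the proxyfs tests gain ADLS coverage.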

jdesjean committed 8 years ago
commit 8c40a88

+ 5 - 2
apps/filebrowser/src/filebrowser/settings.py

@@ -21,7 +21,10 @@ REQUIRES_HADOOP = False
 ICON = "filebrowser/art/icon_filebrowser_48.png"
 MENU_INDEX = 20
 
+PERMISSION_ACTION_S3 = "s3_access"
+PERMISSION_ACTION_ADLS = "adls_access"
+
 PERMISSION_ACTIONS = (
-  ("s3_access", "Access to S3 from filebrowser and filepicker."),
-  ("adls_access", "Access to ADLS from filebrowser and filepicker.")
+  (PERMISSION_ACTION_S3, "Access to S3 from filebrowser and filepicker."),
+  (PERMISSION_ACTION_ADLS, "Access to ADLS from filebrowser and filepicker.")
 )
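
Lifting the permission names into module-level constants lets the filesystem classes import them instead of repeating string literals, as the s3fs.py and azure webhdfs.py hunks below do. A minimal sketch of the intended consumer (ExampleFs is hypothetical; the real consumers follow):

  from filebrowser.settings import PERMISSION_ACTION_S3

  class ExampleFs(object):
    def __init__(self):
      # One shared constant instead of a scattered "s3_access" literal.
      self._filebrowser_action = PERMISSION_ACTION_S3

    def filebrowser_action(self):
      return self._filebrowser_action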

+ 20 - 26
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -16,18 +16,9 @@
 
 from __future__ import absolute_import
 
-import posixpath
-import errno
+import logging
 
 from urlparse import urlparse
-
 from django.contrib.auth.models import User
 
-from aws.conf import has_s3_access
-from aws.s3 import S3A_ROOT
-from aws.s3.s3fs import S3FileSystemException
+LOG = logging.getLogger(__name__)
 
 
 class ProxyFS(object):
 
   def __init__(self, filesystems_dict, default_scheme):
@@ -41,10 +32,7 @@ class ProxyFS(object):
     self._default_fs = self._fs_dict[self._default_scheme]
 
   def __getattr__(self, item):
-    if hasattr(self, "_default_fs"):
-      return getattr(object.__getattribute__(self, "_default_fs"), item)
-    else:
-      raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, item))
+    return getattr(object.__getattribute__(self, "_default_fs"), item)
 
   def __setattr__(self, key, value):
     if hasattr(self, "_default_fs") and hasattr(self._default_fs, key):
@@ -53,34 +41,40 @@ class ProxyFS(object):
       object.__setattr__(self, key, value)
 
   def _get_scheme(self, path):
-    if path.lower().startswith(S3A_ROOT):
-      from desktop.auth.backend import rewrite_user # Avoid cyclic loop
-      try:
-        user = User.objects.get(username=self.user)
-        if not has_s3_access(rewrite_user(user)):
-          raise S3FileSystemException("Missing permissions for %s on %s" % (self.user, path,))
-      except User.DoesNotExist:
-        raise S3FileSystemException("Can't check permissions for %s on %s" % (self.user, path))
-
     split = urlparse(path)
     return split.scheme if split.scheme else self._default_scheme
 
+  def _has_access(self, fs):
+    from desktop.auth.backend import rewrite_user  # Avoid cyclic loop
+    try:
+      user = rewrite_user(User.objects.get(username=self.user))
+      filebrowser_action = fs.filebrowser_action()
+      return user.is_authenticated() and user.is_active and (user.is_superuser or not filebrowser_action or user.has_hue_permission(action=filebrowser_action, app="filebrowser"))
+    except User.DoesNotExist:
+      LOG.exception('proxyfs.has_access()')
+      return False
+
   def _get_fs(self, path):
     scheme = self._get_scheme(path)
     if not scheme:
-      raise S3FileSystemException('Can not figure out scheme for path "%s"' % path)
+      raise IOError('Cannot figure out scheme for path "%s"' % path)
     try:
-      return self._fs_dict[scheme]
+      fs = self._fs_dict[scheme]
+      if self._has_access(fs):
+        return fs
+      else:
+        raise IOError("Missing permissions for %s on %s" % (self.user, path))
     except KeyError:
-      raise S3FileSystemException('Unknown scheme %s, available schemes: %s' % (scheme, self._fs_dict.keys()))
+      raise IOError('Unknown scheme %s, available schemes: %s' % (scheme, self._fs_dict.keys()))
 
   def _get_fs_pair(self, src, dst):
     """
     Returns two FS for source and destination paths respectively.
     If `dst` is not self-contained path assumes it's relative path to `src`.
     """
+
     src_fs = self._get_fs(src)
-    dst_scheme = self._get_scheme(dst)
+    dst_scheme = urlparse(dst).scheme
     if not dst_scheme:
       return src_fs, src_fs
     return src_fs, self._get_fs(dst)
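
The rewritten _get_fs now resolves the filesystem from the parsed scheme and then gates it through _has_access for every scheme, not just S3, raising a plain IOError on failure instead of S3FileSystemException. Scheme resolution is plain urlparse, which is why both ADLS forms exercised in the tests below land on the same filesystem. A small runnable check of that parsing (Python 2, matching the urlparse import above):

  from urlparse import urlparse

  assert urlparse('adl://net/key').scheme == 'adl'  # scheme with a netloc
  assert urlparse('adl:/key').scheme == 'adl'       # scheme-only form, no netloc
  assert urlparse('/tmp/file').scheme == ''         # empty scheme -> default (hdfs)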

+ 56 - 19
desktop/core/src/desktop/lib/fs/proxyfs_test.py

@@ -21,6 +21,7 @@ from nose.tools import assert_raises, assert_false, eq_
 from nose import SkipTest
 
 from django.contrib.auth.models import User
+from desktop.auth.backend import rewrite_user
 
 from desktop.lib.fs import ProxyFS
 from desktop.lib.django_test_util import make_logged_in_client
@@ -36,9 +37,10 @@ def test_fs_selection():
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
   add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
+  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
 
-  s3fs, hdfs = MagicMock(), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  s3fs, adls, hdfs = MagicMock("s3_access"), MagicMock("adls_access"), MagicMock()
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
   proxy_fs.setuser(user)
 
   proxy_fs.isdir('s3a://bucket/key')
@@ -49,34 +51,43 @@ def test_fs_selection():
   hdfs.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
   assert_false(s3fs.isfile.called)
 
+  proxy_fs.isdir('adl://net/key')
+  adls.isdir.assert_called_once_with('adl://net/key')
+  assert_false(hdfs.isdir.called)
+
+  proxy_fs.isdir('adl:/key')
+  adls.isdir.assert_called_with('adl:/key')  # second call on adls, so not assert_called_once
+  assert_false(hdfs.isdir.called)
+
   proxy_fs.open('/user/alice/file')
   hdfs.open.assert_called_once_with('/user/alice/file')
   assert_false(s3fs.open.called)
 
   assert_raises(IOError, proxy_fs.stats, 'ftp://host')
-  assert_raises(IOError, proxy_fs.stats, 's3//bucket/key')
 
 
 # TODO: remove after HUE-2947 is resolved
 def test__get_fs():
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
-
   add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
+  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
 
-  s3fs, hdfs = MockFs(), MockFs()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
   proxy_fs.setuser(user)
 
   f = proxy_fs._get_fs
 
   eq_(f('s3a://bucket'), s3fs)
   eq_(f('S3A://bucket/key'), s3fs)
-  eq_(f('hdfs://path'), hdfs)
+  eq_(f('adl:/path'), adls)
+  eq_(f('adl://net/path'), adls)
+  eq_(f('hdfs:/path'), hdfs)
+  eq_(f('hdfs://net/path'), hdfs)
   eq_(f('/tmp'), hdfs)
 
   assert_raises(IOError, f, 'ftp://host')
-  assert_raises(IOError, f, 's3//bucket/key')
 
 
 def test_multi_fs_selection():
@@ -88,9 +99,10 @@ def test_multi_fs_selection():
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
   add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
+  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
 
-  s3fs, hdfs = MagicMock(), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  s3fs, adls, hdfs = MagicMock("s3_access"), MagicMock("adls_access"), MagicMock()
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
   proxy_fs.setuser(user)
 
   proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
@@ -101,6 +113,14 @@ def test_multi_fs_selection():
   s3fs.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
   assert_false(hdfs.copyfile.called)
 
+  proxy_fs.copyfile('adl://net/key', 'key2')
+  adls.copyfile.assert_called_once_with('adl://net/key', 'key2')
+  assert_false(hdfs.copyfile.called)
+
+  proxy_fs.copyfile('adl:/key', 'key2')
+  adls.copyfile.assert_called_with('adl:/key', 'key2')  # second call on adls
+  assert_false(hdfs.copyfile.called)
+
   proxy_fs.rename('/tmp/file', 'shmile')
   hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
   assert_false(s3fs.rename.called)
@@ -114,19 +134,21 @@ def test__get_fs_pair():
   make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
   user = User.objects.get(username='test')
   add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
+  add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
 
-  s3fs, hdfs = MockFs(), MockFs()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
   proxy_fs.setuser(user)
 
   f = proxy_fs._get_fs_pair
 
   eq_(f('s3a://bucket1/key', 's3a://bucket2/key'), (s3fs, s3fs))
   eq_(f('s3a://bucket/key', 'key2'), (s3fs, s3fs))
+  eq_(f('adl://net/key', 'key2'), (adls, adls))
+  eq_(f('adl:/key', 'key2'), (adls, adls))
   eq_(f('/tmp/file', 'shmile'), (hdfs, hdfs))
 
   assert_raises(IOError, f, 'ftp://host', 'key2')
-  assert_raises(IOError, f, 's3//bucket/key', 'hdfs://normal/path')
 
 
 def test_constructor_given_invalid_arguments():
@@ -135,10 +157,15 @@ def test_constructor_given_invalid_arguments():
 
 
 class MockFs():
-  def __init__(self):
+  def __init__(self, filebrowser_action=None):
     self.user = None
+    self._filebrowser_action = filebrowser_action
+
+  def setuser(self, user):
+    self.user = user
+
+  def filebrowser_action(self):
+    return self._filebrowser_action
 
-  def setuser(self, user): self.user = user
 
 
 class TestFsPermissions(object):
@@ -147,41 +174,51 @@ class TestFsPermissions(object):
     user_client = make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
     user = User.objects.get(username='test')
 
-    proxy_fs = ProxyFS({'s3a': MockFs(), 'hdfs': MockFs()}, 'hdfs')
+    s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
+    proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
 
     remove_from_group(user.username, 'has_s3')
+    remove_from_group(user.username, 'has_adls')
 
     # No perms by default
     assert_raises(Exception, f, 's3a://bucket')
     assert_raises(Exception, f, 'S3A://bucket/key')
+    assert_raises(Exception, f, 'adl://net/key')
+    assert_raises(Exception, f, 'adl:/key')
     f('hdfs://path')
     f('/tmp')
 
     try:
       # Add perm
-      add_permission(user.username, 'has_s3', permname='s3_access', appname='filebrowser')
+      add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
+      add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
 
       f('s3a://bucket')
       f('S3A://bucket/key')
+      f('adl://net/key')
+      f('adl:/key')
       f('hdfs://path')
       f('/tmp')
     finally:
       remove_from_group(user.username, 'has_s3')
-
+      remove_from_group(user.username, 'has_adls')
 
   def test_fs_permissions_admin_user(self):
     user_client = make_logged_in_client(username='admin', groupname='default', recreate=True, is_superuser=True)
     user = User.objects.get(username='admin')
 
-    proxy_fs = ProxyFS({'s3a': MockFs(), 'hdfs': MockFs()}, 'hdfs')
+    s3fs, adls, hdfs = MockFs("s3_access"), MockFs("adls_access"), MockFs()
+    proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs, 'adl': adls}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
 
     f('s3a://bucket')
     f('S3A://bucket/key')
+    f('adl://net/key')
+    f('adl:/key')
     f('hdfs://path')
     f('/tmp')
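
Note that the MagicMock instances in these tests need filebrowser_action() to return a real permission name, because ProxyFS._has_access passes that value straight to user.has_hue_permission. The standard way to pin a method's result on a MagicMock is return_value; a minimal sketch (Python 2 mock package assumed):

  from mock import MagicMock

  fs = MagicMock()
  fs.filebrowser_action.return_value = 's3_access'
  assert fs.filebrowser_action() == 's3_access'  # what _has_access will see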

+ 1 - 1
desktop/core/src/desktop/templates/assist.mako

@@ -470,7 +470,7 @@ from notebook.conf import ENABLE_QUERY_BUILDER, ENABLE_QUERY_SCHEDULING, get_ord
     <div class="assist-db-header-actions">
       <a class="inactive-action" href="javascript:void(0)" data-bind="click: goHome" title="Go to ${ home_dir }"><i class="pointer fa fa-home"></i></a>
       <a class="inactive-action" data-bind="dropzone: {
-            url: '/filebrowser/upload/file?dest=adl:/' + path,
+            url: '/filebrowser/upload/file?dest=adl:' + path,
             params: { dest: path },
             paramName: 'hdfs_file',
             onError: function(x, e){ $(document).trigger('error', e); },

+ 7 - 0
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -35,6 +35,8 @@ from aws.conf import get_default_region, get_locations
 from aws.s3 import normpath, s3file, translate_s3_error, S3A_ROOT
 from aws.s3.s3stat import S3Stat
 
+from filebrowser.settings import PERMISSION_ACTION_S3
+
 
 DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
 
@@ -76,6 +78,7 @@ class S3FileSystem(object):
   def __init__(self, s3_connection):
     self._s3_connection = s3_connection
     self._bucket_cache = None
+    self._filebrowser_action = PERMISSION_ACTION_S3
 
   def _init_bucket_cache(self):
     if self._bucket_cache is None:
@@ -342,6 +345,10 @@ class S3FileSystem(object):
   def restore(self, *args, **kwargs):
     raise NotImplementedError(_('Moving to trash is not implemented for S3'))
 
+  def filebrowser_action(self):
+    return self._filebrowser_action
+
   @translate_s3_error
   @auth_error_handler
   def mkdir(self, path, *args, **kwargs):

+ 6 - 1
desktop/libs/azure/src/azure/adls/webhdfs.py

@@ -21,6 +21,7 @@ Interfaces for ADLS via HttpFs/WebHDFS
 import logging
 import threading
 
+from filebrowser.settings import PERMISSION_ACTION_ADLS
 from hadoop.fs.webhdfs import WebHdfs as HadoopWebHdfs
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.hdfs_site import get_umask_mode
@@ -59,6 +60,7 @@ class WebHdfs(HadoopWebHdfs):
     self._netloc = split.netloc
     self._is_remote = True
     self._has_trash_support = False
+    self._filebrowser_action = PERMISSION_ACTION_ADLS
 
     self._client = http_client.HttpClient(url, exc_class=WebHdfsException, logger=LOG)
     self._root = resource.Resource(self._client)
@@ -88,4 +90,7 @@ class WebHdfs(HadoopWebHdfs):
     }
 
   def get_upload_chuck_size(self):
-    return UPLOAD_CHUCK_SIZE
+    return UPLOAD_CHUCK_SIZE
+
+  def filebrowser_action(self):
+    return self._filebrowser_action

+ 3 - 0
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -886,6 +886,9 @@ class WebHdfs(Hdfs):
 
     self.do_as_user(username, self.rename, tmp_file, dst)
 
+  def filebrowser_action(self):
+    return None
+
 
 class File(object):
   """