
HUE-5325 [aws] Enable S3 browser only when keys are there

Permission is in FileBrowser, not the AWS lib.
Do not add S3 permissions to the default group.
Fix the logic to block S3 access via the File Browser if the user does not have
the permission and is not a Hue admin.
Romain Rigaux, 9 years ago
commit 7c31aa1

+ 1 - 3
apps/filebrowser/src/filebrowser/api.py

@@ -23,8 +23,6 @@ from desktop.lib.django_util import JsonResponse
 from desktop.lib.fsmanager import FS_GETTERS
 from desktop.lib.i18n import smart_unicode
 
-from aws.conf import has_s3_access
-
 
 LOG = logging.getLogger(__name__)
 
@@ -48,7 +46,7 @@ def get_filesystems(request):
 
   filesystems = {}
   for k, v in FS_GETTERS.items():
-    if k.startswith('s3') and has_s3_access(request.user):
+    if k.startswith('s3'):
       filesystems[k] = v is not None
     else:
       filesystems[k] = v is not None
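With has_s3_access removed from this endpoint, both branches of the loop now report a filesystem whenever its getter exists; the per-user S3 gate moves into ProxyFS (see the proxyfs.py hunk below). A minimal sketch of the resulting behavior, with placeholder getters standing in for the real FS_GETTERS registry:

# Minimal sketch: placeholder getters stand in for desktop.lib.fsmanager.FS_GETTERS.
FS_GETTERS = {'hdfs': lambda: 'hdfs_client', 's3a': lambda: 's3_client'}

filesystems = {}
for k, v in FS_GETTERS.items():
  # Every registered scheme is reported; the per-user S3 permission check
  # now happens in ProxyFS._get_scheme, not in this API endpoint.
  filesystems[k] = v is not None

print(filesystems)  # {'hdfs': True, 's3a': True}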

+ 1 - 0
apps/useradmin/src/useradmin/models.py

@@ -280,6 +280,7 @@ def update_app_permissions(**kwargs):
            not (new_dp.app == 'metastore' and new_dp.action == 'write') and \
            not (new_dp.app == 'hbase' and new_dp.action == 'write') and \
            not (new_dp.app == 'security' and new_dp.action == 'impersonate') and \
+           not (new_dp.app == 'filebrowser' and new_dp.action == 's3_access') and \
            not (new_dp.app == 'oozie' and new_dp.action == 'disable_editor_access'):
           GroupPermission.objects.create(group=default_group, hue_permission=new_dp)
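Since the default group no longer receives s3_access, the permission has to be granted explicitly. A minimal sketch using the add_permission test helper imported later in this commit (in a real deployment an admin would assign the permission to a group through the Useradmin app):

from desktop.lib.test_utils import add_permission

# Same call the new ProxyFS tests below use: grant filebrowser.s3_access
# to user 'test' through group 'has_s3'.
add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')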
 

+ 19 - 2
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -21,9 +21,15 @@ import errno
 
 from urlparse import urlparse
 
+from django.contrib.auth.models import User
+
+from aws.conf import has_s3_access
+from aws.s3 import S3A_ROOT
+
 
 class ProxyFS(object):
-  def __init__(self, filesystems_dict, default_scheme):
+
+  def __init__(self, filesystems_dict, default_scheme, default_user=None):
     if default_scheme not in filesystems_dict:
       raise ValueError(
         'Default scheme "%s" is not a member of provided schemes: %s' % (default_scheme, filesystems_dict.keys()))
@@ -32,6 +38,7 @@ class ProxyFS(object):
     self._fs_set = set(self._fs_dict.values())
     self._default_scheme = default_scheme
     self._default_fs = self._fs_dict[self._default_scheme]
+    self.user = default_user
 
   def __getattr__(self, item):
     if hasattr(self, "_default_fs"):
@@ -46,6 +53,15 @@ class ProxyFS(object):
       object.__setattr__(self, key, value)
 
   def _get_scheme(self, path):
+    if path.lower().startswith(S3A_ROOT):
+      from desktop.auth.backend import rewrite_user # Avoid cyclic loop
+      try:
+        user = User.objects.get(username=self.user)
+        if not has_s3_access(rewrite_user(user)):
+          raise IOError(errno.EPERM, "Missing permissions for %s" % path)
+      except User.DoesNotExist:
+        raise IOError(errno.EPERM, "Can't check permissions for %s on %s" % (self.user, path))
+
     split = urlparse(path)
     if split.scheme:
       return split.scheme
@@ -73,8 +89,9 @@ class ProxyFS(object):
     return src_fs, self._get_fs(dst)
 
   def setuser(self, user):
-    """Set a new user. Return the current user."""
+    """Set a new user. Return the past current user."""
     curr = self.user
+    self.user = user
     for fs in self._fs_set:
       fs.setuser(user)
     return curr
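Taken together, setuser() now records the user on the ProxyFS instance and _get_scheme() consults it before resolving any s3a:// path. A usage sketch, assuming an existing non-admin Hue user 'test' without the filebrowser.s3_access permission, and filesystem objects that expose setuser() (mirroring the MockFs in the tests below):

import errno

proxy_fs = ProxyFS({'s3a': s3_fs, 'hdfs': hdfs_fs}, 'hdfs')
proxy_fs.setuser('test')  # assumed existing, non-admin Hue username

proxy_fs._get_fs('hdfs://path')  # HDFS paths are unaffected
try:
  proxy_fs._get_fs('s3a://bucket/key')  # resolving the scheme hits the S3 gate
except IOError as e:
  assert e.errno == errno.EPERM  # "Missing permissions for s3a://bucket/key"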

+ 71 - 4
desktop/core/src/desktop/lib/fs/proxyfs_test.py

@@ -20,7 +20,11 @@ from __future__ import absolute_import
 from nose.tools import assert_raises, assert_false, eq_
 from nose import SkipTest
 
+from django.contrib.auth.models import User
+
 from desktop.lib.fs import ProxyFS
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.test_utils import add_permission, remove_from_group
 
 
 def test_fs_selection():
@@ -29,8 +33,11 @@ def test_fs_selection():
   except ImportError:
     raise SkipTest("Skips until HUE-2947 is resolved")
 
+  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
+  add_permission('test', 'test', permname='s3_access', appname='filebrowser')
+
   s3fs, hdfs = MagicMock(), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs', default_user='test')
 
   proxy_fs.isdir('s3a://bucket/key')
   s3fs.isdir.assert_called_once_with('s3a://bucket/key')
@@ -50,8 +57,11 @@ def test_fs_selection():
 
 # TODO: remove after HUE-2947 is resolved
 def test__get_fs():
+  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
+  add_permission('test', 'test', permname='s3_access', appname='filebrowser')
+
   s3fs, hdfs = 'fake_s3', 'fake_hdfs'
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs', default_user='test')
   f = proxy_fs._get_fs
 
   eq_(f('s3a://bucket'), s3fs)
@@ -68,8 +78,12 @@ def test_multi_fs_selection():
     from mock import MagicMock
   except ImportError:
     raise SkipTest("Skips until HUE-2947 is resolved")
+
+  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
+  add_permission('test', 'test', permname='s3_access', appname='filebrowser')
+
   s3fs, hdfs = MagicMock(), MagicMock()
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs', default_user='test')
 
   proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
   s3fs.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
@@ -89,8 +103,12 @@ def test_multi_fs_selection():
 
 # TODO: remove after HUE-2947 is resolved
 def test__get_fs_pair():
+  make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
+  add_permission('test', 'test', permname='s3_access', appname='filebrowser')
+
   s3fs, hdfs = 'fake_s3', 'fake_hdfs'
-  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs')
+  proxy_fs = ProxyFS({'s3a': s3fs, 'hdfs': hdfs}, 'hdfs', default_user='test')
+
   f = proxy_fs._get_fs_pair
 
   eq_(f('s3a://bucket1/key', 's3a://bucket2/key'), (s3fs, s3fs))
@@ -103,3 +121,52 @@ def test__get_fs_pair():
 
 def test_constructor_given_invalid_arguments():
   assert_raises(ValueError, ProxyFS, {'s3a': {}}, 'hdfs')
+
+
+
+class MockFs():
+  def setuser(self, user): pass
+
+
+class TestFsPermissions(object):
+
+  def test_fs_permissions_regular_user(self):
+    user_client = make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
+    user = User.objects.get(username='test')
+
+    proxy_fs = ProxyFS({'s3a': MockFs(), 'hdfs': MockFs()}, 'hdfs')
+    f = proxy_fs._get_fs
+
+    proxy_fs.setuser(user)
+
+    # No perms by default
+    assert_raises(Exception, f, 's3a://bucket')
+    assert_raises(Exception, f, 'S3A://bucket/key')
+    f('hdfs://path')
+    f('/tmp')
+
+    try:
+      # Add perm
+      add_permission(user.username, 'has_s3', permname='s3_access', appname='filebrowser')
+
+      f('s3a://bucket')
+      f('S3A://bucket/key')
+      f('hdfs://path')
+      f('/tmp')
+    finally:
+      remove_from_group('test', 'has_s3')
+
+
+  def test_fs_permissions_admin_user(self):
+    user_client = make_logged_in_client(username='admin', groupname='default', recreate=True, is_superuser=True)
+    user = User.objects.get(username='admin')
+
+    proxy_fs = ProxyFS({'s3a': MockFs(), 'hdfs': MockFs()}, 'hdfs')
+    f = proxy_fs._get_fs
+
+    proxy_fs.setuser(user)
+
+    f('s3a://bucket')
+    f('S3A://bucket/key')
+    f('hdfs://path')
+    f('/tmp')

+ 1 - 1
desktop/core/src/desktop/lib/fsmanager.py

@@ -21,7 +21,7 @@ import sys
 import logging
 
 import aws
-from aws.conf import is_default_configured as is_s3_enabled
+from aws.conf import is_enabled as is_s3_enabled
 
 from desktop.lib.fs import ProxyFS
 from hadoop import cluster

+ 1 - 1
desktop/core/src/desktop/settings.py

@@ -33,7 +33,7 @@ import desktop.redaction
 from desktop.lib.paths import get_desktop_root
 from desktop.lib.python_util import force_dict_to_strings
 
-from aws.conf import is_default_configured as is_s3_enabled
+from aws.conf import is_enabled as is_s3_enabled
 
 
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)

+ 1 - 1
desktop/core/src/desktop/tests.py

@@ -1077,7 +1077,7 @@ def test_session_secure_cookie():
       finally:
         for reset in resets:
           reset()
-
+ 
       resets = [
         desktop.conf.SSL_CERTIFICATE.set_for_testing(present=None),
         desktop.conf.SSL_PRIVATE_KEY.set_for_testing(present=None),

+ 3 - 3
desktop/libs/aws/src/aws/__init__.py

@@ -46,6 +46,6 @@ def _make_client(identifier):
   return Client.from_config(client_conf)
 
 
-def get_s3fs(identifier='default'):
-   connection = get_client(identifier).get_s3_connection()
-   return S3FileSystem(connection)
+def get_s3fs(user, identifier='default'):
+  connection = get_client(identifier).get_s3_connection()
+  return S3FileSystem(connection)

+ 3 - 11
desktop/libs/aws/src/aws/conf.py

@@ -98,11 +98,7 @@ AWS_ACCOUNTS = UnspecifiedConfigSection(
 
 
 def is_enabled():
-  return 'default' in AWS_ACCOUNTS.keys() and AWS_ACCOUNTS['default'].get_raw()
-
-
-def is_default_configured():
-  return is_enabled() and (AWS_ACCOUNTS['default'].ACCESS_KEY_ID.get() is not None or has_iam_metadata())
+  return 'default' in AWS_ACCOUNTS.keys() and AWS_ACCOUNTS['default'].get_raw() and (AWS_ACCOUNTS['default'].ACCESS_KEY_ID.get() is not None or has_iam_metadata())
 
 
 def has_iam_metadata():
@@ -111,18 +107,14 @@ def has_iam_metadata():
 
 
 def has_s3_access(user):
-  return user.is_authenticated and user.is_active and \
-         (user.is_superuser or user.has_hue_permission(action="s3_access", app="filebrowser"))
+  return not user.is_anonymous() and (user.is_superuser or user.has_hue_permission(action="s3_access", app="filebrowser")) and is_enabled()
 
 
 def config_validator(user):
   res = []
 
   if is_enabled():
-    if not is_default_configured():  # Make a redundant call to is_enabled so that we only check default if it's non-empty
-      res.append(('aws.aws_accounts', 'Default AWS account is not configured'))
-
-    regions = get_regions('s3')  # S3 is only supported service so far
+    regions = get_regions('s3')
     region_names = [r.name for r in regions]
 
     for name in AWS_ACCOUNTS.keys():
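The old is_default_configured() is folded into is_enabled(), and has_s3_access() now requires a usable configuration on top of the user checks. A sketch of the resulting call-site logic (request is an assumed authenticated Django request; show_s3_browser is a hypothetical UI hook):

from aws.conf import has_s3_access

# has_s3_access(user) is true only when all of the following hold:
#   1. the user is not anonymous;
#   2. the user is a superuser OR holds the filebrowser.s3_access permission;
#   3. is_enabled(): a 'default' AWS account exists with an access key set
#      or IAM metadata available.
if has_s3_access(request.user):
  show_s3_browser()  # hypothetical UI hook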

+ 1 - 1
desktop/libs/aws/src/aws/s3/upload.py

@@ -117,7 +117,7 @@ class S3FileUploadHandler(FileUploadHandler):
     try:
       fs = request.fs
     except AttributeError:
-      fs = get_s3fs()
+      fs = get_s3fs(request.user)
 
     if not fs:
       raise S3FileUploadError(_("No S3 filesystem found."))
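A short sketch of the fallback this hunk exercises: when the request carries no pre-built filesystem, the upload handler now builds one itself. Note that get_s3fs() accepts the user but, as the aws/__init__.py hunk above shows, does not use it yet, presumably groundwork for per-user S3 clients:

from aws import get_s3fs

try:
  fs = request.fs  # normally injected by Hue middleware
except AttributeError:
  fs = get_s3fs(request.user)  # fallback: build the default S3 filesystem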