
[core] Use dedicated home_directory methods and introduce DEFAULT_HOME_PATH config for supported FS (#3742)

## What changes were proposed in this pull request?

- Previously, if two or more filesystems were configured in Hue at the same time (say S3 and ABFS) and `REMOTE_STORAGE_HOME` was set to `s3a://test_bucket/dir`, the user landed on that same `s3a://test_bucket/dir` path when clicking either the S3 or the ABFS left-nav icon.
- Using the dedicated home directory method per filesystem avoids this: in the scenario above, the user now defaults to `s3a://test_bucket/dir` when the S3 icon is clicked but to `abfs://` when the ABFS icon is clicked (see the sketch below).
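For reference, a minimal sketch of the per-filesystem dispatch the left-nav API now relies on; it condenses the `get_filesystems_with_home_dirs` change in `apps/filebrowser/src/filebrowser/api.py` below, and the helper name `home_directory_for` is only for illustration:

```python
# Minimal sketch of the per-FS home directory dispatch; mirrors the api.py change in this diff.
from aws.s3.s3fs import get_s3_home_directory
from azure.abfs.__init__ import get_abfs_home_directory
from desktop.lib.fs.gc.gs import get_gs_home_directory
from desktop.lib.fs.ozone.ofs import get_ofs_home_directory


def home_directory_for(fs, user):  # hypothetical helper, for illustration only
  if fs == 's3a':
    return get_s3_home_directory(user)    # e.g. s3a://test_bucket/dir
  elif fs == 'gs':
    return get_gs_home_directory(user)
  elif fs == 'abfs':
    return get_abfs_home_directory(user)  # e.g. abfs://
  elif fs == 'ofs':
    return get_ofs_home_directory()
  return ''
```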



- To improve this further, `default_home_path` can now be set per filesystem, so the user can default to `s3a://test_bucket/dir` for the S3 icon and `abfs://test_container/dir` for the ABFS icon (example below).
- For backward compatibility, `REMOTE_STORAGE_HOME` still takes priority over the per-FS `DEFAULT_HOME_PATH`.
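As an illustration (bucket and container names are examples, and the section layout follows the existing `hue.ini` blocks touched in this PR), the per-FS option can be set like this:

```ini
# Illustrative hue.ini snippet; paths are examples only.
[aws]
  [[aws_accounts]]
    [[[default]]]
      default_home_path=s3a://test_bucket/dir

[azure]
  [[abfs_clusters]]
    [[[default]]]
      default_home_path=abfs://test_container/dir
```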



- This PR also removes unnecessary encoding and improves a few home_directory related methods (see the resolution sketch below).
- For RAZ environments, if `REMOTE_STORAGE_HOME` or `DEFAULT_HOME_PATH` is set to a path ending with `/user` or `/user/`, the username is now appended in both cases to reduce misconfigurations.
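Putting it together, the per-filesystem resolution order is roughly the following; this is a simplified sketch of the updated `get_s3_home_directory` (the GS and ABFS variants in this PR follow the same pattern), not the verbatim implementation:

```python
# Simplified sketch of the new resolution order (see aws/s3/s3fs.py and desktop/models.py in this diff).
from aws.conf import AWS_ACCOUNTS
from desktop.conf import RAZ
from filebrowser.conf import REMOTE_STORAGE_HOME


def get_s3_home_directory_sketch(user=None):
  # 1. REMOTE_STORAGE_HOME still wins, for backward compatibility.
  if REMOTE_STORAGE_HOME.get() and REMOTE_STORAGE_HOME.get().startswith('s3a://'):
    remote_home_s3 = REMOTE_STORAGE_HOME.get()
  # 2. Otherwise, fall back to the per-FS default_home_path.
  elif 'default' in AWS_ACCOUNTS and AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get() and \
      AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get().startswith('s3a://'):
    remote_home_s3 = AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get()
  # 3. Otherwise, default to the filesystem root.
  else:
    remote_home_s3 = 's3a://'

  # In a RAZ environment, a configured path ending with /user or /user/ gets the username appended,
  # e.g. s3a://test_bucket/user -> s3a://test_bucket/user/<username>.
  if user and RAZ.IS_ENABLED.get() and remote_home_s3.endswith(('/user', '/user/')):
    remote_home_s3 = remote_home_s3.rstrip('/') + '/' + user.username

  return remote_home_s3
```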

## How was this patch tested?

- Tested end-to-end in a live setup.
- Added new and updated existing unit tests.
Harsh Gupta committed 1 year ago
Commit 7ee5a336a9

+ 11 - 11
apps/filebrowser/src/filebrowser/api.py

@@ -15,22 +15,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 import logging
 import posixpath
-import os
 
 from django.http import HttpResponse
 from django.utils.translation import gettext as _
 
-from desktop.lib.django_util import JsonResponse
+from aws.s3.s3fs import get_s3_home_directory
+from azure.abfs.__init__ import get_abfs_home_directory
 from desktop.lib import fsmanager
-from desktop.lib.i18n import smart_unicode
-from desktop.lib.fs.ozone.ofs import get_ofs_home_directory
+from desktop.lib.django_util import JsonResponse
 from desktop.lib.fs.gc.gs import get_gs_home_directory
-
-from azure.abfs.__init__ import get_home_dir_for_abfs
-from aws.s3.s3fs import get_s3_home_directory
-
+from desktop.lib.fs.ozone.ofs import get_ofs_home_directory
+from desktop.lib.i18n import smart_unicode
 from filebrowser.views import _normalize_path
 
 LOG = logging.getLogger()
@@ -64,7 +62,7 @@ def get_filesystems(request):
 
 
 @error_handler
-def get_filesystems_with_home_dirs(request): # Using as a public API only for now
+def get_filesystems_with_home_dirs(request):  # Using as a public API only for now
   filesystems = []
   user_home_dir = ''
 
@@ -76,7 +74,7 @@ def get_filesystems_with_home_dirs(request): # Using as a public API only for no
     elif fs == 'gs':
       user_home_dir = get_gs_home_directory(request.user)
     elif fs == 'abfs':
-      user_home_dir = get_home_dir_for_abfs(request.user)
+      user_home_dir = get_abfs_home_directory(request.user)
     elif fs == 'ofs':
       user_home_dir = get_ofs_home_directory()
 
@@ -107,10 +105,11 @@ def touch(request):
 
   if name and (posixpath.sep in name):
     raise Exception(_("Error creating %s file. Slashes are not allowed in filename." % name))
-  
+
   request.fs.create(request.fs.join(path, name))
   return HttpResponse(status=200)
 
+
 @error_handler
 def rename(request):
   src_path = request.POST.get('src_path')
@@ -132,6 +131,7 @@ def rename(request):
   request.fs.rename(src_path, dest_path)
   return HttpResponse(status=200)
 
+
 @error_handler
 def content_summary(request, path):
   path = _normalize_path(path)

+ 1 - 0
apps/filebrowser/src/filebrowser/conf.py

@@ -84,6 +84,7 @@ REDIRECT_DOWNLOAD = Config(
   type=coerce_bool,
   default=False)
 
+# DEPRECATED in favor of DEFAULT_HOME_PATH per FS config level.
 REMOTE_STORAGE_HOME = Config(
   key="remote_storage_home",
   type=str,

+ 2 - 2
apps/filebrowser/src/filebrowser/views.py

@@ -284,8 +284,8 @@ def view(request, path):
 
   # default_abfs_home is set in jquery.filechooser.js
   if 'default_abfs_home' in request.GET:
-    from azure.abfs.__init__ import get_home_dir_for_abfs
-    home_dir_path = get_home_dir_for_abfs(request.user)
+    from azure.abfs.__init__ import get_abfs_home_directory
+    home_dir_path = get_abfs_home_directory(request.user)
     if request.fs.isdir(home_dir_path):
       return format_preserving_redirect(
           request,

+ 9 - 0
desktop/conf.dist/hue.ini

@@ -1002,6 +1002,9 @@ tls=no
 # The JSON credentials to authenticate to Google Cloud e.g. '{ "type": "service_account", "project_id": .... }'
 ## json_credentials=None
 
+# Optionally set this for a different home directory path. e.g. gs://gethue-bucket/user
+## default_home_path=gs://<bucket_name>/<relative_path>
+
 ## Configuration for Ozone File System
 # ------------------------------------------------------------------------
 [[ozone]]
@@ -1712,6 +1715,7 @@ submit_to=True
 # Redirect client to WebHdfs or S3 for file download. Note: Turning this on will override notebook/redirect_whitelist for user selected file downloads on WebHdfs & S3.
 ## redirect_download=false
 
+# DEPRECATED in favor of default_home_path per FS config level.
 # Optionally set this if you want a different home directory path. e.g. s3a://gethue.
 ## remote_storage_home=s3a://gethue
 
@@ -2026,6 +2030,8 @@ submit_to=True
 # The time in seconds before a delegate key is expired. Used when filebrowser/redirect_download is used. Default to 4 Hours.
 ## key_expiry=14400
 
+# Optionally set this for a different home directory path. e.g. s3a://gethue-bucket/user
+## default_home_path=s3a://<bucket_name>/<relative_path>
 
 ###########################################################################
 # Settings for the Azure lib
@@ -2058,6 +2064,9 @@ submit_to=True
 ## fs_defaultfs=abfs://<container_name>@<account_name>.dfs.core.windows.net
 ## webhdfs_url=https://<account_name>.dfs.core.windows.net
 
+# Optionally set this for a different home directory path. e.g. abfs://gethue-container/user
+## default_home_path=abfs://<container_name>/<relative_path>
+
 ###########################################################################
 # Settings for the Sentry lib
 ###########################################################################

+ 10 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -986,6 +986,9 @@
       # The JSON credentials to authenticate to Google Cloud e.g. '{ "type": "service_account", "project_id": .... }'
       ## json_credentials=None
 
+      # Optionally set this for a different home directory path. e.g. gs://gethue-bucket/user
+      ## default_home_path=gs://<bucket_name>/<relative_path>
+
   ## Configuration for Ozone File System
   # ------------------------------------------------------------------------
   [[ozone]]
@@ -1695,6 +1698,7 @@
   # Redirect client to WebHdfs or S3 for file download. Note: Turning this on will override notebook/redirect_whitelist for user selected file downloads on WebHdfs & S3.
   ## redirect_download=false
 
+  # DEPRECATED in favor of default_home_path per FS config level.
   # Optionally set this if you want a different home directory path. e.g. s3a://gethue.
   ## remote_storage_home=s3a://gethue
 
@@ -2010,6 +2014,9 @@
       # The time in seconds before a delegate key is expired. Used when filebrowser/redirect_download is used. Default to 4 Hours.
       ## key_expiry=14400
 
+      # Optionally set this for a different home directory path. e.g. s3a://gethue-bucket/user
+      ## default_home_path=s3a://<bucket_name>/<relative_path>
+
 
 ###########################################################################
 # Settings for the Azure lib
@@ -2042,6 +2049,9 @@
       ## fs_defaultfs=abfs://<container_name>@<account_name>.dfs.core.windows.net
       ## webhdfs_url=https://<account_name>.dfs.core.windows.net
 
+      # Optionally set this for a different home directory path. e.g. abfs://gethue-container/user
+      ## default_home_path=abfs://<container_name>/<relative_path>
+
 ###########################################################################
 # Settings for the Sentry lib
 ###########################################################################

+ 7 - 1
desktop/core/src/desktop/conf.py

@@ -2804,7 +2804,13 @@ GC_ACCOUNTS = UnspecifiedConfigSection(
         key='json_credentials',
         type=str,
         default=None,
-      )
+      ),
+      DEFAULT_HOME_PATH=Config(
+        key="default_home_path",
+        type=str,
+        default=None,
+        help="Optionally set this for a different home directory path. e.g. gs://gethue"
+      ),
     )
   )
 )

+ 23 - 19
desktop/core/src/desktop/lib/fs/gc/gs.py

@@ -16,34 +16,31 @@
 # limitations under the License.
 import os
 import re
+import time
 import logging
 import posixpath
-import time
 
 from boto.exception import BotoClientError, GSResponseError
 from boto.gs.connection import Location
 from boto.gs.key import Key
-
 from boto.s3.prefix import Prefix
 from django.utils.translation import gettext as _
 
-from desktop.conf import PERMISSION_ACTION_GS, is_raz_gs
-from desktop.lib.fs.gc import GS_ROOT, abspath, parse_uri, translate_gs_error, normpath, join as gs_join
-from desktop.lib.fs.gc.gsstat import GSStat
+from aws.s3.s3fs import S3FileSystem
+from desktop.conf import GC_ACCOUNTS, PERMISSION_ACTION_GS, is_raz_gs
+from desktop.lib.fs.gc import GS_ROOT, abspath, join as gs_join, normpath, parse_uri, translate_gs_error
 from desktop.lib.fs.gc.gsfile import open as gsfile_open
-
+from desktop.lib.fs.gc.gsstat import GSStat
 from filebrowser.conf import REMOTE_STORAGE_HOME
 
-from aws.s3.s3fs import S3FileSystem
-
-
 DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
 BUCKET_NAME_PATTERN = re.compile(
-  "^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
+r"^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
 
 
 LOG = logging.getLogger()
 
+
 class GSFileSystemException(IOError):
   def __init__(self, *args, **kwargs):
     super(GSFileSystemException, self).__init__(*args, **kwargs)
@@ -81,9 +78,16 @@ def auth_error_handler(view_fn):
 def get_gs_home_directory(user=None):
   from desktop.models import _handle_user_dir_raz
 
-  remote_home_gs = 'gs://'
+  # REMOTE_STORAGE_HOME is deprecated in favor of DEFAULT_HOME_PATH per FS config level.
+  # But for backward compatibility, we are still giving preference to REMOTE_STORAGE_HOME path first and if it's not set,
+  # then check for DEFAULT_HOME_PATH which is set per FS config block. This helps in setting diff DEFAULT_HOME_PATH for diff FS in Hue.
+
   if hasattr(REMOTE_STORAGE_HOME, 'get') and REMOTE_STORAGE_HOME.get() and REMOTE_STORAGE_HOME.get().startswith('gs://'):
     remote_home_gs = REMOTE_STORAGE_HOME.get()
+  elif 'default' in GC_ACCOUNTS and GC_ACCOUNTS['default'].DEFAULT_HOME_PATH.get() and GC_ACCOUNTS['default'].DEFAULT_HOME_PATH.get().startswith('gs://'):
+    remote_home_gs = GC_ACCOUNTS['default'].DEFAULT_HOME_PATH.get()
+  else:
+    remote_home_gs = 'gs://'
 
   remote_home_gs = _handle_user_dir_raz(user, remote_home_gs)
 
@@ -100,7 +104,7 @@ class GSFileSystem(S3FileSystem):
       headers=headers,
       filebrowser_action=filebrowser_action
     )
-  
+
   @staticmethod
   def join(*comp_list):
     return gs_join(*comp_list)
@@ -156,7 +160,7 @@ class GSFileSystem(S3FileSystem):
 
     Returns:
       GSStat: An object representing the stats of the file or directory.
-    
+
     Raises:
       GSFileSystemException: If the file or directory does not exist.
     """
@@ -347,7 +351,7 @@ class GSFileSystem(S3FileSystem):
   def _stats(self, path):
     if GSFileSystem.isroot(path):
       return GSStat.for_gs_root()
-    
+
     try:
       key = self._get_key(path)
     except BotoClientError as e:
@@ -359,7 +363,7 @@ class GSFileSystem(S3FileSystem):
         raise GSFileSystemException(_('User is not authorized to access path: "%s"') % path)
       else:
         raise GSFileSystemException(_('Failed to access path "%s": %s') % (path, e.reason))
-    except Exception as e: # SSL errors show up here, because they've been remapped in boto
+    except Exception as e:  # SSL errors show up here, because they've been remapped in boto
       raise GSFileSystemException(_('Failed to access path "%s": %s') % (path, str(e)))
 
     if key is None:
@@ -367,9 +371,9 @@ class GSFileSystem(S3FileSystem):
       bucket = self._get_bucket(bucket_name)
 
       key = Key(bucket, key_name)
-    
+
     return self._stats_key(key, self.fs)
-  
+
   @staticmethod
   def _stats_key(key, fs='gs'):
     if key.size is not None:
@@ -402,7 +406,7 @@ class GSFileSystem(S3FileSystem):
     """
     src_st = self.stats(src)
     if src_st.isDir and not recursive:
-      return None # omitting directory
+      return None  # omitting directory
 
     # Check if the source is a directory and destination is not a directory
     dst = abspath(src, dst)
@@ -421,7 +425,7 @@ class GSFileSystem(S3FileSystem):
     src_bucket = self._get_bucket(src_bucket)
     dst_bucket = self._get_bucket(dst_bucket)
 
-    # Determine whether to keep the source basename when copying directories and 
+    # Determine whether to keep the source basename when copying directories and
     # calculate the cut-off length for key names accordingly.
     if keep_src_basename:
       cut = len(posixpath.dirname(src_key))  # cut of the parent directory name

+ 147 - 0
desktop/core/src/desktop/lib/fs/gc/gs_test.py

@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import pytest
+from django.test import TestCase
+
+from desktop.conf import GC_ACCOUNTS, RAZ, is_gs_enabled
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.fs.gc.gs import get_gs_home_directory
+from desktop.lib.fsmanager import get_client
+from filebrowser.conf import REMOTE_STORAGE_HOME
+from useradmin.models import User
+
+LOG = logging.getLogger()
+
+
+@pytest.mark.django_db
+def test_get_gs_home_directory():
+  client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
+  user = User.objects.get(username="test")
+
+  client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
+  user_not_me = User.objects.get(username="test_not_me")
+
+  # When REMOTE_STORAGE_HOME ends with /user in RAZ GS environment.
+  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing('gs://gethue-bucket/user')]
+
+  try:
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://gethue-bucket/user/test'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://gethue-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When GS filesystem's DEFAULT_HOME_PATH ends with /user in RAZ GS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    GC_ACCOUNTS.set_for_testing({'default': {'default_home_path': 'gs://gethue-other-bucket/user'}}),
+  ]
+
+  try:
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/user/test'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When GS filesystem's DEFAULT_HOME_PATH is set in non-RAZ GS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(False),
+    GC_ACCOUNTS.set_for_testing({'default': {'default_home_path': 'gs://gethue-other-bucket/test-dir'}}),
+  ]
+
+  try:
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/test-dir'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/test-dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When both REMOTE_STORAGE_HOME and GS filesystem's DEFAULT_HOME_PATH are set in RAZ GS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('gs://gethue-bucket/user'),
+    GC_ACCOUNTS.set_for_testing({'default': {'default_home_path': 'gs://gethue-other-bucket/user'}}),
+  ]
+
+  try:
+    # Gives preference to REMOTE_STORAGE_HOME for of backward compatibility.
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://gethue-bucket/user/test'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://gethue-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When GS filesystem's DEFAULT_HOME_PATH is set but path does not end with ../user or ../user/ in RAZ GS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    GC_ACCOUNTS.set_for_testing({'default': {'default_home_path': 'gs://gethue-other-bucket/dir'}}),
+  ]
+
+  try:
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/dir'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://gethue-other-bucket/dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When some different path is set in both RAZ and non-RAZ GS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user'),
+    GC_ACCOUNTS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/dir'}}),
+  ]
+
+  try:
+    default_gs_home_path = get_gs_home_directory(user)
+    assert default_gs_home_path == 'gs://'
+
+    default_gs_home_path = get_gs_home_directory(user_not_me)
+    assert default_gs_home_path == 'gs://'
+  finally:
+    for reset in resets:
+      reset()
+
+
+class TestGCS(TestCase):
+  def setup_method(self, method):
+    if not is_gs_enabled():
+      pytest.skip('gs not enabled')
+
+  def test_with_credentials(self):
+    # Simple test that makes sure no errors are thrown.
+    client = get_client(fs='gs')
+    buckets = client.listdir_stats('gs://')
+    LOG.info(len(buckets))

+ 0 - 37
desktop/core/src/desktop/lib/fs/gc/tests.py

@@ -1,37 +0,0 @@
-# Licensed to Cloudera, Inc. under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  Cloudera, Inc. licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from __future__ import absolute_import
-
-import logging
-import pytest
-import unittest
-from django.test import TestCase
-from desktop.conf import is_gs_enabled
-from desktop.lib.fsmanager import get_client
-
-LOG = logging.getLogger()
-
-
-class TestGCS(TestCase):
-  def setup_method(self, method):
-    if not is_gs_enabled():
-      pytest.skip('gs not enabled')
-
-  def test_with_credentials(self):
-    # Simple test that makes sure no errors are thrown.
-    client = get_client(fs='gs')
-    buckets = client.listdir_stats('gs://')
-    LOG.info(len(buckets))

+ 9 - 14
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -13,26 +13,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from builtins import object
 import logging
-
+from builtins import object
 from urllib.parse import urlparse as lib_urlparse
 
 from crequest.middleware import CrequestMiddleware
-from useradmin.models import User
-
-from desktop.auth.backend import is_admin
-from desktop.conf import DEFAULT_USER, ENABLE_ORGANIZATIONS, is_ofs_enabled, is_raz_gs
-from desktop.lib.fs.ozone import OFS_ROOT
-
-from desktop.lib.fs.gc.gs import get_gs_home_directory
 
 from aws.conf import is_raz_s3
 from aws.s3.s3fs import get_s3_home_directory
-
+from azure.abfs.__init__ import get_abfs_home_directory
 from azure.conf import is_raz_abfs
-from azure.abfs.__init__ import get_home_dir_for_abfs
-
+from desktop.auth.backend import is_admin
+from desktop.conf import DEFAULT_USER, ENABLE_ORGANIZATIONS, is_ofs_enabled, is_raz_gs
+from desktop.lib.fs.gc.gs import get_gs_home_directory
+from desktop.lib.fs.ozone import OFS_ROOT
+from useradmin.models import User
 
 LOG = logging.getLogger()
 DEFAULT_USER = DEFAULT_USER.get()
@@ -207,7 +202,7 @@ class ProxyFS(object):
     Initially home_path will have path value for HDFS, try creating the user home dir for it first.
     Then, we check if S3/ABFS is configured via RAZ. If yes, try creating user home dir for them next.
     """
-    from desktop.conf import RAZ # Imported dynamically in order to have proper value.
+    from desktop.conf import RAZ  # Imported dynamically in order to have proper value.
 
     try:
       self._get_fs(home_path).create_home_dir(home_path)
@@ -222,7 +217,7 @@ class ProxyFS(object):
     if is_raz_s3():
       home_path = get_s3_home_directory(User.objects.get(username=self.getuser()))
     elif is_raz_abfs():
-      home_path = get_home_dir_for_abfs(User.objects.get(username=self.getuser()))
+      home_path = get_abfs_home_directory(User.objects.get(username=self.getuser()))
     elif is_raz_gs():
       home_path = get_gs_home_directory(User.objects.get(username=self.getuser()))
 

+ 14 - 18
desktop/core/src/desktop/models.py

@@ -1751,20 +1751,13 @@ def get_cluster_config(user):
 
 def get_remote_home_storage(user=None):
   remote_home_storage = REMOTE_STORAGE_HOME.get() if hasattr(REMOTE_STORAGE_HOME, 'get') and REMOTE_STORAGE_HOME.get() else None
-
-  if not remote_home_storage:
-    if get_raz_api_url() and get_raz_s3_default_bucket():
-      remote_home_storage = 's3a://%(bucket)s' % get_raz_s3_default_bucket()
-
-  remote_home_storage = _handle_user_dir_raz(user, remote_home_storage)
-
-  return remote_home_storage
+  return _handle_user_dir_raz(user, remote_home_storage)
 
 
 def _handle_user_dir_raz(user, remote_home_storage):
-  # In RAZ env, apppend username so that it defaults to user's dir and doesn't give 403 error
-  if user and remote_home_storage and RAZ.IS_ENABLED.get() and remote_home_storage.endswith('/user'):
-    remote_home_storage += '/' + user.username
+  # In RAZ environment, apppend username so that it defaults to user's directory and does not give 403 error
+  if user and remote_home_storage and RAZ.IS_ENABLED.get() and remote_home_storage.endswith(('/user', '/user/')):
+    remote_home_storage = remote_home_storage.rstrip('/') + '/' + user.username
 
   return remote_home_storage
 
@@ -2017,7 +2010,7 @@ class ClusterConfig(object):
 
     for hdfs_connector in hdfs_connectors:
       force_home = remote_home_storage and not remote_home_storage.startswith('/')
-      home_path = self.user.get_home_directory(force_home=force_home).encode('utf-8')
+      home_path = self.user.get_home_directory(force_home=force_home)
       interpreters.append({
         'type': 'hdfs',
         'displayName': hdfs_connector,
@@ -2030,7 +2023,8 @@ class ClusterConfig(object):
       })
 
     if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('s3a', self.user):
-      home_path = remote_home_storage if remote_home_storage else 's3a://'.encode('utf-8')
+      from aws.s3.s3fs import get_s3_home_directory
+      home_path = get_s3_home_directory(self.user)
       interpreters.append({
         'type': 's3',
         'displayName': _('S3'),
@@ -2040,7 +2034,8 @@ class ClusterConfig(object):
       })
 
     if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('gs', self.user):
-      home_path = remote_home_storage if remote_home_storage else 'gs://'.encode('utf-8')
+      from desktop.lib.fs.gc.gs import get_gs_home_directory
+      home_path = get_gs_home_directory(self.user)
       interpreters.append({
         'type': 'gs',
         'displayName': _('GS'),
@@ -2050,7 +2045,8 @@ class ClusterConfig(object):
       })
 
     if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('adl', self.user):
-      home_path = remote_home_storage if remote_home_storage else 'adl:/'.encode('utf-8')
+      # ADLS does not have a dedicated get_home_directory method
+      home_path = remote_home_storage if remote_home_storage else 'adl:/'
       interpreters.append({
         'type': 'adls',
         'displayName': _('ADLS'),
@@ -2060,8 +2056,8 @@ class ClusterConfig(object):
       })
 
     if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('abfs', self.user):
-      from azure.abfs.__init__ import get_home_dir_for_abfs
-      home_path = remote_home_storage if remote_home_storage else get_home_dir_for_abfs(self.user).encode('utf-8')
+      from azure.abfs.__init__ import get_abfs_home_directory
+      home_path = get_abfs_home_directory(self.user)
       interpreters.append({
         'type': 'abfs',
         'displayName': _('ABFS'),
@@ -2072,7 +2068,7 @@ class ClusterConfig(object):
 
     if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('ofs', self.user):
       from desktop.lib.fs.ozone.ofs import get_ofs_home_directory
-      home_path = get_ofs_home_directory().encode('utf-8')
+      home_path = get_ofs_home_directory()
       interpreters.append({
         'type': 'ofs',
         'displayName': _('Ozone'),

File diff suppressed because it is too large
+ 188 - 339
desktop/core/src/desktop/models_tests.py


+ 26 - 16
desktop/libs/aws/src/aws/conf.py

@@ -13,23 +13,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
 
-import logging
 import os
 import re
 import sys
+import logging
 
 import requests
 
-from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool, coerce_password_from_script
+from desktop.lib.conf import Config, ConfigSection, UnspecifiedConfigSection, coerce_bool, coerce_password_from_script
 from desktop.lib.idbroker import conf as conf_idbroker
-from hadoop.core_site import get_s3a_access_key, get_s3a_secret_key, get_s3a_session_token, get_raz_api_url, get_raz_s3_default_bucket
+from hadoop.core_site import get_raz_api_url, get_raz_s3_default_bucket, get_s3a_access_key, get_s3a_secret_key, get_s3a_session_token
 
 if sys.version_info[0] > 2:
-  from django.utils.translation import gettext_lazy as _, gettext as _t
+  from django.utils.translation import gettext as _t, gettext_lazy as _
 else:
-  from django.utils.translation import ugettext_lazy as _, ugettext as _t
+  from django.utils.translation import ugettext as _t, ugettext_lazy as _
 
 
 LOG = logging.getLogger()
@@ -39,18 +38,20 @@ DEFAULT_CALLING_FORMAT = 'boto.s3.connection.OrdinaryCallingFormat'
 SUBDOMAIN_ENDPOINT_RE = 's3.(?P<region>[a-z0-9-]+).amazonaws.com'
 HYPHEN_ENDPOINT_RE = 's3-(?P<region>[a-z0-9-]+).amazonaws.com'
 DUALSTACK_ENDPOINT_RE = 's3.dualstack.(?P<region>[a-z0-9-]+).amazonaws.com'
-AWS_ACCOUNT_REGION_DEFAULT = 'us-east-1' # Location.USEast
+AWS_ACCOUNT_REGION_DEFAULT = 'us-east-1'  # Location.USEast
 PERMISSION_ACTION_S3 = "s3_access"
 REGION_CACHED = None
 IS_IAM_CACHED = None
 IS_EC2_CACHED = None
 
+
 def clear_cache():
   global REGION_CACHED, IS_IAM_CACHED, IS_EC2_CACHED
   REGION_CACHED = None
   IS_IAM_CACHED = None
   IS_EC2_CACHED = None
 
+
 def get_locations():
   return ('EU',  # Ireland
     'af-south-1',
@@ -265,6 +266,12 @@ AWS_ACCOUNTS = UnspecifiedConfigSection(
         default=14400,
         type=int
       ),
+      DEFAULT_HOME_PATH=Config(
+        key="default_home_path",
+        type=str,
+        default=None,
+        help="Optionally set this for a different home directory path. e.g. s3a://gethue"
+      ),
     )
   )
 )
@@ -292,11 +299,10 @@ def is_ec2_instance():
 
   try:
     # Low chance of false positive
-    IS_EC2_CACHED = (os.path.exists('/sys/hypervisor/uuid') and open('/sys/hypervisor/uuid', 'r').read()[:3].lower() == 'ec2') or \
-      (
-        os.path.exists('/sys/devices/virtual/dmi/id/product_uuid') and \
-        open('/sys/devices/virtual/dmi/id/product_uuid', 'r').read()[:3].lower() == 'ec2'
-      )
+    IS_EC2_CACHED = (os.path.exists('/sys/hypervisor/uuid') and open('/sys/hypervisor/uuid', 'r').read()[:3].lower() == 'ec2') or (
+      os.path.exists('/sys/devices/virtual/dmi/id/product_uuid')
+      and open('/sys/devices/virtual/dmi/id/product_uuid', 'r').read()[:3].lower() == 'ec2'
+    )
   except Exception as e:
     LOG.info("Detecting if Hue on an EC2 host, error might be expected: %s" % e)
 
@@ -324,7 +330,7 @@ def has_iam_metadata():
       IS_IAM_CACHED = 'iam' in metadata
     else:
       IS_IAM_CACHED = False
-  except:
+  except Exception:
     IS_IAM_CACHED = False
     LOG.exception("Encountered error when checking IAM metadata")
   return IS_IAM_CACHED
@@ -340,13 +346,17 @@ def has_s3_access(user):
 def is_raz_s3():
   from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
 
-  return (RAZ.IS_ENABLED.get() and 'default' in list(AWS_ACCOUNTS.keys()) and \
-          AWS_ACCOUNTS['default'].HOST.get() and AWS_ACCOUNTS['default'].get_raw())
+  return (
+    RAZ.IS_ENABLED.get()
+    and 'default' in list(AWS_ACCOUNTS.keys())
+    and AWS_ACCOUNTS['default'].HOST.get()
+    and AWS_ACCOUNTS['default'].get_raw()
+  )
 
 
 def config_validator(user):
   res = []
-  import desktop.lib.fsmanager # Circular dependecy
+  import desktop.lib.fsmanager  # Circular dependecy
 
   if is_enabled():
     try:

+ 31 - 18
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -16,15 +16,14 @@
 
 from __future__ import absolute_import
 
-from builtins import str
-from builtins import object
-import itertools
-import logging
 import os
-import posixpath
 import re
 import sys
 import time
+import logging
+import itertools
+import posixpath
+from builtins import object, str
 
 from boto.exception import BotoClientError, S3ResponseError
 from boto.s3.connection import Location
@@ -32,24 +31,26 @@ from boto.s3.key import Key
 from boto.s3.prefix import Prefix
 
 from aws import s3
-from aws.conf import get_default_region, get_locations, PERMISSION_ACTION_S3, is_raz_s3
-from aws.s3 import normpath, s3file, translate_s3_error, S3A_ROOT
+from aws.conf import AWS_ACCOUNTS, PERMISSION_ACTION_S3, get_default_region, get_locations, is_raz_s3
+from aws.s3 import S3A_ROOT, normpath, s3file, translate_s3_error
 from aws.s3.s3stat import S3Stat
-
 from filebrowser.conf import REMOTE_STORAGE_HOME
 
 if sys.version_info[0] > 2:
-  import urllib.request, urllib.error
+  import urllib.error
+  import urllib.request
   from urllib.parse import quote as urllib_quote, urlparse as lib_urlparse
+
   from django.utils.translation import gettext as _
 else:
   from urllib import quote as urllib_quote
-  from urlparse import urlparse as lib_urlparse
+
   from django.utils.translation import ugettext as _
+  from urlparse import urlparse as lib_urlparse
 
 DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
 BUCKET_NAME_PATTERN = re.compile(
-  "^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
+  r"^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
 
 LOG = logging.getLogger()
 
@@ -58,10 +59,12 @@ class S3FileSystemException(IOError):
   def __init__(self, *args, **kwargs):
     super(S3FileSystemException, self).__init__(*args, **kwargs)
 
+
 class S3ListAllBucketsException(S3FileSystemException):
   def __init__(self, *args, **kwargs):
     super(S3FileSystemException, self).__init__(*args, **kwargs)
 
+
 def auth_error_handler(view_fn):
   def decorator(*args, **kwargs):
     try:
@@ -89,9 +92,20 @@ def auth_error_handler(view_fn):
 def get_s3_home_directory(user=None):
   from desktop.models import _handle_user_dir_raz
 
-  remote_home_s3 = 's3a://'
+  # REMOTE_STORAGE_HOME is deprecated in favor of DEFAULT_HOME_PATH per FS config level.
+  # But for backward compatibility, we are still giving preference to REMOTE_STORAGE_HOME path first and if it's not set,
+  # then check for DEFAULT_HOME_PATH which is set per FS config block. This helps in setting diff DEFAULT_HOME_PATH for diff FS in Hue.
+
   if hasattr(REMOTE_STORAGE_HOME, 'get') and REMOTE_STORAGE_HOME.get() and REMOTE_STORAGE_HOME.get().startswith('s3a://'):
     remote_home_s3 = REMOTE_STORAGE_HOME.get()
+  elif (
+    'default' in AWS_ACCOUNTS
+    and AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get()
+    and AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get().startswith('s3a://')
+  ):
+    remote_home_s3 = AWS_ACCOUNTS['default'].DEFAULT_HOME_PATH.get()
+  else:
+    remote_home_s3 = 's3a://'
 
   remote_home_s3 = _handle_user_dir_raz(user, remote_home_s3)
 
@@ -116,7 +130,7 @@ class S3FileSystem(object):
     except S3ResponseError as e:
       if e.status == 301 or e.status == 400:
         raise S3FileSystemException(
-          _('Failed to retrieve bucket "%s" in region "%s" with "%s". Your bucket is in region "%s"') % 
+          _('Failed to retrieve bucket "%s" in region "%s" with "%s". Your bucket is in region "%s"') %
           (name, self._get_location(), e.message or e.reason, self.get_bucket_location(name)))
       else:
         raise e
@@ -213,7 +227,7 @@ class S3FileSystem(object):
         raise S3FileSystemException(_('User is not authorized to access path: "%s"') % path)
       else:
         raise S3FileSystemException(_('Failed to access path "%s": %s') % (path, e.reason))
-    except Exception as e: # SSL errors show up here, because they've been remapped in boto
+    except Exception as e:  # SSL errors show up here, because they've been remapped in boto
       raise S3FileSystemException(_('Failed to access path "%s": %s') % (path, str(e)))
     if key is None:
       key = self._get_key(path, validate=False)
@@ -382,7 +396,6 @@ class S3FileSystem(object):
             LOG.error(msg)
             raise S3FileSystemException(msg)
 
-
   @translate_s3_error
   @auth_error_handler
   def remove(self, path, skip_trash=True):
@@ -455,7 +468,7 @@ class S3FileSystem(object):
   def _copy(self, src, dst, recursive, use_src_basename):
     src_st = self.stats(src)
     if src_st.isDir and not recursive:
-      return # omitting directory
+      return  # omitting directory
 
     dst = s3.abspath(src, dst)
     dst_st = self._stats(dst)
@@ -513,7 +526,7 @@ class S3FileSystem(object):
       self.rmtree(old, skipTrash=True)
     else:
       raise S3FileSystemException('Destination path is same as source path, skipping the operation.')
-  
+
   @translate_s3_error
   @auth_error_handler
   def _check_key_parent_path(self, src, dst):
@@ -600,5 +613,5 @@ class S3FileSystem(object):
     self.user = user  # Only used in Cluster middleware request.fs
 
   def get_upload_chuck_size(self):
-    from hadoop.conf import UPLOAD_CHUNK_SIZE # circular dependency
+    from hadoop.conf import UPLOAD_CHUNK_SIZE  # circular dependency
     return UPLOAD_CHUNK_SIZE.get()

+ 138 - 74
desktop/libs/aws/src/aws/s3/s3fs_test.py

@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # Licensed to Cloudera, Inc. under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -13,36 +14,145 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
 
-import json
 import os
-import pytest
-import tempfile
+import json
 import string
-import sys
+import tempfile
+from unittest.mock import Mock, patch
 
-from desktop.lib.django_test_util import make_logged_in_client
-from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
-from useradmin.models import User
+import pytest
 
+from aws.conf import AWS_ACCOUNTS
 from aws.s3 import join, parse_uri
-from aws.s3.s3fs import S3FileSystem, S3FileSystemException
+from aws.s3.s3fs import S3FileSystem, S3FileSystemException, get_s3_home_directory
 from aws.s3.s3test_utils import S3TestBase, generate_id
 from aws.s3.upload import DEFAULT_WRITE_SIZE
+from desktop.conf import RAZ
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.test_utils import add_permission, add_to_group, grant_access, remove_from_group
+from filebrowser.conf import REMOTE_STORAGE_HOME
+from useradmin.models import User
 
 
-if sys.version_info[0] > 2:
-  from unittest.mock import patch, Mock
-else:
-  from mock import patch, Mock
-
-
-class TestS3FileSystem():
-
+@pytest.mark.django_db
+def test_get_s3_home_directory():
+  client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
+  user = User.objects.get(username="test")
+
+  client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
+  user_not_me = User.objects.get(username="test_not_me")
+
+  # When REMOTE_STORAGE_HOME ends with /user in RAZ S3 environment.
+  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user')]
+
+  try:
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://gethue-bucket/user/test'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://gethue-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When S3 filesystem's DEFAULT_HOME_PATH ends with /user in RAZ S3 environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    AWS_ACCOUNTS.set_for_testing(
+      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/user'}}
+    ),
+  ]
+
+  try:
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/user/test'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When S3 filesystem's DEFAULT_HOME_PATH is set in non-RAZ S3 environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(False),
+    AWS_ACCOUNTS.set_for_testing(
+      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/test-dir'}}
+    ),
+  ]
+
+  try:
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/test-dir'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/test-dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When both REMOTE_STORAGE_HOME and S3 filesystem's DEFAULT_HOME_PATH are set in RAZ S3 environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user'),
+    AWS_ACCOUNTS.set_for_testing(
+      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/user'}}
+    ),
+  ]
+
+  try:
+    # Gives preference to REMOTE_STORAGE_HOME for of backward compatibility.
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://gethue-bucket/user/test'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://gethue-bucket/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When S3 filesystem's DEFAULT_HOME_PATH is set but path does not end with ../user or ../user/ in RAZ S3 environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    AWS_ACCOUNTS.set_for_testing(
+      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/dir'}}
+    ),
+  ]
+
+  try:
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/dir'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://gethue-other-bucket/dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When some different path is set in both RAZ and non-RAZ S3 environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user'),
+    AWS_ACCOUNTS.set_for_testing(
+      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 'abfs://gethue-other-container/dir'}}
+    ),
+  ]
+
+  try:
+    default_s3_home_path = get_s3_home_directory(user)
+    assert default_s3_home_path == 's3a://'
+
+    default_s3_home_path = get_s3_home_directory(user_not_me)
+    assert default_s3_home_path == 's3a://'
+  finally:
+    for reset in resets:
+      reset()
+
+
+class TestS3FileSystem:
   def test_rmtree_bucket(self):
     with patch('aws.s3.s3fs.S3FileSystem._delete_bucket') as _delete_bucket:
-
       fs = S3FileSystem(s3_connection=Mock())
 
       fs.rmtree(path='s3a://gethue')
@@ -52,19 +162,11 @@ class TestS3FileSystem():
   def test_rmtree_key(self):
     with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
       with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-
         key = Mock(
           name='data',
           exists=Mock(return_value=True),
-          bucket=Mock(
-            list=Mock(return_value=[]),
-            delete_key=Mock()
-          ),
-          delete=Mock(
-            return_value=Mock(
-              exists=Mock(return_value=False)
-            )
-          )
+          bucket=Mock(list=Mock(return_value=[]), delete_key=Mock()),
+          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
         )
         _get_key.return_value = key
         isdir.return_value = False
@@ -79,19 +181,11 @@ class TestS3FileSystem():
   def test_rmtree_empty_dir(self):
     with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
       with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-
         key = Mock(
           name='data',
           exists=Mock(return_value=True),
-          bucket=Mock(
-            list=Mock(return_value=[]),
-            delete_key=Mock()
-          ),
-          delete=Mock(
-            return_value=Mock(
-              exists=Mock(return_value=False)
-            )
-          )
+          bucket=Mock(list=Mock(return_value=[]), delete_key=Mock()),
+          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
         )
         _get_key.return_value = key
         isdir.return_value = True
@@ -107,23 +201,11 @@ class TestS3FileSystem():
   def test_rmtree_non_empty_dir(self):
     with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
       with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-
         key = Mock(
           name='data',
           exists=Mock(return_value=True),
-          bucket=Mock(
-            list=Mock(return_value=['data/1', 'data/2']),
-            delete_keys=Mock(
-              return_value=Mock(
-                errors=[]
-              )
-            )
-          ),
-          delete=Mock(
-            return_value=Mock(
-              exists=Mock(return_value=False)
-            )
-          )
+          bucket=Mock(list=Mock(return_value=['data/1', 'data/2']), delete_keys=Mock(return_value=Mock(errors=[]))),
+          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
         )
         _get_key.return_value = key
         isdir.return_value = True
@@ -138,7 +220,6 @@ class TestS3FileSystem():
 
 
 class S3FSTest(S3TestBase):
-
   @classmethod
   def setup_class(cls):
     S3TestBase.setup_class()
@@ -150,7 +231,6 @@ class S3FSTest(S3TestBase):
       add_to_group('test')
       cls.user = User.objects.get(username="test")
 
-
   def test_open(self):
     path = self.get_test_path('test_open.txt')
 
@@ -174,7 +254,6 @@ class S3FSTest(S3TestBase):
       with pytest.raises(Exception):
         self.fs.open(path, mode='?r')
 
-
   def test_read(self):
     path = self.get_test_path('test_read.txt')
     with self.cleaning(path):
@@ -184,15 +263,12 @@ class S3FSTest(S3TestBase):
       assert 'Hel' == self.fs.read(path, 0, 3)
       assert 'ell' == self.fs.read(path, 1, 3)
 
-
   def test_isfile(self):
     pass
 
-
   def test_isdir(self):
     pass
 
-
   def test_exists(self):
     dir_path = self.get_test_path('test_exists')
     file_path = join(dir_path, 'file')
@@ -210,7 +286,6 @@ class S3FSTest(S3TestBase):
     fake_bucket = 'fake%s' % generate_id(8, string.ascii_lowercase + string.digits)
     assert not self.fs.exists('s3a://%s' % fake_bucket)
 
-
   def test_stats(self):
     with pytest.raises(ValueError):
       self.fs.stats('ftp://archive')
@@ -219,14 +294,13 @@ class S3FSTest(S3TestBase):
       self.fs.stats(not_exists)
 
     root_stat = self.fs.stats('s3a://')
-    assert True == root_stat.isDir
+    assert True is root_stat.isDir
     assert 's3a://' == root_stat.path
 
     bucket_stat = self.fs.stats('s3a://%s' % self.bucket_name)
-    assert True == bucket_stat.isDir
+    assert True is bucket_stat.isDir
     assert 's3a://%s' % self.bucket_name == bucket_stat.path
 
-
   def test_copyfile(self):
     src_path = self.get_test_path('test_copy_file_src')
     dst_path = self.get_test_path('test_copy_file_dst')
@@ -239,7 +313,6 @@ class S3FSTest(S3TestBase):
       actual = self.fs.read(dst_path, 0, len(data) + 100)
       assert data == actual
 
-
   def test_full_copy(self):
     src_path = self.get_test_path('test_full_copy_src')
     dst_path = self.get_test_path('test_full_copy_dst')
@@ -269,7 +342,6 @@ class S3FSTest(S3TestBase):
       with pytest.raises(S3FileSystemException):
         self.fs.copy(src_path, dst_file_path, True)
 
-
   def test_copy_remote_dir(self):
     src_dir = self.get_test_path('test_copy_remote_dir_src')
     dst_dir = self.get_test_path('test_copy_remote_dir_dst')
@@ -291,7 +363,6 @@ class S3FSTest(S3TestBase):
       assert src_names
       assert src_names == dst_names
 
-
   def test_copy_from_local(self):
     src_name = 'test_copy_from_local_src'
     src_path = os.path.join(tempfile.gettempdir(), src_name)
@@ -307,7 +378,6 @@ class S3FSTest(S3TestBase):
       actual = self.fs.read(dst_path, 0, len(data) + 100)
       assert data == actual
 
-
   def test_rename_dir(self):
     src_dir = self.get_test_path('test_rename_dir_src')
     dst_dir = self.get_test_path('test_rename_dir_dst')
@@ -337,13 +407,12 @@ class S3FSTest(S3TestBase):
 
       # Assert that the children files are not duplicated at top-level destination
       bucket_ls = self.bucket.list()
-      assert not 'file_one.txt' in bucket_ls
-      assert not 'file_two.txt' in bucket_ls
+      assert 'file_one.txt' not in bucket_ls
+      assert 'file_two.txt' not in bucket_ls
 
       # Assert that only the renamed directory, and not an empty file, exists
       assert 1 == len([key for key in bucket_ls if key.name.strip('/') == self.get_key(dst_dir).name.strip('/')])
 
-
   def test_rename_star(self):
     src_dir = self.get_test_path('test_rename_star_src')
     dst_dir = self.get_test_path('test_rename_star_dst')
@@ -370,7 +439,6 @@ class S3FSTest(S3TestBase):
       assert src_names
       assert src_names == dst_names
 
-
   def test_rmtree(self):
     with pytest.raises(NotImplementedError):
       self.fs.rmtree('universe', skipTrash=False)
@@ -390,12 +458,10 @@ class S3FSTest(S3TestBase):
       assert not self.fs.exists(nested_dir)
       assert not self.fs.exists(directory)
 
-
   def test_listing_buckets(self):
     buckets = self.fs.listdir('s3a://')
     assert len(buckets) > 0
 
-
   def test_mkdir(self):
     dir_path = self.get_test_path('test_mkdir')
     assert not self.fs.exists(dir_path)
@@ -403,7 +469,6 @@ class S3FSTest(S3TestBase):
     self.fs.mkdir(dir_path)
     assert self.fs.exists(dir_path)
 
-
   def test_upload_file(self):
     with tempfile.NamedTemporaryFile() as local_file:
       # Make sure we can upload larger than the UPLOAD chunk size
@@ -431,7 +496,6 @@ class S3FSTest(S3TestBase):
       expected = file(local_file).read()
       assert actual == expected, 'files do not match: %s != %s' % (len(actual), len(expected))
 
-
   def test_check_access(self):
     dir_path = self.get_test_path('test_check_access')
     self.fs.mkdir(dir_path)

+ 26 - 14
desktop/libs/azure/src/azure/abfs/__init__.py

@@ -15,29 +15,29 @@
 # limitations under the License.
 from __future__ import absolute_import
 
-import calendar
-import errno
 import re
+import time
+import errno
 import logging
+import calendar
 import tempfile
 import posixpath
-import time
-
-from hadoop.fs import normpath as fs_normpath
-from azure.conf import get_default_abfs_fs
 
+from azure.conf import ABFS_CLUSTERS, get_default_abfs_fs
 from desktop.conf import RAZ
 from filebrowser.conf import REMOTE_STORAGE_HOME
+from hadoop.fs import normpath as fs_normpath
 
 LOG = logging.getLogger()
 
-#check this first for problems
+# check this first for problems
 ABFS_PATH_RE = re.compile(
-  '^/*[aA][bB][fF][sS]{1,2}://([$a-z0-9](?!.*--)[-a-z0-9]{1,61}[a-z0-9])(@[^.]*?\.dfs\.core\.windows\.net)?(/(.*?)/?)?$')
+  r'^/*[aA][bB][fF][sS]{1,2}://([$a-z0-9](?!.*--)[-a-z0-9]{1,61}[a-z0-9])(@[^.]*?\.dfs\.core\.windows\.net)?(/(.*?)/?)?$')
 ABFS_ROOT_S = 'abfss://'
 ABFS_ROOT = 'abfs://'
 ABFSACCOUNT_NAME = re.compile('^/*[aA][bB][fF][sS]{1,2}://[$a-z0-9](?!.*--)[-a-z0-9]{1,61}[a-z0-9](@.*?)$')
 
+
 def parse_uri(uri):
   """
   Returns filesystem_name, direct_name, base_direct_name
@@ -50,6 +50,7 @@ def parse_uri(uri):
   account_name_and_path = match.group(2) or ''
   return match.group(1), direct_name, account_name_and_path
 
+
 def only_filesystem_and_account_name(uri):
   """
   Given a path returns only the filesystem and account name,
@@ -60,6 +61,7 @@ def only_filesystem_and_account_name(uri):
     return match.group(1) + match.group(2)
   return uri
 
+
 def is_root(uri):
   """
   Checks if Uri is the Root Directory
@@ -76,10 +78,11 @@ def strip_scheme(path):
     if filesystem == '':
       raise ValueError('File System must be Specified')
     path = filesystem + '/' + file_path
-  except:
+  except Exception:
     return path
   return path
 
+
 def strip_path(path):
   """
   Return only the end of a path given another path
@@ -89,6 +92,7 @@ def strip_path(path):
   split_path = path.split('/')
   return split_path[len(split_path) - 1]
 
+
 def normpath(path):
   """
   Return the normlized path, but ignore leading prefix if it exists
@@ -103,6 +107,7 @@ def normpath(path):
     normalized = fs_normpath(path)
   return normalized
 
+
 def parent_path(path):
   """
   Returns the parent of the specified folder
@@ -124,6 +129,7 @@ def parent_path(path):
     return normpath(ABFS_ROOT + filesystem + '/' + parent)
   return normpath(ABFS_ROOT_S + filesystem + '/' + parent)
 
+
 def join(first, *complist):
   """
   Join a path on to another path
@@ -151,13 +157,13 @@ def abfspath(path, fs_defaultfs=None):
   if fs_defaultfs is None:
     try:
       fs_defaultfs = get_default_abfs_fs()
-    except:
+    except Exception:
       LOG.warning("Configuration for ABFS is not set, may run into errors")
       return path
   filesystem, dir_name = ("", "")
   try:
     filesystem, dir_name = parse_uri(path)[:2]
-  except:
+  except Exception:
     return path
   account_name = ABFSACCOUNT_NAME.match(fs_defaultfs)
   LOG.debug("%s" % fs_defaultfs)
@@ -170,7 +176,7 @@ def abfspath(path, fs_defaultfs=None):
   return path
 
 
-def get_home_dir_for_abfs(user=None):
+def get_abfs_home_directory(user=None):
   """
   Attempts to go to the directory set in the config file or core-site.xml else defaults to abfs://
   """
@@ -179,11 +185,17 @@ def get_home_dir_for_abfs(user=None):
   try:
     filesystem = parse_uri(get_default_abfs_fs())[0]
     remote_home_abfs = "abfs://" + filesystem
-  except:
+  except Exception:
     remote_home_abfs = 'abfs://'
 
+  # REMOTE_STORAGE_HOME is deprecated in favor of DEFAULT_HOME_PATH per FS config level.
+  # But for backward compatibility, we are still giving preference to REMOTE_STORAGE_HOME path first and if it's not set,
+  # then check for DEFAULT_HOME_PATH which is set per FS config block. This helps in setting diff DEFAULT_HOME_PATH for diff FS in Hue.
+
   if hasattr(REMOTE_STORAGE_HOME, 'get') and REMOTE_STORAGE_HOME.get() and REMOTE_STORAGE_HOME.get().startswith('abfs://'):
     remote_home_abfs = REMOTE_STORAGE_HOME.get()
+  elif 'default' in ABFS_CLUSTERS and ABFS_CLUSTERS['default'].DEFAULT_HOME_PATH.get() and ABFS_CLUSTERS['default'].DEFAULT_HOME_PATH.get().startswith('abfs://'):
+    remote_home_abfs = ABFS_CLUSTERS['default'].DEFAULT_HOME_PATH.get()
 
   remote_home_abfs = _handle_user_dir_raz(user, remote_home_abfs)
 
@@ -199,7 +211,7 @@ def abfsdatetime_to_timestamp(datetime):
   """
   # There is chance (depends on platform) to get
   # `'z' is a bad directive in format ...` error (see https://bugs.python.org/issue6641),
-  #LOG.debug("%s" %datetime)
+  # LOG.debug("%s" %datetime)
   stripped = time.strptime(datetime[:-4], '%a, %d %b %Y %H:%M:%S')
   if datetime[-4:] != ' GMT':
     raise ValueError('Time [%s] is not in GMT.' % datetime)

+ 20 - 37
desktop/libs/azure/src/azure/abfs/abfs.py

@@ -18,43 +18,35 @@
 """
 Interfaces for ABFS
 """
-from future import standard_library
-standard_library.install_aliases()
-from builtins import object
-import logging
+
 import os
+import re
 import sys
+import logging
 import threading
-import re
-
+import urllib.error
+import urllib.request
+from builtins import object
 from math import ceil
 from posixpath import join
-
-from hadoop.hdfs_site import get_umask_mode
-from hadoop.fs.exceptions import WebHdfsException
-
-from desktop.conf import RAZ
-from desktop.lib.rest import http_client, resource
-from desktop.lib.rest.raz_http_client import RazHttpClient
+from urllib.parse import quote as urllib_quote, urlparse as lib_urlparse
 
 import azure.abfs.__init__ as Init_ABFS
 from azure.abfs.abfsfile import ABFSFile
 from azure.abfs.abfsstats import ABFSStat
 from azure.conf import PERMISSION_ACTION_ABFS, is_raz_abfs
-
-if sys.version_info[0] > 2:
-  import urllib.request, urllib.error
-  from urllib.parse import quote as urllib_quote
-  from urllib.parse import urlparse as lib_urlparse
-else:
-  from urlparse import urlparse as lib_urlparse
-  from urllib import quote as urllib_quote
+from desktop.conf import RAZ
+from desktop.lib.rest import http_client, resource
+from desktop.lib.rest.raz_http_client import RazHttpClient
+from hadoop.fs.exceptions import WebHdfsException
+from hadoop.hdfs_site import get_umask_mode
 
 LOG = logging.getLogger()
 
 # Azure has a 30MB block limit on upload.
 UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000
 
+
 class ABFSFileSystemException(IOError):
 
   def __init__(self, *args, **kwargs):
@@ -134,7 +126,7 @@ class ABFS(object):
 
   def _getheaders(self):
     headers = {
-      "x-ms-version": "2019-12-12" # For latest SAS support
+      "x-ms-version": "2019-12-12"  # For latest SAS support
     }
 
     if self._token_type and self._access_token:
@@ -190,7 +182,7 @@ class ABFS(object):
       return ABFSStat.for_root(path)
     try:
       file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
-    except:
+    except Exception:
       raise IOError
 
     if dir_name == '':
@@ -294,7 +286,6 @@ class ABFS(object):
 
     return [x.name for x in listofDir]
 
-
   def listfilesystems(self, root=Init_ABFS.ABFS_ROOT, params=None, **kwargs):
     """
     Lists the names of the File Systems, limited arguments
@@ -307,7 +298,7 @@ class ABFS(object):
     """
     Attempts to go to the directory set by the user in the configuration file. If not set, defaults to abfs://
     """
-    return Init_ABFS.get_home_dir_for_abfs()
+    return Init_ABFS.get_abfs_home_directory()
 
   # Find or alter information about the URI path
   # --------------------------------
@@ -682,14 +673,6 @@ class ABFS(object):
     Check access of a file/directory (Work in Progress/Not Ready)
     """
     raise NotImplementedError("")
-    try:
-      status = self.stats(path)
-      if 'x-ms-permissions' not in status.keys():
-        raise b
-    except b:
-      LOG.debug("Permisions have not been set")
-    except:
-      Exception
 
   def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
     """
@@ -718,9 +701,9 @@ class ABFS(object):
     return self._filebrowser_action
 
   # Other Methods to condense stuff
-  #----------------------------
+  # ----------------------------
   # Write Files on creation
-  #----------------------------
+  # ----------------------------
   def _writedata(self, path, data, size):
     """
     Adds text to a given file
@@ -733,11 +716,11 @@ class ABFS(object):
         length = chunk_size
       else:
         length = chunk
-      self._append(path, data[i*chunk_size:i*chunk_size + length], length)
+      self._append(path, data[i * chunk_size:i * chunk_size + length], length)
     self.flush(path, {'position': int(size)})
 
   # Use Patch HTTP request
-  #----------------------------
+  # ----------------------------
   def _patching_sl(self, schemeless_path, param, data=None, header=None, **kwargs):
     """
     A wrapper function for patch

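The reformatted `_writedata()` above still appends in slices bounded by the 30 MB `UPLOAD_CHUCK_SIZE`; a minimal, self-contained sketch of that slicing arithmetic (the helper name and the loop are illustrative, not the ABFS client API):

```python
from math import ceil

UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000  # Azure's ~30 MB append limit, as defined above


def iter_chunks(data, chunk_size=UPLOAD_CHUCK_SIZE):
  """Yield (offset, slice) pairs no larger than chunk_size, in upload order."""
  size = len(data)
  for i in range(int(ceil(size / float(chunk_size)))):
    offset = i * chunk_size
    yield offset, data[offset:offset + chunk_size]


# Each slice would be appended at its offset and the file flushed once at the final
# position, mirroring self._append(path, ...) followed by self.flush(path, {'position': size}).
for offset, piece in iter_chunks(b'x' * (65 * 1000 * 1000)):
  print(offset, len(piece))
```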
+ 179 - 87
desktop/libs/azure/src/azure/abfs/abfs_test.py

@@ -14,37 +14,137 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
 
-import logging
-import json
 import os
-import pytest
-import unittest
-import tempfile
+import json
 import time
+import logging
+import tempfile
 
+import pytest
 from django.contrib.auth.models import User
 from django.test import TestCase
 
-from desktop.lib.django_test_util import make_logged_in_client
-from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
-
-from azure.abfs.__init__ import abfspath
+from azure.abfs.__init__ import abfspath, get_abfs_home_directory
 from azure.abfs.abfs import ABFS
-from azure.active_directory import ActiveDirectory
-from azure.conf import ABFS_CLUSTERS,AZURE_ACCOUNTS, is_abfs_enabled
-
 from azure.abfs.upload import DEFAULT_WRITE_SIZE
+from azure.active_directory import ActiveDirectory
+from azure.conf import ABFS_CLUSTERS, AZURE_ACCOUNTS, is_abfs_enabled
+from desktop.conf import RAZ
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.test_utils import add_permission, add_to_group, grant_access, remove_from_group
+from filebrowser.conf import REMOTE_STORAGE_HOME
 
 LOG = logging.getLogger()
 
-"""
-Interfaces for ADLS via HttpFs/WebHDFS
-"""
+
+@pytest.mark.django_db
+def test_get_abfs_home_directory():
+  client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
+  user = User.objects.get(username="test")
+
+  client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
+  user_not_me = User.objects.get(username="test_not_me")
+
+  # When REMOTE_STORAGE_HOME ends with /user in RAZ ABFS environment.
+  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user')]
+
+  try:
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://gethue-container/user/test'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://gethue-container/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When ABFS filesystem's DEFAULT_HOME_PATH ends with /user in RAZ ABFS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/user'}}),
+  ]
+
+  try:
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/user/test'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When ABFS filesystem's DEFAULT_HOME_PATH is set in non-RAZ ABFS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(False),
+    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/test-dir'}}),
+  ]
+
+  try:
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/test-dir'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/test-dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When both REMOTE_STORAGE_HOME and ABFS filesystem's DEFAULT_HOME_PATH are set in RAZ ABFS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user'),
+    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/user'}}),
+  ]
+
+  try:
+    # Gives preference to REMOTE_STORAGE_HOME for backward compatibility.
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://gethue-container/user/test'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://gethue-container/user/test_not_me'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When ABFS filesystem's DEFAULT_HOME_PATH is set but path does not end with ../user or ../user/ in RAZ ABFS environment.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/dir'}}),
+  ]
+
+  try:
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/dir'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://gethue-other-container/dir'
+  finally:
+    for reset in resets:
+      reset()
+
+  # When a non-ABFS path is set in both REMOTE_STORAGE_HOME and DEFAULT_HOME_PATH, fall back to abfs://.
+  resets = [
+    RAZ.IS_ENABLED.set_for_testing(True),
+    REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user'),
+    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 's3a://gethue-other-bucket/dir'}}),
+  ]
+
+  try:
+    default_abfs_home_path = get_abfs_home_directory(user)
+    assert default_abfs_home_path == 'abfs://'
+
+    default_abfs_home_path = get_abfs_home_directory(user_not_me)
+    assert default_abfs_home_path == 'abfs://'
+  finally:
+    for reset in resets:
+      reset()
+
+
 @pytest.mark.integration
 class ABFSTestBase(TestCase):
-
   def setup_method(self, method):
     if not is_abfs_enabled():
       pytest.skip("Skipping Test")
@@ -53,61 +153,60 @@ class ABFSTestBase(TestCase):
     grant_access('test', 'test', 'filebrowser')
     add_to_group('test')
     self.user = User.objects.get(username="test")
-      
-    self.test_fs = 'abfs://test' + (str(int(time.time()) ))
+
+    self.test_fs = 'abfs://test' + (str(int(time.time())))
     LOG.debug("%s" % self.test_fs)
     self.client.mkdir(self.test_fs)
 
   def teardown_method(self, method):
     self.client.rmtree(self.test_fs)
-    
+
   def test_list(self):
     testfile = 'abfs://'
     filesystems = self.client.listdir(testfile)
     LOG.debug("%s" % filesystems)
     assert filesystems is not None, filesystems
-    
-    pathing = self.client.listdir(testfile + filesystems[0],  {"recursive" : "true"} )
+
+    pathing = self.client.listdir(testfile + filesystems[0], {"recursive": "true"})
     LOG.debug("%s" % pathing)
     assert pathing is not None, pathing
-    
+
     directory = self.client.listdir(testfile + filesystems[0] + '/' + pathing[0])
     LOG.debug("%s" % directory)
     assert directory is not None, directory
-    
+
     directory = self.client.listdir(self.test_fs)
     LOG.debug("%s" % directory)
     assert directory is not None, directory
-    
+
     directory = self.client.listdir(abfspath(self.test_fs))
     LOG.debug("%s" % directory)
     assert directory is not None, directory
-    
+
     pathing = self.client._statsf(filesystems[276])
     LOG.debug("%s" % pathing)
     assert pathing is not None, pathing
-    
+
     pathing = self.client._statsf(filesystems[277])
     LOG.debug("%s" % pathing)
     assert pathing is not None, pathing
-    
-    
+
   def test_existence(self):
     test_fs = self.test_fs
     test_dir = test_fs + '/test_existence'
     test_file = test_dir + '/test.txt'
     self.client.mkdir(test_dir)
     self.client.create(test_file)
-    
-    #Testing root and filesystems
+
+    # Testing root and filesystems
     assert self.client.exists('abfs://')
     assert self.client.exists(test_fs)
-    
-    #testing created directories and files
+
+    # testing created directories and files
     assert self.client.exists(test_dir)
     assert self.client.exists(test_file)
     assert not self.client.exists(test_dir + 'a')
-     
+
   def test_stat_output(self):
     """
     Only tests if the stat outputs something
@@ -119,59 +218,58 @@ class ABFSTestBase(TestCase):
     self.client.mkdir(test_dir)
     self.client.mkdir(test_dir2)
     self.client.mkdir(test_dir3)
-    
-    #testing filesystems
+
+    # testing filesystems
     result = self.client.stats(test_fs)
     LOG.debug("%s" % result)
     assert result is not None, result
     result = self.client.listdir_stats(test_fs)
     LOG.debug("%s" % result)
-    
-    #testing directories
+
+    # testing directories
     result = self.client.stats(test_dir)
     LOG.debug("%s" % result)
     result = self.client.listdir_stats(test_dir)
     LOG.debug("%s" % result)
-    
+
     result = self.client.stats(test_dir2)
     LOG.debug("%s" % result)
     result = self.client.listdir_stats(test_dir2)
     LOG.debug("%s" % result)
-    
+
     result = self.client.stats(test_dir3)
     LOG.debug("%s" % result)
     result = self.client.listdir_stats(test_dir3)
     LOG.debug("%s" % result)
-    
+
   def test_mkdir(self):
     test_dir = self.test_fs + '/test_mkdir'
     assert not self.client.exists(test_dir)
-    
+
     self.client.mkdir(test_dir)
     assert self.client.exists(test_dir)
     self.client.isdir(test_dir)
-    
-    
+
   def test_append_and_flush(self):
     test_fs = self.test_fs
     test_file = test_fs + '/test.txt'
     self.client.create(test_file)
-    
+
     test_string = "This is a test."
     test_len = len(test_string)
-    resp = self.client._append(test_file, test_string) #only works with strings
+    resp = self.client._append(test_file, test_string)  # only works with strings
     LOG.debug("%s" % self.client.stats(test_file))
     try:
       LOG.debug("%s" % resp)
-      resp = self.client.read(test_file, length = test_len)
-    except:
+      resp = self.client.read(test_file, length=test_len)
+    except Exception:
       LOG.debug("Not written yet")
-    
-    self.client.flush(test_file, {"position" : test_len} )
+
+    self.client.flush(test_file, {"position": test_len})
     resp = self.client.read(test_file)
     assert resp == test_string
     self.client.remove(test_file)
-  
+
   def test_rename(self):
     test_fs = self.test_fs
     test_dir = test_fs + '/test'
@@ -180,67 +278,66 @@ class ABFSTestBase(TestCase):
     test_file = test_fs + '/test.txt'
     test_file2 = test_fs + '/test2.txt'
     test_file3 = test_fs + '/test 3.txt'
-    
+
     self.client.mkdir(test_dir)
     assert self.client.exists(test_dir)
     assert not self.client.exists(test_dir2)
-    
+
     self.client.rename(test_dir, test_dir2)
     assert not self.client.exists(test_dir)
     assert self.client.exists(test_dir2)
-    
+
     self.client.create(test_file)
     assert self.client.exists(test_file)
     assert not self.client.exists(test_file2)
-    
+
     self.client.rename(test_file, test_file2)
     assert not self.client.exists(test_file)
     assert self.client.exists(test_file2)
-    
+
     self.client.rename(test_dir2, test_dir3)
     assert not self.client.exists(test_dir2)
     assert self.client.exists(test_dir3)
-    
+
     self.client.rename(test_dir3, test_dir2)
     assert not self.client.exists(test_dir3)
     assert self.client.exists(test_dir2)
-    
-    
+
   def test_chmod(self):
     test_dir = self.test_fs + '/test_chmod'
     self.client.mkdir(test_dir)
-    test_dir_permission = test_dir +'/test'
-    test_file_permission = test_dir +'/test.txt'
-    
+    test_dir_permission = test_dir + '/test'
+    test_file_permission = test_dir + '/test.txt'
+
     self.client.create(test_file_permission)
     self.client.chmod(test_file_permission, '0777')
     self.client.stats(test_file_permission)
-    
+
     self.client.mkdir(test_dir_permission)
     self.client.chmod(test_dir_permission, '0000')
     self.client.chmod(test_dir_permission, '0777')
     self.client.stats(test_dir_permission)
-    
+
   def test_chown(self):
     test_dir = self.test_fs + '/test_chown'
     self.client.mkdir(test_dir)
-    test_dir_permission = test_dir +'/test'
-    test_file_permission = test_dir +'/test.txt'
-    
+    test_dir_permission = test_dir + '/test'
+    test_file_permission = test_dir + '/test.txt'
+
     self.client.create(test_file_permission)
-    self.client.chown(test_file_permission, group = '$superuser' )
+    self.client.chown(test_file_permission, group='$superuser')
     self.client.stats(test_file_permission)
-    
+
     self.client.mkdir(test_dir_permission)
-    self.client.chown(test_dir_permission, group = '$superuser')
+    self.client.chown(test_dir_permission, group='$superuser')
     self.client.stats(test_dir_permission)
-    
+
   def test_create_with_file_permissions(self):
     test_dir = self.test_fs + '/test_chown'
     test_file = test_dir + '/test.txt'
     self.client.mkdir(test_dir)
-    self.client.create(test_file, headers = {'x-ms-permissions' : '0777'})
-    
+    self.client.create(test_file, headers={'x-ms-permissions': '0777'})
+
   def test_upload(self):
     with tempfile.NamedTemporaryFile() as local_file:
       # Make sure we can upload larger than the UPLOAD chunk size
@@ -251,7 +348,7 @@ class ABFSTestBase(TestCase):
       dest_dir = self.test_fs + '/test_upload'
       local_file = local_file.name
       dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
-      
+
       add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
       # Just upload the current python file
       try:
@@ -259,15 +356,14 @@ class ABFSTestBase(TestCase):
         response = json.loads(resp.content)
       finally:
         remove_from_group(self.user.username, 'has_abfs')
-      
+
       assert 0 == response['status'], response
       stats = self.client.stats(dest_path)
 
       actual = self.client.read(dest_path)
       expected = open(local_file).read()
       assert actual == expected, 'files do not match: %s != %s' % (len(actual), len(expected))
-   
-   
+
   def test_copy_file(self):
     test_fs = self.test_fs
     testdir1 = test_fs + '/testcpy1'
@@ -276,19 +372,18 @@ class ABFSTestBase(TestCase):
     self.client.mkdir(testdir1)
     self.client.mkdir(testdir2)
     self.client.create(test_file)
-    
+
     test_string = "This is a test."
     test_len = len(test_string)
     resp = self.client._append(test_file, test_string)
-    self.client.flush(test_file, {"position" : test_len} )
-    
+    self.client.flush(test_file, {"position": test_len})
+
     self.client.copy(test_file, testdir2)
     self.client.stats(testdir2 + '/test.txt')
     resp = self.client.read(testdir2 + '/test.txt')
     resp2 = self.client.read(test_file)
     assert resp == resp2, "Files %s and %s are not equal" % (test_file, testdir2 + '/test.txt')
-    
-  
+
   def test_copy_dir(self):
     test_fs = self.test_fs
     testdir1 = test_fs + '/testcpy1'
@@ -299,12 +394,11 @@ class ABFSTestBase(TestCase):
     self.client.mkdir(testdir2)
     self.client.mkdir(test_dir3)
     self.client.mkdir(test_dir4)
-    
-    
+
     self.client.copy(test_dir3, testdir2)
     self.client.stats(testdir2 + '/test')
     self.client.stats(testdir2 + '/test/test2')
-    
+
   @staticmethod
   def test_static_methods():
     test_dir = 'abfss://testfs/test_static/'
@@ -315,5 +409,3 @@ class ABFSTestBase(TestCase):
     LOG.debug("%s" % parent)
     join_path = ABFS.join(test_dir, 'test1')
     LOG.debug("%s" % join_path)
-
-    

+ 59 - 17
desktop/libs/azure/src/azure/conf.py

@@ -15,12 +15,11 @@
 # limitations under the License.
 from __future__ import absolute_import
 
-import logging
 import sys
+import logging
 
-from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_password_from_script, coerce_bool
+from desktop.lib.conf import Config, ConfigSection, UnspecifiedConfigSection, coerce_bool, coerce_password_from_script
 from desktop.lib.idbroker import conf as conf_idbroker
-
 from hadoop import core_site
 
 if sys.version_info[0] > 2:
@@ -36,6 +35,7 @@ REFRESH_URL = 'https://login.microsoftonline.com/<tenant_id>/oauth2/<version>tok
 META_DATA_URL = 'http://169.254.169.254/metadata/instance'
 AZURE_METADATA = None
 
+
 def get_default_client_id():
   """
   Attempt to set Azure client id from script, else core-site, else None
@@ -43,6 +43,7 @@ def get_default_client_id():
   client_id_script = AZURE_ACCOUNTS['default'].CLIENT_ID_SCRIPT.get()
   return client_id_script or core_site.get_adls_client_id() or core_site.get_azure_client_id()
 
+
 def get_default_secret_key():
   """
   Attempt to set Azure secret key from script, else core-site, else None
@@ -50,35 +51,46 @@ def get_default_secret_key():
   client_secret_script = AZURE_ACCOUNTS['default'].CLIENT_SECRET_SCRIPT.get()
   return client_secret_script or core_site.get_adls_authentication_code() or core_site.get_azure_client_secret()
 
+
 def get_default_tenant_id():
   """
   Attempt to set Azure tenant id from script, else core-site, else None
   """
   return AZURE_ACCOUNTS['default'].TENANT_ID_SCRIPT.get()
 
+
 def get_refresh_url(conf, version):
   refresh_url = core_site.get_adls_refresh_url() or core_site.get_azure_client_endpoint()
   if not refresh_url:
     refresh_url = REFRESH_URL.replace('<tenant_id>', conf.TENANT_ID.get()).replace('<version>', version + '/' if version else '')
   return refresh_url
 
+
 def get_default_region():
   return ""
 
+
 def get_default_adls_url():
   return ADLS_CLUSTERS['default'].WEBHDFS_URL.get()
 
+
 def get_default_adls_fs():
   return ADLS_CLUSTERS['default'].FS_DEFAULTFS.get()
 
+
 def get_default_abfs_url():
   return ABFS_CLUSTERS['default'].WEBHDFS_URL.get()
 
+
 def get_default_abfs_fs():
   default_fs = core_site.get_default_fs()
 
-  return default_fs if default_fs and default_fs.startswith('abfs://') and \
-                       ABFS_CLUSTERS['default'].ENABLE_DEFAULTFS_FROM_CORESITE.get() else ABFS_CLUSTERS['default'].FS_DEFAULTFS.get()
+  return (
+    default_fs
+    if default_fs and default_fs.startswith('abfs://') and ABFS_CLUSTERS['default'].ENABLE_DEFAULTFS_FROM_CORESITE.get()
+    else ABFS_CLUSTERS['default'].FS_DEFAULTFS.get()
+  )
+
 
 ADLS_CLUSTERS = UnspecifiedConfigSection(
   "adls_clusters",
@@ -148,44 +160,72 @@ ABFS_CLUSTERS = UnspecifiedConfigSection(
         key="enable_defaultfs_from_coresite",
         type=coerce_bool,
         default=True,
-        help="Enable this param to use the defaultFS from core-site.xml"),
-      FS_DEFAULTFS=Config("fs_defaultfs", help="abfs://<container_name>@<account_name>.dfs.core.windows.net", type=str, default=None),
-      WEBHDFS_URL=Config("webhdfs_url",
-                         help="https://<account_name>.dfs.core.windows.net",
-                         type=str, default=None),
+        help="Enable this to use the defaultFS value from core-site.xml"
+      ),
+      FS_DEFAULTFS=Config(
+        key="fs_defaultfs",
+        help="abfs://<container_name>@<account_name>.dfs.core.windows.net",
+        type=str,
+        default=None
+      ),
+      WEBHDFS_URL=Config(
+        key="webhdfs_url",
+        help="https://<account_name>.dfs.core.windows.net",
+        type=str,
+        default=None
+      ),
+      DEFAULT_HOME_PATH=Config(
+        key="default_home_path",
+        type=str,
+        default=None,
+        help="Optionally set this to a different home directory path, e.g. abfs://gethue"
+      ),
     )
   )
 )
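A quick way to exercise the new `DEFAULT_HOME_PATH` entry is the same `set_for_testing` idiom the unit tests above use; a hedged sketch, assuming a Hue/Django test context where `azure.conf` is loaded:

```python
from azure.conf import ABFS_CLUSTERS

# Temporarily set a per-FS home path, read it back, then restore the original config.
reset = ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-container/dir'}})
try:
  assert ABFS_CLUSTERS['default'].DEFAULT_HOME_PATH.get() == 'abfs://gethue-container/dir'
finally:
  reset()
```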
 
+
 def is_raz_abfs():
   from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
   return (RAZ.IS_ENABLED.get() and 'default' in list(ABFS_CLUSTERS.keys()))
 
+
 def is_adls_enabled():
-  return ('default' in list(AZURE_ACCOUNTS.keys()) and AZURE_ACCOUNTS['default'].get_raw() and AZURE_ACCOUNTS['default'].CLIENT_ID.get() \
+  return ('default' in list(AZURE_ACCOUNTS.keys()) and AZURE_ACCOUNTS['default'].get_raw() and AZURE_ACCOUNTS['default'].CLIENT_ID.get()
     or (conf_idbroker.is_idbroker_enabled('azure') and has_azure_metadata())) and 'default' in list(ADLS_CLUSTERS.keys())
 
+
 def is_abfs_enabled():
-  return is_raz_abfs() or \
-    ('default' in list(AZURE_ACCOUNTS.keys()) and AZURE_ACCOUNTS['default'].get_raw() and AZURE_ACCOUNTS['default'].CLIENT_ID.get() or \
-    (conf_idbroker.is_idbroker_enabled('azure') and has_azure_metadata())) and 'default' in list(ABFS_CLUSTERS.keys())
+  return (
+    is_raz_abfs()
+    or (
+      'default' in list(AZURE_ACCOUNTS.keys())
+      and AZURE_ACCOUNTS['default'].get_raw()
+      and AZURE_ACCOUNTS['default'].CLIENT_ID.get()
+      or (conf_idbroker.is_idbroker_enabled('azure') and has_azure_metadata())
+    )
+    and 'default' in list(ABFS_CLUSTERS.keys())
+  )
+
 
 def has_adls_access(user):
-  from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
   from desktop.auth.backend import is_admin
+  from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
 
   return user.is_authenticated and user.is_active and (
     is_admin(user) or user.has_hue_permission(action="adls_access", app="filebrowser") or RAZ.IS_ENABLED.get()
   )
 
+
 def has_abfs_access(user):
-  from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
   from desktop.auth.backend import is_admin
+  from desktop.conf import RAZ  # Must be imported dynamically in order to have proper value
 
   return user.is_authenticated and user.is_active and (
     is_admin(user) or user.has_hue_permission(action="abfs_access", app="filebrowser") or RAZ.IS_ENABLED.get()
   )
 
+
 def azure_metadata():
   global AZURE_METADATA
   if AZURE_METADATA is None:
@@ -198,13 +238,15 @@ def azure_metadata():
       AZURE_METADATA = False
   return AZURE_METADATA
 
+
 def has_azure_metadata():
   return azure_metadata() is not None
 
+
 def config_validator(user):
   res = []
 
-  import desktop.lib.fsmanager # Avoid cyclic loop
+  import desktop.lib.fsmanager  # Avoid cyclic loop
 
   if is_adls_enabled() or is_abfs_enabled():
     try:

+ 8 - 10
desktop/libs/hadoop/src/hadoop/core_site_tests.py

@@ -16,15 +16,14 @@
 # limitations under the License.
 
 from __future__ import absolute_import
-from hadoop import conf
-import logging
+
 import os
 import sys
+import logging
 import tempfile
 
 from desktop.models import get_remote_home_storage
-
-from hadoop import core_site
+from hadoop import conf, core_site
 
 if sys.version_info[0] > 2:
   open_file = open
@@ -65,10 +64,10 @@ def test_core_site():
     <name>fs.s3a.bucket.gethue-dev.endpoint</name>
     <value>s3.us-west-2.amazonaws.com</value>
   </property>
-  <property>    
-    <name>fs.azure.ext.raz.rest.host.url</name>    
-    <value>https://gehue-adls-master:6082/</value>  
-  </property> 
+  <property>
+    <name>fs.azure.ext.raz.rest.host.url</name>
+    <value>https://gehue-adls-master:6082/</value>
+  </property>
   <property>
     <name>fs.azure.ext.raz.adls.access.cluster.name</name>
     <value>gehue-adls</value>
@@ -76,7 +75,7 @@ def test_core_site():
   <property>
     <name>fs.defaultFS</name>
     <value>abfs://data@gethuedevstorage.dfs.core.windows.net/hue-adls</value>
-  </property> 
+  </property>
 </configuration>
     """
     open_file(os.path.join(hadoop_home, 'core-site.xml'), 'w').write(xml)
@@ -93,7 +92,6 @@ def test_core_site():
 
     assert core_site.get_default_fs() == 'abfs://data@gethuedevstorage.dfs.core.windows.net/hue-adls'
 
-    assert get_remote_home_storage() == 's3a://gethue-dev'
   finally:
     core_site.reset()
     for f in finish:

Some files were not shown because too many files changed in this diff.