
[api] Refactor /filesystems API and include extra HDFS configs in its response (#3983)

- Removed the extra response fields from the /list API and included them in the /filesystems API, only for HDFS. This way the configs are returned only when HDFS is configured, and they no longer clutter the /list response for every filesystem (an example of the new response shape is shown after this list).

- Trash is also configurable now, which makes it easier to enable and test locally. In production deployments, however, it is recommended to keep reading the value from `core-site.xml` so that Hue stays in sync with the running HDFS service. For this reason, the config has not been added explicitly to the `.ini` files, and it always defaults dynamically to the `core-site.xml` value.
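
For reference, the refactored /filesystems response looks roughly like the following. This is a sketch with illustrative values mirroring the test expectations further down; actual home directories and superuser/supergroup names depend on the deployment.

```python
# Rough shape of the refactored /filesystems response (illustrative values).
filesystems_response = [
    {
        'file_system': 'hdfs',
        'user_home_directory': '/user/test-user',
        'config': {
            'is_trash_enabled': False,
            'is_hdfs_superuser': False,
            'groups': [],   # only populated when the requesting user is an HDFS superuser
            'users': [],    # only populated when the requesting user is an HDFS superuser
            'superuser': 'test-user',
            'supergroup': 'test-supergroup',
        },
    },
    # Non-HDFS filesystems return an empty config block.
    {'file_system': 's3a', 'user_home_directory': 's3a://test-bucket/test-user-home-dir/', 'config': {}},
]
```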
Harsh Gupta, 9 months ago
parent
current commit
34f8b8d810

+ 49 - 42
apps/filebrowser/src/filebrowser/api.py

@@ -64,6 +64,7 @@ from filebrowser.views import (
   read_contents,
   stat_absolute_path,
 )
+from hadoop.conf import has_hdfs_enabled, is_hdfs_trash_enabled
 from hadoop.core_site import get_trash_interval
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.fs.fsutils import do_overwrite_save
@@ -86,6 +87,7 @@ def error_handler(view_fn):
   return decorator
 
 
+# Deprecated in favor of get_all_filesystems method for new filebrowser
 @error_handler
 def get_filesystems(request):
   response = {}
@@ -116,29 +118,54 @@ def api_error_handler(view_fn):
   return decorator
 
 
+def _get_hdfs_home_directory(user):
+  return user.get_home_directory()
+
+
+def _get_config(fs, request):
+  config = {}
+  if fs == 'hdfs':
+    is_hdfs_superuser = _is_hdfs_superuser(request)
+    config = {
+      'is_trash_enabled': is_hdfs_trash_enabled(),
+      # TODO: Check if any of the below fields should be part of new Hue user and group management APIs
+      'is_hdfs_superuser': is_hdfs_superuser,
+      'groups': [str(x) for x in Group.objects.values_list('name', flat=True)] if is_hdfs_superuser else [],
+      'users': [str(x) for x in User.objects.values_list('username', flat=True)] if is_hdfs_superuser else [],
+      'superuser': request.fs.superuser,
+      'supergroup': request.fs.supergroup,
+    }
+  return config
+
+
 @api_error_handler
-def get_filesystems_with_home_dirs(request):
-  filesystems = []
-  user_home_dir = ''
+def get_all_filesystems(request):
+  """
+  Retrieves all configured filesystems along with user-specific configurations.
+
+  This endpoint collects information about available filesystems (e.g., HDFS, S3, GS, etc.),
+  user home directories, and additional configurations specific to each filesystem type.
 
+  Args:
+    request (HttpRequest): The incoming HTTP request object.
+
+  Returns:
+    JsonResponse: A JSON response containing a list of filesystems with their configurations.
+  """
+  fs_home_dir_mapping = {
+    'hdfs': _get_hdfs_home_directory,
+    's3a': get_s3_home_directory,
+    'gs': get_gs_home_directory,
+    'abfs': get_abfs_home_directory,
+    'ofs': get_ofs_home_directory,
+  }
+
+  filesystems = []
   for fs in fsmanager.get_filesystems(request.user):
-    if fs == 'hdfs':
-      user_home_dir = request.user.get_home_directory()
-    elif fs == 's3a':
-      user_home_dir = get_s3_home_directory(request.user)
-    elif fs == 'gs':
-      user_home_dir = get_gs_home_directory(request.user)
-    elif fs == 'abfs':
-      user_home_dir = get_abfs_home_directory(request.user)
-    elif fs == 'ofs':
-      user_home_dir = get_ofs_home_directory()
-
-    filesystems.append(
-      {
-        'file_system': fs,
-        'user_home_directory': user_home_dir,
-      }
-    )
+    user_home_dir = fs_home_dir_mapping[fs](request.user)
+    config = _get_config(fs, request)
+
+    filesystems.append({'file_system': fs, 'user_home_directory': user_home_dir, 'config': config})
 
   return JsonResponse(filesystems, safe=False)
 
@@ -209,12 +236,7 @@ def download(request):
 
 
 def _massage_page(page, paginator):
-  return {
-      'page_number': page.number,
-      'page_size': paginator.per_page,
-      'total_pages': paginator.num_pages,
-      'total_size': paginator.count
-  }
+  return {'page_number': page.number, 'page_size': paginator.per_page, 'total_pages': paginator.num_pages, 'total_size': paginator.count}
 
 
 @api_error_handler
@@ -280,22 +302,7 @@ def listdir_paged(request):
   if page:
     page.object_list = [_massage_stats(request, stat_absolute_path(path, s)) for s in shown_stats]
 
-  # TODO: Shift below fields to /get_config?
-  is_hdfs = request.fs._get_scheme(path) == 'hdfs'
-  is_trash_enabled = is_hdfs and int(get_trash_interval()) > 0
-  is_fs_superuser = is_hdfs and _is_hdfs_superuser(request)
-
-  response = {
-    'is_trash_enabled': is_trash_enabled,
-    'files': page.object_list if page else [],
-    'page': _massage_page(page, paginator) if page else {},
-    # TODO: Check what to keep or what to remove? or move some fields to /get_config?
-    'is_fs_superuser': is_fs_superuser,
-    'groups': is_fs_superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
-    'users': is_fs_superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
-    'superuser': request.fs.superuser,
-    'supergroup': request.fs.supergroup,
-  }
+  response = {'files': page.object_list if page else [], 'page': _massage_page(page, paginator) if page else {}}
 
   return JsonResponse(response)
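
With the HDFS-specific fields removed, the /list (listdir_paged) response is reduced to file and paging data. A rough sketch of the new shape, with illustrative values:

```python
# Slimmed /list (listdir_paged) response after this change (illustrative values).
list_response = {
    'files': [],  # per-file stats produced by _massage_stats()
    'page': {'page_number': 1, 'page_size': 30, 'total_pages': 1, 'total_size': 0},
}
# The HDFS-only fields that used to be returned here (is_trash_enabled,
# is_fs_superuser, groups, users, superuser, supergroup) now live under the
# 'config' key of the 'hdfs' entry in the /filesystems response.
```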
 

+ 63 - 1
apps/filebrowser/src/filebrowser/api_test.py

@@ -20,7 +20,7 @@ from unittest.mock import Mock, patch
 
 from django.core.files.uploadedfile import SimpleUploadedFile
 
-from filebrowser.api import rename, upload_file
+from filebrowser.api import get_all_filesystems, rename, upload_file
 from filebrowser.conf import (
   MAX_FILE_SIZE_UPLOAD_LIMIT,
   RESTRICT_FILE_EXTENSIONS,
@@ -442,3 +442,65 @@ class TestRenameAPI:
       assert response.content.decode('utf-8') == 'Missing required parameters: source_path and destination_path'
     finally:
       reset()
+
+
+class TestGetFilesystemsAPI:
+  def test_get_all_filesystems_without_hdfs(self):
+    with patch('filebrowser.api.fsmanager.get_filesystems') as get_filesystems:
+      with patch('filebrowser.api.get_s3_home_directory') as get_s3_home_directory:
+        with patch('filebrowser.api._is_hdfs_superuser') as _is_hdfs_superuser:
+          get_filesystems.return_value = ['s3a', 'ofs']
+          get_s3_home_directory.return_value = 's3a://test-bucket/test-user-home-dir/'
+          _is_hdfs_superuser.return_value = False
+          request = Mock(
+            method='GET',
+            user=Mock(),
+          )
+
+          response = get_all_filesystems(request)
+          response_data = json.loads(response.content)
+
+          assert response.status_code == 200
+          assert response_data == [
+            {'file_system': 's3a', 'user_home_directory': 's3a://test-bucket/test-user-home-dir/', 'config': {}},
+            {'file_system': 'ofs', 'user_home_directory': 'ofs://', 'config': {}},
+          ]
+
+  def test_get_all_filesystems_success(self):
+    with patch('filebrowser.api.fsmanager.get_filesystems') as get_filesystems:
+      with patch('filebrowser.api.get_s3_home_directory') as get_s3_home_directory:
+        with patch('filebrowser.api._is_hdfs_superuser') as _is_hdfs_superuser:
+          with patch('filebrowser.api.User') as User:
+            with patch('filebrowser.api.Group') as Group:
+              get_filesystems.return_value = ['hdfs', 's3a', 'ofs']
+              get_s3_home_directory.return_value = 's3a://test-bucket/test-user-home-dir/'
+              _is_hdfs_superuser.return_value = False
+              request = Mock(
+                method='GET',
+                user=Mock(get_home_directory=Mock(return_value='/user/test-user')),
+                fs=Mock(
+                  superuser='test-user',
+                  supergroup='test-supergroup',
+                ),
+              )
+
+              response = get_all_filesystems(request)
+              response_data = json.loads(response.content)
+
+              assert response.status_code == 200
+              assert response_data == [
+                {
+                  'file_system': 'hdfs',
+                  'user_home_directory': '/user/test-user',
+                  'config': {
+                    'is_trash_enabled': False,
+                    'is_hdfs_superuser': False,
+                    'groups': [],
+                    'users': [],
+                    'superuser': 'test-user',
+                    'supergroup': 'test-supergroup',
+                  },
+                },
+                {'file_system': 's3a', 'user_home_directory': 's3a://test-bucket/test-user-home-dir/', 'config': {}},
+                {'file_system': 'ofs', 'user_home_directory': 'ofs://', 'config': {}},
+              ]

+ 1 - 1
desktop/core/src/desktop/api_public.py

@@ -225,7 +225,7 @@ def analyze_table(request, dialect, database, table, columns=None):
 @api_view(["GET"])
 def storage_get_filesystems(request):
   django_request = get_django_request(request)
-  return filebrowser_api.get_filesystems_with_home_dirs(django_request)
+  return filebrowser_api.get_all_filesystems(django_request)
 
 
 @api_view(["GET"])
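
For consumers of the public API, the handler stays exposed through `storage_get_filesystems`. A minimal sketch of calling it over HTTP; the host, token, and route path here are assumptions for illustration (verify the actual mount point in the public API URL configuration), not something introduced by this change:

```python
import requests

# Hypothetical local setup: base URL, token, and route path are assumptions,
# not defined by this change; check Hue's public API URL config for the real path.
HUE_BASE = 'http://localhost:8888'
TOKEN = '<bearer-token>'

resp = requests.get(
    f'{HUE_BASE}/api/v1/storage/filesystems',
    headers={'Authorization': f'Bearer {TOKEN}'},
)
resp.raise_for_status()
for fs in resp.json():
    print(fs['file_system'], fs['user_home_directory'], fs.get('config', {}))
```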

+ 2 - 1
desktop/core/src/desktop/lib/fs/ozone/ofs.py

@@ -39,7 +39,8 @@ from hadoop.hdfs_site import get_umask_mode
 LOG = logging.getLogger()
 
 
-def get_ofs_home_directory():
+def get_ofs_home_directory(user=None):
+  # TODO: Check if Ozone bring the concept of home directory in the future
   return OFS_ROOT
 
 

+ 15 - 3
desktop/libs/hadoop/src/hadoop/conf.py

@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import os
-import sys
 import fnmatch
 import logging
 
@@ -24,6 +23,7 @@ from django.utils.translation import gettext_lazy as _t
 
 from desktop.conf import default_ssl_validate, has_connectors
 from desktop.lib.conf import Config, ConfigSection, UnspecifiedConfigSection, coerce_bool
+from hadoop.core_site import get_trash_interval
 
 LOG = logging.getLogger()
 DEFAULT_NN_HTTP_PORT = 50070
@@ -35,10 +35,10 @@ def find_file_recursive(desired_glob, root):
       matches = fnmatch.filter(filenames, desired_glob)
       if matches:
         if len(matches) != 1:
-          logging.warning("Found multiple jars matching %s: %s" % (desired_glob, matches))
+          LOG.warning("Found multiple jars matching %s: %s" % (desired_glob, matches))
         return os.path.join(dirpath, matches[0])
 
-    logging.error("Trouble finding jars matching %s" % (desired_glob,))
+    LOG.error("Trouble finding jars matching %s" % (desired_glob,))
     return None
 
   f.__doc__ = "Finds %s/%s" % (root, desired_glob)
@@ -65,6 +65,12 @@ def get_hadoop_conf_dir_default():
   return os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf")
 
 
+def is_hdfs_trash_enabled():
+  return (
+    'default' in list(HDFS_CLUSTERS.keys()) and HDFS_CLUSTERS['default'].get_raw() and (HDFS_CLUSTERS['default'].TRASH_INTERVAL.get() > 0)
+  )
+
+
 HDFS_CLUSTERS = UnspecifiedConfigSection(
   "hdfs_clusters",
   help="One entry for each HDFS cluster",
@@ -130,6 +136,12 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
           default=True,  # True here for backward compatibility
           type=coerce_bool
       ),
+      TRASH_INTERVAL=Config(
+          'trash_interval',
+          help="Set the interval for trash collection in HDFS. Set to 0 to disable trash.",
+          dynamic_default=get_trash_interval,
+          type=int
+      ),
     )
   )
 )
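
To exercise the new flag locally without editing `core-site.xml`, the per-cluster `trash_interval` can be overridden the way other Hue configs are overridden in tests. A minimal sketch, assuming the usual `set_for_testing()` helper on Hue `Config` objects (the same reset-style pattern used in the filebrowser tests above):

```python
from hadoop.conf import HDFS_CLUSTERS, is_hdfs_trash_enabled

# Override the dynamic default (which reads fs.trash.interval from core-site.xml)
# with an explicit value for a local run; assumes a 'default' HDFS cluster is configured.
finish = HDFS_CLUSTERS['default'].TRASH_INTERVAL.set_for_testing(60)
try:
    assert is_hdfs_trash_enabled()
finally:
    finish()  # restore the dynamic core-site.xml-based default
```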