HUE-8823 [fs] Allow to start without an HDFS cluster

Also works when S3 is the only filesystem.
Same when ADLS (Azure) is the only one configured.

Tested:
app_blacklist=security,search,jobsub,pig,hbase,sqoop,zookeeper,spark,oozie,hive,impala,solr
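For reference, a minimal hue.ini sketch for this kind of test; the [desktop] app_blacklist key is standard Hue configuration, while the [aws] credential values are placeholder assumptions:

  [desktop]
  app_blacklist=security,search,jobsub,pig,hbase,sqoop,zookeeper,spark,oozie,hive,impala,solr

  # No [hadoop] [[hdfs_clusters]] entry: HDFS stays disabled and S3
  # becomes the default filesystem.
  [aws]
  [[aws_accounts]]
  [[[default]]]
  access_key_id=...
  secret_access_key=...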
Romain, 6 years ago
Commit 271a04d43c

+ 20 - 5
desktop/core/src/desktop/lib/fsmanager.py

@@ -22,23 +22,33 @@ import logging
 
 import aws
 import azure.client
-from azure.conf import is_adls_enabled
+
 from aws.conf import is_enabled as is_s3_enabled
+from azure.conf import is_adls_enabled
+from hadoop.cluster import get_hdfs
+from hadoop.conf import has_hdfs_enabled
 
 from desktop.lib.fs import ProxyFS
-from hadoop import cluster
+
 
 FS_CACHE = {}
 
-DEFAULT_SCHEMA = 'hdfs'
+DEFAULT_SCHEMA = None
 
 FS_GETTERS = {
-  'hdfs': cluster.get_hdfs,
 }
+
+if has_hdfs_enabled():
+  FS_GETTERS['hdfs'] = get_hdfs
+  DEFAULT_SCHEMA = 'hdfs'
 if is_s3_enabled():
   FS_GETTERS['s3a'] = aws.get_s3fs
+  if DEFAULT_SCHEMA is None:
+    DEFAULT_SCHEMA = 's3a'
 if is_adls_enabled():
   FS_GETTERS['adl'] = azure.client.get_client
+  if DEFAULT_SCHEMA is None:
+    DEFAULT_SCHEMA = 'adl'
 
 
 def get_filesystem(name='default'):
@@ -53,6 +63,7 @@ def get_filesystem(name='default'):
 
 def _make_fs(name):
   fs_dict = {}
+
   for schema, getter in FS_GETTERS.iteritems():
     try:
       if getter is not None:
@@ -69,7 +80,11 @@ def _make_fs(name):
         logging.warn('Can not get filesystem called "%s" for "%s" schema' % (name, schema))
     except Exception, e:
       logging.error('Failed to get filesystem called "%s" for "%s" schema: %s' % (name, schema, e))
-  return ProxyFS(fs_dict, DEFAULT_SCHEMA)
+
+  if fs_dict:
+    return ProxyFS(fs_dict, DEFAULT_SCHEMA)
+  else:
+    return None
 
 
 def clear_cache():
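With this change _make_fs() returns None instead of an empty ProxyFS when no filesystem at all is configured, so callers have to tolerate a missing fs. A minimal sketch of the resulting call pattern; the guard and the user name are assumptions, not code from this commit:

  import logging

  from desktop.lib import fsmanager

  fs = fsmanager.get_filesystem('default')
  if fs is None:
    # Neither HDFS, S3 nor ADLS is configured: skip file operations.
    logging.warn('No filesystem configured')
  else:
    fs.setuser('hue')  # ProxyFS dispatches each path to its scheme's backend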

+ 4 - 3
desktop/core/src/desktop/models.py

@@ -41,6 +41,7 @@ from settings import HUE_DESKTOP_VERSION
 from aws.conf import is_enabled as is_s3_enabled, has_s3_access
 from azure.conf import is_adls_enabled, has_adls_access
 from dashboard.conf import get_engines, HAS_REPORT_ENABLED
+from hadoop.conf import has_hdfs_enabled
 from kafka.conf import has_kafka
 from notebook.conf import SHOW_NOTEBOOKS, get_ordered_interpreters
 
@@ -1761,7 +1762,7 @@ class ClusterConfig():
   def _get_browser(self):
     interpreters = []
 
-    if 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type:
+    if has_hdfs_enabled() and 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type:
       interpreters.append({
         'type': 'hdfs',
         'displayName': _('Files'),
@@ -1770,7 +1771,7 @@ class ClusterConfig():
         'page': '/filebrowser/' + (not self.user.is_anonymous() and 'view=' + self.user.get_home_directory() or '')
       })
 
-    if is_s3_enabled() and has_s3_access(self.user) and not IS_EMBEDDED.get():
+    if is_s3_enabled() and 'filebrowser' in self.apps and has_s3_access(self.user) and not IS_EMBEDDED.get():
       interpreters.append({
         'type': 's3',
         'displayName': _('S3'),
@@ -1779,7 +1780,7 @@ class ClusterConfig():
         'page': '/filebrowser/view=S3A://'
       })
 
-    if is_adls_enabled() and has_adls_access(self.user) and ANALYTIC_DB not in self.cluster_type:
+    if is_adls_enabled() and 'filebrowser' in self.apps and has_adls_access(self.user) and ANALYTIC_DB not in self.cluster_type:
       interpreters.append({
         'type': 'adls',
         'displayName': _('ADLS'),
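Condensed, the three browser entries are now gated on parallel conditions; a simplified restatement of the logic above (names as in models.py):

  show_hdfs = has_hdfs_enabled() and 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type
  show_s3   = is_s3_enabled() and 'filebrowser' in self.apps and has_s3_access(self.user) and not IS_EMBEDDED.get()
  show_adls = is_adls_enabled() and 'filebrowser' in self.apps and has_adls_access(self.user) and ANALYTIC_DB not in self.cluster_type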

+ 1 - 2
desktop/core/src/desktop/templates/assist.mako

@@ -17,13 +17,12 @@
 <%!
 from django.utils.translation import ugettext as _
 
+from dashboard.conf import HAS_SQL_ENABLED
 from filebrowser.conf import SHOW_UPLOAD_BUTTON
 from metadata.conf import has_catalog, OPTIMIZER
 from metastore.conf import ENABLE_NEW_CREATE_TABLE
 from notebook.conf import ENABLE_QUERY_BUILDER, ENABLE_QUERY_SCHEDULING, get_ordered_interpreters
 
-from dashboard.conf import HAS_SQL_ENABLED
-
 from desktop import appmanager
 from desktop import conf
 from desktop.conf import IS_EMBEDDED, USE_NEW_SIDE_PANELS, VCS

+ 6 - 2
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -76,6 +76,10 @@ class S3FileSystem(object):
   def __init__(self, s3_connection):
     self._s3_connection = s3_connection
     self._filebrowser_action = PERMISSION_ACTION_S3
+    self.user = None
+    self.is_sentry_managed = lambda path: False
+    self.superuser = None
+    self.supergroup = None
 
   def _get_bucket(self, name):
     return self._s3_connection.get_bucket(name)
@@ -496,8 +500,8 @@ class S3FileSystem(object):
     return True
 
   def setuser(self, user):
-    pass  # user-concept doesn't have sense for this implementation
+    self.user = user  # Only used in Cluster middleware request.fs
 
   def get_upload_chuck_size(self):
     from hadoop.conf import UPLOAD_CHUNK_SIZE # circular dependency
-    return UPLOAD_CHUNK_SIZE.get()
+    return UPLOAD_CHUNK_SIZE.get()
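The new attributes give S3FileSystem the same user-related surface the HDFS client exposes, so ProxyFS and the cluster middleware can treat both uniformly. A small illustration; the connection object is a placeholder:

  fs = S3FileSystem(s3_connection)  # any boto S3 connection object
  fs.setuser('alice')               # now recorded instead of being ignored
  assert fs.user == 'alice'
  assert fs.is_sentry_managed('s3a://bucket/key') is False  # never Sentry-managed
  assert fs.superuser is None and fs.supergroup is None     # no HDFS superuser notion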

+ 1 - 3
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -15,8 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import logging
+import os
 
 from django.utils.functional import wraps
 
@@ -29,7 +29,6 @@ from desktop.lib.paths import get_build_dir
 
 LOG = logging.getLogger(__name__)
 
-
 FS_CACHE = None
 FS_DEFAULT_NAME = 'default'
 MR_CACHE = None # MR now means YARN
@@ -37,7 +36,6 @@ MR_NAME_CACHE = 'default'
 DEFAULT_USER = DEFAULT_USER.get()
 
 
-
 def rm_ha(funct):
   """
   Support RM HA by trying other RM API.

+ 4 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -54,6 +54,10 @@ UPLOAD_CHUNK_SIZE = Config(
   default=1024 * 1024 * 64)
 
 
+def has_hdfs_enabled():
+  return HDFS_CLUSTERS.keys()
+
+
 HDFS_CLUSTERS = UnspecifiedConfigSection(
   "hdfs_clusters",
   help="One entry for each HDFS cluster",
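Note that has_hdfs_enabled() returns the list of configured cluster names rather than a boolean; callers only rely on its truthiness. An illustration, with hypothetical hue.ini contents:

  # hue.ini defines [hadoop] [[hdfs_clusters]] [[[default]]]:
  has_hdfs_enabled()  # -> ['default'], truthy
  # hue.ini has no hdfs_clusters section:
  has_hdfs_enabled()  # -> [], falsy, so 'hdfs' is never registered in FS_GETTERS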

+ 1 - 1
docs/ROADMAP.md

@@ -37,4 +37,4 @@ Links
 * [Open Tasks](https://issues.cloudera.org/projects/HUE/issues)
 * [Easy Tasks](https://issues.cloudera.org/browse/HUE-8745?filter=10431)
 * [Designs](/docs/designs)
-* How to [contribute](CONTRIBUTING.md).
+* How to [contribute](CONTRIBUTING.md)