[core] Moved MR and YARN cluster HA decorators to desktop hadoop lib so that they can be reused

Jenny Kim 10 years ago
parent
commit
aeb1d88

+ 3 - 46
apps/jobbrowser/src/jobbrowser/api.py

@@ -17,9 +17,11 @@
 
 import logging
 
+from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.paginator import Paginator
-from django.utils.functional import wraps
+
 from hadoop import cluster
+from hadoop.cluster import jt_ha, rm_ha
 from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
 
 import hadoop.yarn.history_server_api as history_server_api
@@ -30,8 +32,6 @@ import hadoop.yarn.node_manager_api as node_manager_api
 from jobbrowser.conf import SHARE_JOBS
 from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
 from jobbrowser.yarn_models import Application, Job as YarnJob, KilledJob as KilledYarnJob, Container, SparkJob
-from hadoop.cluster import get_next_ha_mrcluster, get_next_ha_yarncluster
-from desktop.lib.exceptions_renderable import PopupException
 
 
 LOG = logging.getLogger(__name__)
@@ -46,49 +46,6 @@ def get_api(user, jt):
     return JtApi(jt)
 
 
-def jt_ha(funct):
-  """
-  Support JT plugin HA by trying other MR cluster.
-
-  This modifies the cached JT and so will happen just once by failover.
-  """
-  def decorate(api, *args, **kwargs):
-    try:
-      return funct(api, *args, **kwargs)
-    except Exception, ex:
-      if 'Could not connect to' in str(ex):
-        LOG.info('JobTracker not available, trying JT plugin HA: %s.' % ex)
-        jt_ha = get_next_ha_mrcluster()
-        if jt_ha is not None:
-          if jt_ha[1].host == api.jt.host:
-            raise ex
-          config, api.jt = jt_ha
-          return funct(api, *args, **kwargs)
-      raise ex
-  return wraps(funct)(decorate)
-
-
-def rm_ha(funct):
-  """
-  Support RM HA by trying other RM API.
-  """
-  def decorate(api, *args, **kwargs):
-    try:
-      return funct(api, *args, **kwargs)
-    except Exception, ex:
-      ex_message = str(ex)
-      if 'Connection refused' in ex_message or 'standby RM' in ex_message:
-        LOG.info('Resource Manager not available, trying another RM: %s.' % ex)
-        rm_ha = get_next_ha_yarncluster()
-        if rm_ha is not None:
-          if rm_ha[1].url == api.resource_manager_api.url:
-            raise ex
-          config, api.resource_manager_api = rm_ha
-          return funct(api, *args, **kwargs)
-      raise ex
-  return wraps(funct)(decorate)
-
-
 class JobBrowserApi(object):
 
   def paginate_task(self, task_list, pagenum):
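
With the decorators now imported from hadoop.cluster (see the new `from hadoop.cluster import jt_ha, rm_ha` line at the top of this file), the job browser keeps decorating its API methods exactly as before. A minimal sketch of a call site, assuming the cached `JtApi.jt` and `YarnApi.resource_manager_api` attributes the decorators expect; the method names and bodies below are illustrative, not part of this diff:

from hadoop.cluster import jt_ha, rm_ha

class JtApi(object):
  def __init__(self, jt):
    self.jt = jt  # cached JobTracker client; jt_ha may swap it on failover

  @jt_ha
  def get_job(self, jobid):
    # On a 'Could not connect to' error, jt_ha replaces self.jt with the
    # next HA MR cluster and retries this call once.
    return self.jt.get_job(jobid)

class YarnApi(object):
  def __init__(self, rm_api):
    self.resource_manager_api = rm_api  # cached RM client; rm_ha may swap it

  @rm_ha
  def get_jobs(self, **kwargs):
    # On 'Connection refused' or 'standby RM' errors, rm_ha replaces
    # self.resource_manager_api with the next RM and retries once.
    return self.resource_manager_api.apps(**kwargs)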

+ 62 - 15
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -18,6 +18,8 @@
 import os
 import logging
 
+from django.utils.functional import wraps
+
 from hadoop import conf
 from hadoop.fs import webhdfs, LocalSubFileSystem
 from hadoop.job_tracker import LiveJobTracker
@@ -35,22 +37,47 @@ MR_NAME_CACHE = 'default'
 DEFAULT_USER = DEFAULT_USER.get()
 
 
-def _make_filesystem(identifier):
-  choice = os.getenv("FB_FS")
-
-  if choice == "testing":
-    path = os.path.join(get_build_dir(), "fs")
-    if not os.path.isdir(path):
-      LOG.warning(("Could not find fs directory: %s. Perhaps you need to run manage.py filebrowser_test_setup?") % path)
-    return LocalSubFileSystem(path)
-  else:
-    cluster_conf = conf.HDFS_CLUSTERS[identifier]
-    return webhdfs.WebHdfs.from_config(cluster_conf)
-
+def jt_ha(funct):
+  """
+  Support JT plugin HA by trying other MR cluster.
 
-def _make_mrcluster(identifier):
-  cluster_conf = conf.MR_CLUSTERS[identifier]
-  return LiveJobTracker.from_conf(cluster_conf)
+  This modifies the cached JT and so will happen just once by failover.
+  """
+  def decorate(api, *args, **kwargs):
+    try:
+      return funct(api, *args, **kwargs)
+    except Exception, ex:
+      if 'Could not connect to' in str(ex):
+        LOG.info('JobTracker not available, trying JT plugin HA: %s.' % ex)
+        jt_ha = get_next_ha_mrcluster()
+        if jt_ha is not None:
+          if jt_ha[1].host == api.jt.host:
+            raise ex
+          config, api.jt = jt_ha
+          return funct(api, *args, **kwargs)
+      raise ex
+  return wraps(funct)(decorate)
+
+
+def rm_ha(funct):
+  """
+  Support RM HA by trying other RM API.
+  """
+  def decorate(api, *args, **kwargs):
+    try:
+      return funct(api, *args, **kwargs)
+    except Exception, ex:
+      ex_message = str(ex)
+      if 'Connection refused' in ex_message or 'standby RM' in ex_message:
+        LOG.info('Resource Manager not available, trying another RM: %s.' % ex)
+        rm_ha = get_next_ha_yarncluster()
+        if rm_ha is not None:
+          if rm_ha[1].url == api.resource_manager_api.url:
+            raise ex
+          config, api.resource_manager_api = rm_ha
+          return funct(api, *args, **kwargs)
+      raise ex
+  return wraps(funct)(decorate)
 
 
 def get_hdfs(identifier="default"):
@@ -58,6 +85,7 @@ def get_hdfs(identifier="default"):
   get_all_hdfs()
   return FS_CACHE[identifier]
 
+
 def get_defaultfs():
   fs = get_hdfs()
 
@@ -66,6 +94,7 @@ def get_defaultfs():
   else:
     return fs.fs_defaultfs
 
+
 def get_all_hdfs():
   global FS_CACHE
   if FS_CACHE is not None:
@@ -272,3 +301,21 @@ def restore_caches(old):
   """
   global FS_CACHE, MR_CACHE
   FS_CACHE, MR_CACHE = old
+
+
+def _make_filesystem(identifier):
+  choice = os.getenv("FB_FS")
+
+  if choice == "testing":
+    path = os.path.join(get_build_dir(), "fs")
+    if not os.path.isdir(path):
+      LOG.warning(("Could not find fs directory: %s. Perhaps you need to run manage.py filebrowser_test_setup?") % path)
+    return LocalSubFileSystem(path)
+  else:
+    cluster_conf = conf.HDFS_CLUSTERS[identifier]
+    return webhdfs.WebHdfs.from_config(cluster_conf)
+
+
+def _make_mrcluster(identifier):
+  cluster_conf = conf.MR_CLUSTERS[identifier]
+  return LiveJobTracker.from_conf(cluster_conf)
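
Both moved decorators follow the same shape: call the wrapped bound method; if the exception message marks a dead backend, fetch the next HA candidate, give up if it is the one that just failed, otherwise mutate the cached client on the api object and retry exactly once. A standalone sketch of that pattern in Python 3; the names `failover`, `get_next_candidate`, and `is_same` are illustrative, not from this commit:

import functools
import logging

LOG = logging.getLogger(__name__)

def failover(get_next_candidate, attr, error_markers, is_same):
  """Retry a bound API method once against the next HA candidate.

  get_next_candidate -- returns a (config, client) tuple, or None
  attr               -- name of the cached client attribute on the api object
  error_markers      -- exception-message substrings that signal a dead backend
  is_same            -- predicate: candidate client is the one that just failed
  """
  def decorator(funct):
    @functools.wraps(funct)
    def decorate(api, *args, **kwargs):
      try:
        return funct(api, *args, **kwargs)
      except Exception as ex:
        if not any(marker in str(ex) for marker in error_markers):
          raise
        LOG.info('Backend not available, trying HA candidate: %s', ex)
        candidate = get_next_candidate()
        if candidate is None or is_same(candidate[1], getattr(api, attr)):
          raise
        _config, client = candidate
        setattr(api, attr, client)  # mutate the cache so the failover sticks
        return funct(api, *args, **kwargs)
    return decorate
  return decorator

Under that factory, `jt_ha` would be roughly `failover(get_next_ha_mrcluster, 'jt', ('Could not connect to',), lambda cand, cur: cand.host == cur.host)`, and `rm_ha` the analogous call with `get_next_ha_yarncluster`, `'resource_manager_api'`, and a URL comparison. Keeping the retry to a single attempt, and bailing out when the candidate matches the failed backend, avoids retry loops when every node in the HA pair is down.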