[core] JT plugin HA

Try to find another available configured MR1 cluster with a RUNNING status
when connecting to the JT plugin fails.
A JT in standby mode does not have its plugin listening; a JT in active
mode does.
Add a list of MapReduce clusters to hue.ini so the other standby JTs can
be listed.
This applies to JobBrowser and Oozie.
Romain Rigaux 12 years ago
parent
current commit
c07bf39c7b
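
For context, listing more than one MR1 cluster looks roughly like the sketch below. This is a hypothetical fragment, not part of the commit: the `failover` section name and host values are invented, while `jobtracker_host` and `submit_to` follow the properties documented in the shipped hue.ini template; get_next_ha_mrcluster() only probes clusters whose submit_to is enabled.

    [[mapred_clusters]]

      [[[default]]]
        jobtracker_host=jt-active.example.com
        submit_to=True

      [[[failover]]]
        # Hypothetical second (standby) JT, probed when the default one is unreachable.
        jobtracker_host=jt-standby.example.com
        submit_to=True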

+ 28 - 1
apps/jobbrowser/src/jobbrowser/api.py

@@ -18,7 +18,7 @@
 import logging
 
 from desktop.lib.paginator import Paginator
-
+from django.utils.functional import wraps
 from hadoop import cluster
 from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
 
@@ -30,6 +30,7 @@ import hadoop.yarn.node_manager_api as node_manager_api
 from jobbrowser.conf import SHARE_JOBS
 from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
 from jobbrowser.yarn_models import Application, Job as YarnJob, Container
+from hadoop.cluster import get_next_ha_mrcluster
 
 
 LOG = logging.getLogger(__name__)
@@ -44,6 +45,26 @@ def get_api(user, jt):
     return JtApi(jt)
 
 
+def jt_ha(funct):
+  """
+  Support JT plugin HA by trying another MR cluster.
+
+  This modifies the cached JT and so will happen just once per failover.
+  """
+  def decorate(api, *args, **kwargs):
+    try:
+      return funct(api, *args, **kwargs)
+    except Exception, ex:
+      if 'Could not connect to' in str(ex):
+        LOG.info('JobTracker not available, trying JT plugin HA: %s.' % ex)
+        jt_ha = get_next_ha_mrcluster()
+        if jt_ha is not None:
+          config, api.jt = jt_ha
+          return funct(api, *args, **kwargs)
+      raise ex
+  return wraps(funct)(decorate)
+
+
 class JobBrowserApi(object):
 
   def paginate_task(self, task_list, pagenum):
@@ -55,12 +76,15 @@ class JtApi(JobBrowserApi):
   def __init__(self, jt):
     self.jt = jt
 
+  @jt_ha
   def get_job_link(self, jobid):
     return JobLinkage(self.jt, jobid)
 
+  @jt_ha
   def get_job(self, jobid):
     return Job.from_id(jt=self.jt, jobid=jobid)
 
+  @jt_ha
   def get_jobs(self, user, **kwargs):
     """
     Returns an array of jobs where the returned
@@ -94,6 +118,7 @@ class JtApi(JobBrowserApi):
 
     return self.filter_jobs(user, jobs, **kwargs)
 
+  @jt_ha
   def filter_jobs(self, user, jobs, **kwargs):
     check_permission = not SHARE_JOBS.get() and not user.is_superuser
 
@@ -132,6 +157,7 @@ class JtApi(JobBrowserApi):
 
     return filter(predicate, jobs)
 
+  @jt_ha
   def get_tasks(self, jobid, **filters):
     return TaskList.select(self.jt,
                            jobid,
@@ -141,6 +167,7 @@ class JtApi(JobBrowserApi):
                            _DEFAULT_OBJ_PER_PAGINATION,
                            _DEFAULT_OBJ_PER_PAGINATION * (filters['pagenum'] - 1))
 
+  @jt_ha
   def get_tracker(self, trackerid):
     return Tracker.from_name(self.jt, trackerid)

+ 2 - 0
desktop/conf.dist/hue.ini

@@ -236,6 +236,7 @@
   # Configuration for HDFS NameNode
   # ------------------------------------------------------------------------
   [[hdfs_clusters]]
+    # HA support by using HttpFs
 
     [[[default]]]
       # Enter the filesystem uri
@@ -264,6 +265,7 @@
   # Configuration for MapReduce 0.20 JobTracker (MR1)
   # ------------------------------------------------------------------------
   [[mapred_clusters]]
+    # HA support by specifying multiple configs
 
     [[[default]]]
       # Enter the host on which you are running the Hadoop JobTracker

+ 2 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -242,6 +242,7 @@
   # Configuration for HDFS NameNode
   # ------------------------------------------------------------------------
   [[hdfs_clusters]]
+    # HA support by using HttpFs
 
     [[[default]]]
       # Enter the filesystem uri
@@ -269,6 +270,7 @@
   # Configuration for MapReduce JobTracker
   # ------------------------------------------------------------------------
   [[mapred_clusters]]
+    # HA support by specifying multiple configs
 
     [[[default]]]
       # Enter the host on which you are running the Hadoop JobTracker

+ 34 - 5
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -77,6 +77,30 @@ def get_default_mrcluster():
       return candidates.values()[0]
     return None
 
+def get_next_ha_mrcluster():
+  """
+  Return the next available JT instance, or None.
+
+  This method currently works for distinguishing between an active and a standby JT, as a standby JT does not respond.
+  A cleaner but more complicated way would be to do something like the MRHAAdmin tool and
+  org.apache.hadoop.ha.HAServiceStatus#getServiceStatus().
+  """
+  candidates = all_mrclusters()
+
+  for name in conf.MR_CLUSTERS.keys():
+    config = conf.MR_CLUSTERS[name]
+    if config.SUBMIT_TO.get():
+      try:
+        jt = candidates[name]
+        status = jt.cluster_status()
+        if status.stateAsString == 'RUNNING':
+          return (config, jt)
+        else:
+          LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status))
+      except Exception, ex:
+        LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex))
+  return None
+
 def get_mrcluster(identifier="default"):
   global MR_CACHE
   all_mrclusters()
@@ -95,16 +119,21 @@ def get_cluster_conf_for_job_submission():
   """
   Check the `submit_to' for each MR/Yarn cluster, and return the
   config section of first one that enables submission.
+
+  HA support for MR1.
   """
   for name in conf.YARN_CLUSTERS.keys():
     yarn = conf.YARN_CLUSTERS[name]
     if yarn.SUBMIT_TO.get():
       return yarn
-  for name in conf.MR_CLUSTERS.keys():
-    mr = conf.MR_CLUSTERS[name]
-    if mr.SUBMIT_TO.get():
-      return mr
-  return None
+
+  mr = get_next_ha_mrcluster()
+
+  if mr is not None:
+    config, jt = mr
+    return config
+  else:
+    return None
 
 def get_cluster_addr_for_job_submission():
   """