
[core] Detail HA howto configuration in hue.ini

Romain Rigaux 12 years ago
parent
commit
d47bc5e4d7

+ 8 - 1
desktop/conf.dist/hue.ini

@@ -268,7 +268,6 @@
   # Configuration for MapReduce 0.20 JobTracker (MR1)
   # ------------------------------------------------------------------------
   [[mapred_clusters]]
-    # HA support by specifying multiple configs
 
     [[[default]]]
       # Enter the host on which you are running the Hadoop JobTracker
@@ -295,6 +294,14 @@
       # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
       ## hadoop_conf_dir=/etc/hadoop/conf
 
+    # HA support by specifying multiple clusters
+    # e.g.
+
+    # [[[ha]]]
+      # Enter the host on which you are running the failover JobTracker
+      # jobtracker_host=localhost-ha
+
+
   # Configuration for YARN (MR2)
   # ------------------------------------------------------------------------
   [[yarn_clusters]]
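
Put together, the HA setup documented in desktop/conf.dist/hue.ini would look roughly like the sketch below. The hostnames jt-primary and jt-failover are placeholders, and the [[[ha]]] section is assumed to take the same keys as [[[default]]] (the commit itself only shows jobtracker_host):

  [hadoop]
    [[mapred_clusters]]

      [[[default]]]
        # Enter the host on which you are running the Hadoop JobTracker
        jobtracker_host=jt-primary
        # Thrift plug-in port for the JobTracker
        thrift_port=9290
        # Whether to submit jobs to this cluster
        submit_to=True

      # HA support by specifying multiple clusters
      [[[ha]]]
        # Enter the host on which you are running the failover JobTracker
        jobtracker_host=jt-failover
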

+ 11 - 3
desktop/conf/pseudo-distributed.ini.tmpl

@@ -270,10 +270,9 @@
       # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
       ## hadoop_conf_dir=/etc/hadoop/conf
 
-  # Configuration for MapReduce JobTracker
+  # Configuration for MapReduce 0.20 JobTracker (MR1)
   # ------------------------------------------------------------------------
   [[mapred_clusters]]
-    # HA support by specifying multiple configs
 
     [[[default]]]
       # Enter the host on which you are running the Hadoop JobTracker
@@ -283,8 +282,9 @@
       # Thrift plug-in port for the JobTracker
       ## thrift_port=9290
       # Whether to submit jobs to this cluster
-      ## submit_to=False
+      ## submit_to=True
 
+      # Change this if your MapReduce cluster is Kerberos-secured
       ## security_enabled=false
 
       # Settings about this MR1 cluster. If you install MR1 in a
@@ -299,6 +299,14 @@
       # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
       ## hadoop_conf_dir=/etc/hadoop/conf
 
+    # HA support by specifying multiple clusters
+    # e.g.
+
+    # [[[ha]]]
+      # Enter the host on which you are running the failover JobTracker
+      # jobtracker_host=localhost-ha
+
+
   # Configuration for Yarn
   # ------------------------------------------------------------------------
   [[yarn_clusters]]
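
The template also gains a hint next to security_enabled. As an illustrative sketch (values are placeholders), a Kerberos-secured MR1 cluster would uncomment that key in the default cluster section:

    [[[default]]]
      jobtracker_host=localhost
      thrift_port=9290
      # Whether to submit jobs to this cluster
      submit_to=True
      # Change this if your MapReduce cluster is Kerberos-secured
      security_enabled=true
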

+ 1 - 0
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -107,6 +107,7 @@ def get_next_ha_mrcluster():
           status = jt.cluster_status()
           if status.stateAsString == 'RUNNING':
             MR_NAME_CACHE = name
+            LOG.warn('Picking HA JobTracker: %s' % name)
             return (config, jt)
           else:
             LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status))
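
For context, this hunk sits inside get_next_ha_mrcluster(), which asks each configured JobTracker for its cluster_status() and returns (and now logs) the first one reporting RUNNING, caching its name in MR_NAME_CACHE. A configuration it could iterate over might look like the sketch below; the section names ha1/ha2 and the hostnames are hypothetical, assuming more than one failover cluster can be listed alongside [[[default]]]:

  [[mapred_clusters]]

    [[[default]]]
      jobtracker_host=jt-primary

    [[[ha1]]]
      jobtracker_host=jt-standby-1

    [[[ha2]]]
      # the first cluster whose JobTracker reports RUNNING is picked
      jobtracker_host=jt-standby-2
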