@@ -22,46 +22,9 @@ import os
 
 DEFAULT_NN_HTTP_PORT = 50070
 
-HADOOP_HOME = Config(
-  key="hadoop_home",
-  default=os.environ.get("HADOOP_HOME", "/usr/lib/hadoop"),
-  help=("Path to directory holding hadoop libs - HADOOP_HOME in " +
-        "hadoop parlance; defaults to environment variable, when" +
-        "set.")
-)
-
-HADOOP_MR1_HOME = Config(
-  key="hadoop_mr1_home",
-  default=os.environ.get("HADOOP_MR1_HOME", "/usr/lib/hadoop-0.20-mapreduce"),
-  help=("Path to directory holding hadoop libs - HADOOP_HOME in " +
-        "hadoop parlance; defaults to environment variable, when" +
-        "set.")
-)
-
-def hadoop_bin_from_hadoop_home():
-  """Returns $HADOOP_HOME/bin/hadoop"""
-  return os.path.join(HADOOP_HOME.get(), "bin/hadoop")
-
-HADOOP_BIN = Config("hadoop_bin",
-  help="Path to your Hadoop binary",
-  dynamic_default=hadoop_bin_from_hadoop_home,
-  type=str)
-
-# TODO(philip): This will need more love for dealing with multiple clusters.
-HADOOP_CONF_DIR = Config(
-  key="hadoop_conf_dir",
-  default="/etc/hadoop/conf",
-  help="Directory to pass to hadoop_bin (from Hadoop configuration) as the --config flag.",
-)
-
-def find_file_recursive(desired_glob, root=None):
-  if root is None:
-    root_f = lambda: HADOOP_HOME.get()
-  else:
-    root_f = lambda: not callable(root) and root or root()
-
+def find_file_recursive(desired_glob, root):
   def f():
-    for dirpath, dirnames, filenames in os.walk(root_f()):
+    for dirpath, dirnames, filenames in os.walk(root):
       matches = fnmatch.filter(filenames, desired_glob)
       if matches:
         if len(matches) != 1:
@@ -72,33 +35,9 @@ def find_file_recursive(desired_glob, root=None):
     logging.error("Trouble finding jars matching %s" % (desired_glob,))
     return None
 
-  if root is None:
-    root_str = "$HADOOP_HOME"
-  else:
-    root_str = root
-  f.__doc__ = "Finds %s/%s" % (root_str, desired_glob)
+  f.__doc__ = "Finds %s/%s" % (root, desired_glob)
   return f
 
-HADOOP_EXAMPLES_JAR = Config(
-  key="hadoop_examples_jar",
-  dynamic_default=find_file_recursive("hadoop-*examples*.jar", lambda: HADOOP_MR1_HOME.get()),
-  help="Path to the hadoop-examples.jar (used for tests and jobdesigner setup)",
-  type=str,
-  private=True)
-
-HADOOP_STREAMING_JAR = Config(
-  key="hadoop_streaming_jar",
-  dynamic_default=find_file_recursive("hadoop-*streaming*.jar", lambda: HADOOP_MR1_HOME.get()),
-  help="Path to the hadoop-streaming.jar (used by jobdesigner)",
-  type=str,
-  private=True)
-
-HADOOP_TEST_JAR = Config("hadoop_test_jar",
-  help="[Used by testing code.] Path to hadoop-test.jar",
-  dynamic_default=find_file_recursive("hadoop-*test*.jar", lambda: HADOOP_MR1_HOME.get()),
-  type=str,
-  private=True)
-
 HADOOP_PLUGIN_CLASSPATH = Config("hadoop_plugin_classpath",
   help="[Used only in testing code.] Path to the Hadoop plugin jar.",
   type=str,
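With the implicit HADOOP_HOME default gone, callers of find_file_recursive pass the search root explicitly and invoke the returned locator themselves. A minimal usage sketch (the glob and the MR1 directory below are illustrative values, not taken from this patch):

    # Build a zero-argument locator, as Config(dynamic_default=...) expects,
    # then call it to walk the tree once and return the first matching path.
    locate_examples_jar = find_file_recursive("hadoop-*examples*.jar",
                                               "/usr/lib/hadoop-0.20-mapreduce")
    print(locate_examples_jar())  # absolute jar path, or None if nothing matched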
@@ -149,6 +88,31 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
                               default=False, type=coerce_bool),
       TEMP_DIR=Config("temp_dir", help="HDFS directory for temporary files",
                       default='/tmp', type=str),
+
+      HADOOP_HDFS_HOME = Config(
+        key="hadoop_hdfs_home",
+        default=os.environ.get("HADOOP_HDFS_HOME", "/usr/lib/hadoop-hdfs"),
+        help=("Path to Hadoop HDFS home - HADOOP_HOME or HADOOP_HDFS_HOME in " +
+              "hadoop parlance. For tarball installations, it is the root of " +
+              "the untarred directory. For packages, " +
+              "it is /usr/lib/hadoop-hdfs. " +
+              "Defaults to the environment variable HADOOP_HDFS_HOME when set, " +
+              "or '/usr/lib/hadoop-hdfs'."),
+      ),
+      HADOOP_BIN = Config(
+        key="hadoop_bin",
+        default=os.environ.get("HADOOP_BIN", "/usr/bin/hadoop"),
+        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
+              "Defaults to the environment variable HADOOP_BIN when set, " +
+              "or '/usr/bin/hadoop'.")
+      ),
+      HADOOP_CONF_DIR = Config(
+        key="hadoop_conf_dir",
+        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
+        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
+              "as the --config flag. Defaults to the environment variable " +
+              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
+      ),
     )
   )
 )
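Each HDFS cluster section now carries its own Hadoop home, launcher, and configuration directory instead of relying on the old module-level globals. A minimal sketch of reading these values back, assuming a cluster named 'default' (the name is illustrative; any configured section key works):

    from hadoop import conf  # assumes this module lives at hadoop/conf.py

    cluster = conf.HDFS_CLUSTERS['default']
    hdfs_home = cluster.HADOOP_HDFS_HOME.get()   # e.g. /usr/lib/hadoop-hdfs
    hadoop_bin = cluster.HADOOP_BIN.get()        # e.g. /usr/bin/hadoop
    conf_dir = cluster.HADOOP_CONF_DIR.get()     # e.g. /etc/hadoop/conf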
@@ -159,11 +123,11 @@ MR_CLUSTERS = UnspecifiedConfigSection(
   each=ConfigSection(
     help="Information about a single MapReduce cluster",
     members=dict(
-      JT_HOST=Config("jobtracker_host", help="Host/IP for JobTracker"),
-      JT_PORT=Config("jobtracker_port",
-                     default=8021,
-                     help="Service port for the JobTracker",
-                     type=int),
+      HOST=Config("jobtracker_host", help="Host/IP for JobTracker"),
+      PORT=Config("jobtracker_port",
+                  default=8021,
+                  help="Service port for the JobTracker",
+                  type=int),
       JT_THRIFT_PORT=Config("thrift_port", help="Thrift port for JobTracker", default=9290,
                             type=int),
       JT_KERBEROS_PRINCIPAL=Config("jt_kerberos_principal", help="Kerberos principal for JobTracker",
@@ -171,7 +135,29 @@
       SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
                               default=False, type=coerce_bool),
       SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
-                       default=False, type=coerce_bool),
+                       default=True, type=coerce_bool),
+
+      HADOOP_MAPRED_HOME = Config(
+        key="hadoop_mapred_home",
+        default=os.environ.get("HADOOP_MR1_HOME", "/usr/lib/hadoop-0.20-mapreduce"),
+        help=("Path to directory holding Hadoop MR1 libs. " +
+              "E.g. /usr/lib/hadoop-0.20-mapreduce. Defaults to the environment " +
+              "variable HADOOP_MR1_HOME when set, or '/usr/lib/hadoop-0.20-mapreduce'.")
+      ),
+      HADOOP_BIN = Config(
+        key="hadoop_bin",
+        default=os.environ.get("HADOOP_MR1_BIN", "/usr/bin/hadoop"),
+        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
+              "Defaults to the environment variable HADOOP_MR1_BIN when set, " +
+              "or '/usr/bin/hadoop'.")
+      ),
+      HADOOP_CONF_DIR = Config(
+        key="hadoop_conf_dir",
+        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
+        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
+              "as the --config flag. Defaults to the environment variable " +
+              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
+      ),
     )
   )
 )
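Code that submits jobs can then assemble the launcher command from a single MapReduce cluster section rather than from global settings. A rough sketch, assuming a cluster named 'default'; the jar path and arguments are placeholders, and only the --config flag usage is described by the help text above:

    cluster = MR_CLUSTERS['default']
    cmd = [cluster.HADOOP_BIN.get(),
           '--config', cluster.HADOOP_CONF_DIR.get(),
           'jar', '/path/to/job.jar']  # placeholder jar; real callers append job args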
@@ -183,15 +169,37 @@ YARN_CLUSTERS = UnspecifiedConfigSection(
   each=ConfigSection(
     help="Information about a single Yarn cluster",
     members=dict(
-      RM_HOST=Config("resourcemanager_host",
-                     default='localhost',
-                     help="Host/IP for the ResourceManager"),
-      RM_PORT=Config("resourcemanager_port",
-                     default=8032,
-                     type=int,
-                     help="Service port for the ResourceManager"),
+      HOST=Config("resourcemanager_host",
+                  default='localhost',
+                  help="Host/IP for the ResourceManager"),
+      PORT=Config("resourcemanager_port",
+                  default=8032,
+                  type=int,
+                  help="Service port for the ResourceManager"),
       SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
                        default=False, type=coerce_bool),
+
+      HADOOP_MAPRED_HOME = Config(
+        key="hadoop_mapred_home",
+        default=os.environ.get("HADOOP_MR2_HOME", "/usr/lib/hadoop-mapreduce"),
+        help=("Path to directory holding Hadoop MR2 libs. " +
+              "E.g. /usr/lib/hadoop-mapreduce. Defaults to the environment " +
+              "variable HADOOP_MR2_HOME when set, or '/usr/lib/hadoop-mapreduce'.")
+      ),
+      HADOOP_BIN = Config(
+        key="hadoop_bin",
+        default=os.environ.get("HADOOP_MR2_BIN", "/usr/bin/hadoop"),
+        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
+              "Defaults to the environment variable HADOOP_MR2_BIN when set, " +
+              "or '/usr/bin/hadoop'.")
+      ),
+      HADOOP_CONF_DIR = Config(
+        key="hadoop_conf_dir",
+        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
+        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
+              "as the --config flag. Defaults to the environment variable " +
+              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
+      ),
     )
   )
 )
@@ -206,22 +214,15 @@ def config_validator():
   from hadoop.fs import webhdfs
   from hadoop import job_tracker
   res = [ ]
-
-  # HADOOP_HOME
-  res.extend(validate_path(HADOOP_HOME, is_dir=True))
-  # HADOOP_BIN
-  res.extend(validate_path(HADOOP_BIN, is_dir=False))
-
-  # JARs: even though these are private, we need them to run jobsub
-  res.extend(validate_path(HADOOP_EXAMPLES_JAR, is_dir=False))
-  res.extend(validate_path(HADOOP_STREAMING_JAR, is_dir=False))
-
   submit_to = [ ]
 
   # HDFS_CLUSTERS
   has_default = False
   for name in HDFS_CLUSTERS.keys():
     cluster = HDFS_CLUSTERS[name]
+    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
     res.extend(webhdfs.test_fs_configuration(cluster))
     if name == 'default':
       has_default = True
@@ -231,16 +232,23 @@
   # MR_CLUSTERS
   for name in MR_CLUSTERS.keys():
     cluster = MR_CLUSTERS[name]
+    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
     res.extend(job_tracker.test_jt_configuration(cluster))
     if cluster.SUBMIT_TO.get():
       submit_to.append('mapred_clusters.' + name)
 
-  # Only one cluster should have submit_to
+  # YARN_CLUSTERS
   for name in YARN_CLUSTERS.keys():
     cluster = YARN_CLUSTERS[name]
+    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
+    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
     if cluster.SUBMIT_TO.get():
       submit_to.append('yarn_clusters.' + name)
 
+  # Only one cluster should have submit_to
   if len(submit_to) > 1:
     res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                 "But it is enabled in the following clusters: " +