
[test] Have pseudo_hdfs4 start MR1

* minicluster is no longer suitable, because it simply can't work with HDFS
  in 0.23. So it's easier to have pseudo_hdfs4 start the MR1 service.
* The pseudo_hdfs4.py file is misnamed. It's actually more like the old
  mini_cluster.py.
* Fixed MR1 home location in the Jenkins tests
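
Roughly, this is what a consumer of the combined cluster looks like after this change: a test asks for the shared cluster and talks to MR1 through the new jt property. A minimal sketch based on the updated tests.py further down; the test name itself is illustrative.

from nose.plugins.attrib import attr
from nose.tools import assert_true

from hadoop import pseudo_hdfs4

@attr('requires_hadoop')
def test_cluster_is_up():
  # shared_cluster() now starts the NN, DN, JT and TT together and registers
  # an atexit hook, so the test never calls shutdown() itself.
  cluster = pseudo_hdfs4.shared_cluster()

  # The new jt property returns a LiveJobTracker wired to the thrift port.
  assert_true(cluster.jt.cluster_status())
  assert_true(cluster.jt.all_task_trackers())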
bc Wong 13 years ago
Parent
Commit
ca25e53ef5

+ 186 - 31
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -1,3 +1,20 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+

 import atexit
 import logging
@@ -13,8 +30,9 @@ import time
 import desktop
 import hadoop
 from hadoop.mini_cluster import find_unused_port, write_config
+from hadoop.job_tracker import LiveJobTracker
-# Shared global HDFS (for CDH4).
+# Shared global HDFS (for CDH4) and MR1 cluster.
 _shared_cluster = None

 LOG = logging.getLogger(__name__)
@@ -37,12 +55,13 @@ CLEANUP_TMP_DIR = os.environ.get("MINI_CLUSTER_CLEANUP", 'true')


 class PseudoHdfs4(object):
-  """This class runs HDFS (CDH4) locally, in pseudo-distributed mode"""
+  """This class runs HDFS (CDH4) and MR1 locally, in pseudo-distributed mode"""

   def __init__(self):
     self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_')
     self._superuser = pwd.getpwuid(os.getuid()).pw_name
     self._fs = None
+    self._jt = None

     self._log_dir = None
     self._dfs_http_port = None
@@ -50,13 +69,22 @@ class PseudoHdfs4(object):
     self._namenode_port = None
     self._fs_default_name = None

+    self._jt_thrift_port = None
+    self._jt_http_port = None
+    self._jt_port = None
+    self._tt_http_port = None
+
     self._nn_proc = None
     self._dn_proc = None
+    self._jt_proc = None
+    self._tt_proc = None

     self.shutdown_hook = None

   def __str__(self):
-    return "PseudoHdfs4 (%s) at %s" % (self._fs_default_name, self._tmpdir)
+    return "PseudoHdfs4 (%s) at %s --- MR1 (%s) at http://localhost:%s" % \
+        (self._fs_default_name, self._tmpdir,
+         self.mapred_job_tracker, self._jt_http_port)

   @property
   def superuser(self):
@@ -78,8 +106,21 @@ class PseudoHdfs4(object):
   def dfs_http_port(self):
     return self._dfs_http_port

+  @property
+  def jt_thrift_port(self):
+    return self._jt_thrift_port
+
+  @property
+  def mapred_job_tracker(self):
+    return "localhost:%s" % (self._jt_port,)
+
+  @property
+  def mapred_job_tracker_http_address(self):
+    return "localhost:%s" % (self._jt_http_port,)
+
   @property
   def fs(self):
+    """Returns a Filesystem object configured for this cluster."""
     if self._fs is None:
       if self._dfs_http_address is None:
         LOG.warn("Attempt to access uninitialized filesystem")
@@ -88,20 +129,33 @@ class PseudoHdfs4(object):
         "http://%s/webhdfs/v1" % (self._dfs_http_address,))
     return self._fs

+  @property
+  def jt(self):
+    """Returns a LiveJobTracker object configured for this cluster."""
+    if self._jt is None:
+      self._jt = LiveJobTracker("localhost", self.jt_thrift_port)
+    return self._jt
+
   def stop(self):
     """Kills the cluster ungracefully."""
-    while self._nn_proc is not None and self._nn_proc.poll() is None:
-      os.kill(self._nn_proc.pid, signal.SIGKILL)
-      LOG.info('Stopping NameNode pid %s' % (self._nn_proc.pid,))
-      time.sleep(0.5)
-
-    while self._dn_proc is not None and self._dn_proc.poll() is None:
-      os.kill(self._dn_proc.pid, signal.SIGKILL)
-      LOG.info('Stopping DataNode pid %s' % (self._dn_proc.pid,))
-      time.sleep(0.5)
+    def _kill_proc(name, proc):
+      try:
+        while proc is not None and proc.poll() is None:
+          os.kill(proc.pid, signal.SIGKILL)
+          LOG.info('Stopping %s pid %s' % (name, proc.pid,))
+          time.sleep(0.5)
+      except Exception, ex:
+        LOG.exception('Failed to stop pid %s. You may want to do it manually: %s' % (proc.pid, ex))
+
+    _kill_proc('NameNode', self._nn_proc)
+    _kill_proc('DataNode', self._dn_proc)
+    _kill_proc('JobTracker', self._jt_proc)
+    _kill_proc('TaskTracker', self._tt_proc)

     self._nn_proc = None
     self._dn_proc = None
+    self._jt_proc = None
+    self._tt_proc = None

     if CLEANUP_TMP_DIR == 'false':
       LOG.info('Skipping cleanup of temp directory "%s"' % (self._tmpdir,))
@@ -123,6 +177,7 @@ class PseudoHdfs4(object):
     return os.path.join(self._log_dir, filename)

   def start(self):
+    """Start the NN, DN, JT and TT processes"""
     LOG.info("Using temporary directory: %s" % (self._tmpdir,))

     # Fix up superuser group mapping
@@ -143,6 +198,7 @@ class PseudoHdfs4(object):

     # More stuff to setup in the environment
     env = dict(
+      HADOOP_HOME = hadoop.conf.HADOOP_HOME.get(),
       HADOOP_CONF_DIR = conf_dir,
       HADOOP_HEAPSIZE = "128",
       HADOOP_LOG_DIR = self._log_dir,
@@ -154,7 +210,7 @@ class PseudoHdfs4(object):
     if "JAVA_HOME" in os.environ:
       env['JAVA_HOME'] = os.environ['JAVA_HOME']

-    LOG.debug("Environment:\n" + "\n".join([ str(x) for x in sorted(env.items()) ]))
+    LOG.debug("Hadoop Environment:\n" + "\n".join([ str(x) for x in sorted(env.items()) ]))

     # Format HDFS
     self._format(conf_dir, env)
@@ -165,13 +221,60 @@ class PseudoHdfs4(object):

     # Make sure they're running
     deadline = time.time() + STARTUP_DEADLINE
-    while not self._is_ready():
+    while not self._is_hdfs_ready(env):
+      if time.time() > deadline:
+        self.stop()
+        raise RuntimeError('%s is taking too long to start' % (self,))
+      time.sleep(5)
+
+    # Start MR1
+    self._start_mr1(env)
+
+  def _start_mr1(self, env):
+    LOG.info("Starting MR1")
+    conf_dir = self._tmppath('conf')
+
+    # We need a different env because it's a different hadoop
+    env = env.copy()
+    env['HADOOP_HOME'] = hadoop.conf.HADOOP_MR1_HOME.get()
+    env["HADOOP_CLASSPATH"] = ':'.join([
+        # -- BEGIN JAVA TRIVIA --
+        # Add the -test- jar to the classpath to work around a subtle issue
+        # involving Java classloaders. In brief, hadoop's RunJar class creates
+        # a child classloader with the test jar on it, but the core classes
+        # are loaded by the system classloader. This is fine except that
+        # some classes in the test jar extend package-protected classes in the
+        # core jar. Even though the classes are in the same package name, they
+        # are thus loaded by different classloaders and therefore an IllegalAccessError
+        # prevents the MiniMRCluster from starting. Adding the test jar to the system
+        # classpath prevents this error since then both the MiniMRCluster and the
+        # core classes are loaded by the system classloader.
+        hadoop.conf.HADOOP_TEST_JAR.get(),
+        # -- END JAVA TRIVIA --
+        hadoop.conf.HADOOP_PLUGIN_CLASSPATH.get(),
+        # Due to CDH-4537, we need to add test dependencies to run minicluster
+        os.path.join(os.path.dirname(__file__), 'test_jars', '*'),
+      ])
+
+    LOG.debug("MR1 Environment:\n" + "\n".join([ str(x) for x in sorted(env.items()) ]))
+
+    # Configure
+    self._write_mapred_site()
+
+    # Run JT & TT
+    self._jt_proc = self._start_daemon('jobtracker', conf_dir, env)
+    self._tt_proc = self._start_daemon('tasktracker', conf_dir, env)
+
+    # Make sure they're running
+    deadline = time.time() + STARTUP_DEADLINE
+    while not self._is_mr1_ready(env):
       if time.time() > deadline:
         self.stop()
         raise RuntimeError('%s is taking too long to start' % (self,))
       time.sleep(5)

   def _format(self, conf_dir, env):
+    """Format HDFS"""
     args = (hadoop.conf.HADOOP_BIN.get(),
             '--config', conf_dir,
             'namenode', '-format')
@@ -193,29 +296,29 @@ class PseudoHdfs4(object):
       stderr.close()


-  def _is_ready(self):
-    def log_exit(exit_code, proc_name):
-      LOG.info('%s exited with %s' % (proc_name, exit_code))
-      LOG.debug('--------------------- STDOUT:\n' +
-                file(self._logpath(proc_name + '.stdout')))
-      LOG.debug('--------------------- STDERR:\n' +
-                file(self._logpath(proc_name + '.stderr')))
+  def _log_exit(self, proc_name, exit_code):
+    """Log the stdout and stderr for a process"""
+    LOG.info('%s exited with %s' % (proc_name, exit_code))
+    LOG.debug('--------------------- STDOUT:\n' +
+              file(self._logpath(proc_name + '.stdout')).read())
+    LOG.debug('--------------------- STDERR:\n' +
+              file(self._logpath(proc_name + '.stderr')).read())
+  def _is_hdfs_ready(self, env):
+    """Whether HDFS is servicing requests"""
     if self._nn_proc.poll() is not None:
-      log_exit('namenode', self._nn_proc.poll())
+      self._log_exit('namenode', self._nn_proc.poll())
       return False
     if self._dn_proc.poll() is not None:
-      log_exit('datanode', self._dn_proc.poll())
+      self._log_exit('datanode', self._dn_proc.poll())
       return False

     # Run a `dfsadmin -report' against it
     dfsreport = subprocess.Popen(
-      (hadoop.conf.HADOOP_BIN.get(),
-       'dfsadmin',
-       '-Dfs.default.name=%s' % self._fs_default_name,
-       '-report'),
+      (self._get_hadoop_bin(env), 'dfsadmin', '-report'),
       stdout=subprocess.PIPE,
-      stderr=subprocess.PIPE)
+      stderr=subprocess.PIPE,
+      env=env)

     ret = dfsreport.wait()
     if ret != 0:
@@ -231,12 +334,38 @@ class PseudoHdfs4(object):
     return False


+  def _is_mr1_ready(self, env):
+    """Whether MR1 is servicing requests"""
+    if self._jt_proc.poll() is not None:
+      self._log_exit('jobtracker', self._jt_proc.poll())
+      return False
+    if self._tt_proc.poll() is not None:
+      self._log_exit('tasktracker', self._tt_proc.poll())
+      return False
+
+    # Run a `hadoop job -list all'
+    list_all = subprocess.Popen(
+      (self._get_hadoop_bin(env), 'job', '-list', 'all'),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      env=env)
+
+    ret = list_all.wait()
+    if ret == 0:
+      return True
+
+    LOG.debug('MR1 not ready yet.\n%s\n%s' %
+              (list_all.stdout.read(), list_all.stderr.read()))
+    return False
+
+
   def _start_daemon(self, proc_name, conf_dir, env):
     """Start a hadoop daemon. Returns the Popen object."""
-    args = (hadoop.conf.HADOOP_BIN.get(), 
+    hadoop_bin = self._get_hadoop_bin(env)
+    args = (hadoop_bin,
             '--config', conf_dir,
             proc_name)
-    LOG.info('Starting pseudo HDFS4 cluster: %s' % (args,))
+    LOG.info('Starting Hadoop cluster daemon: %s' % (args,))
     stdout = file(self._logpath(proc_name + ".stdout"), 'w')
     stderr = file(self._logpath(proc_name + ".stderr"), 'w')

@@ -246,6 +375,9 @@ class PseudoHdfs4(object):
       stderr=stderr,
       env=env)

+  def _get_hadoop_bin(self, env):
+    return os.path.join(env['HADOOP_HOME'], 'bin', 'hadoop')
+
   def _write_hdfs_site(self):
     self._dfs_http_port = find_unused_port()
     self._dfs_http_address = 'localhost:%s' % (self._dfs_http_port,)
@@ -277,6 +409,21 @@ class PseudoHdfs4(object):
     }
     write_config(core_configs, self._tmppath('conf/core-site.xml'))

+  def _write_mapred_site(self):
+    self._jt_thrift_port = find_unused_port()
+    self._jt_http_port = find_unused_port()
+    self._jt_port = find_unused_port()
+    self._tt_http_port = find_unused_port()
+
+    mapred_configs = {
+      'mapred.job.tracker': 'localhost:%s' % (self._jt_port,),
+      'mapred.job.tracker.http.address': 'localhost:%s' % (self._jt_http_port,),
+      'jobtracker.thrift.address': 'localhost:%s' % (self._jt_thrift_port,),
+      'mapred.jobtracker.plugins': 'org.apache.hadoop.thriftfs.ThriftJobTrackerPlugin',
+      'mapred.task.tracker.http.address': 'localhost:%s' % (self._tt_http_port,),
+    }
+    write_config(mapred_configs, self._tmppath('conf/mapred-site.xml'))
+
   def _write_hadoop_metrics_conf(self, conf_dir):
     f = file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
     try:
@@ -304,13 +451,18 @@ def shared_cluster():
   if _shared_cluster is None:
     cluster = PseudoHdfs4()
     atexit.register(cluster.stop)
-    cluster.start()
+    try:
+      cluster.start()
+    except Exception, ex:
+      LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))

     # Fix config to reflect the cluster setup.
     closers = [
       hadoop.conf.HDFS_CLUSTERS['default'].NN_HOST.set_for_testing('localhost'),
       hadoop.conf.HDFS_CLUSTERS['default'].NN_HTTP_PORT.set_for_testing(cluster.dfs_http_port),
       hadoop.conf.HDFS_CLUSTERS['default'].NN_HDFS_PORT.set_for_testing(cluster.namenode_port),
+      hadoop.conf.MR_CLUSTERS['default'].JT_HOST.set_for_testing('localhost'),
+      hadoop.conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(cluster.jt_thrift_port),
     ]

     old = hadoop.cluster.clear_caches()
@@ -322,6 +474,7 @@ def shared_cluster():

     cluster.shutdown_hook = restore_config
     _shared_cluster = cluster
+
   return _shared_cluster


@@ -337,6 +490,8 @@ if __name__ == '__main__':
   LOG.info("%s running" % (cluster,))
   LOG.info("fs.default.name=%s" % (cluster.fs_default_name,))
   LOG.info("dfs.http.address=%s" % (cluster.dfs_http_address,))
+  LOG.info("jobtracker.thrift.port=%s" % (cluster.jt_thrift_port,))
+  LOG.info("mapred.job.tracker=%s" % (cluster.mapred_job_tracker,))

   from IPython.Shell import IPShellEmbed
   IPShellEmbed()()
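
The new _write_mapred_site() above allocates four ports (JT, JT HTTP, JT thrift, TT HTTP) through find_unused_port(), which is still imported from hadoop.mini_cluster. As a rough sketch, that kind of helper is conventionally implemented by asking the kernel for an ephemeral port and releasing it right away; the real implementation may differ in detail.

import socket

def find_unused_port():
  """Return a port that was free at the time of the call.

  Note the inherent race: the port can be taken again between close()
  and the moment the daemon that received it actually binds.
  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.bind(('127.0.0.1', 0))   # port 0 lets the kernel pick a free port
  port = sock.getsockname()[1]
  sock.close()
  return port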

+ 20 - 23
desktop/libs/hadoop/src/hadoop/tests.py

@@ -27,7 +27,7 @@ from desktop.lib.django_test_util import make_logged_in_client
 from hadoop import cluster
 from hadoop import conf
 from hadoop import confparse
-from hadoop import mini_cluster
+from hadoop import pseudo_hdfs4

 @attr('requires_hadoop')
 def test_live_jobtracker():
@@ -35,26 +35,24 @@ def test_live_jobtracker():
   Checks that LiveJobTracker never raises
   exceptions for most of its calls.
   """
-  try:
-    minicluster = mini_cluster.shared_cluster()
-    jt = minicluster.jt
-    # Make sure that none of the following
-    # raise.
-    assert_true(jt.queues())
-    assert_true(jt.cluster_status())
-    assert_true(jt.all_task_trackers())
-    assert_true(jt.active_trackers())
-    assert_true(jt.blacklisted_trackers())
-    # not tested: task_tracker
-    assert_true(jt.running_jobs())
-    assert_true(jt.completed_jobs())
-    assert_true(jt.failed_jobs())
-    assert_true(jt.all_jobs())
-    # not tested: get_job_counters
-    assert_true(jt.get_current_time())
-    # not tested: get_job_xml
-  finally:
-    minicluster.shutdown()
+  minicluster = pseudo_hdfs4.shared_cluster()
+
+  jt = minicluster.jt
+  # Make sure that none of the following
+  # raise.
+  assert_true(jt.queues())
+  assert_true(jt.cluster_status())
+  assert_true(jt.all_task_trackers())
+  assert_true(jt.active_trackers())
+  assert_true(jt.blacklisted_trackers())
+  # not tested: task_tracker
+  assert_true(jt.running_jobs())
+  assert_true(jt.completed_jobs())
+  assert_true(jt.failed_jobs())
+  assert_true(jt.all_jobs())
+  # not tested: get_job_counters
+  assert_true(jt.get_current_time())
+  # not tested: get_job_xml


 def test_confparse():
@@ -139,7 +137,7 @@ def test_config_validator_more():

   # We don't actually use the mini_cluster. But the cluster sets up the correct
   # configuration that forms the test basis.
-  minicluster = mini_cluster.shared_cluster()
+  minicluster = pseudo_hdfs4.shared_cluster()
   cli = make_logged_in_client()

   reset = (
@@ -159,7 +157,6 @@ def test_config_validator_more():
     for old_conf in reset:
       old_conf()
     cluster.restore_caches(old)
-    minicluster.shutdown()


 def test_non_default_cluster():
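
The try/finally shutdown disappears from these tests because shared_cluster() now owns the cluster lifecycle: it registers cluster.stop with atexit, so teardown happens once when the test process exits rather than per test. A minimal sketch of that pattern, mirroring the shared_cluster() shown in the pseudo_hdfs4.py diff above:

import atexit

_shared_cluster = None

def shared_cluster():
  global _shared_cluster
  if _shared_cluster is None:
    cluster = PseudoHdfs4()          # the class defined in pseudo_hdfs4.py
    atexit.register(cluster.stop)    # runs at interpreter exit, not per test
    cluster.start()
    _shared_cluster = cluster
  return _shared_cluster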

+ 1 - 0
tools/jenkins/build-functions

@@ -49,6 +49,7 @@ MR1_URL=${MR1_URL:-http://nightly.cloudera.com/cdh4/cdh/4/mr1-0.23.3-mr1-cdh4b2-

 MR1_TGZ=$(basename $MR1_URL)
 MR1_VERSION=${MR1_TGZ/.tar.gz/}
+MR1_VERSION=${MR1_VERSION/mr1/hadoop}
 MR1_CACHE="$HOME/.hue_cache/${MR1_TGZ}"

 build_mr1() {
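
The extra substitution presumably exists because the MR1 tarball's basename starts with mr1- while the directory it unpacks into (the MR1 home the Jenkins script needs to point at) starts with hadoop-, so only the leading component has to change. A sketch of the transformation, shown in Python for clarity; the tarball name is hypothetical.

# ${var/pattern/string} in bash replaces only the first match, hence the
# replace(..., 1) calls below.
tgz = 'mr1-0.23.3-mr1-cdh4b2-SNAPSHOT.tar.gz'     # hypothetical MR1_TGZ
version = tgz.replace('.tar.gz', '', 1)           # MR1_VERSION=${MR1_TGZ/.tar.gz/}
home_dir = version.replace('mr1', 'hadoop', 1)    # MR1_VERSION=${MR1_VERSION/mr1/hadoop}
print home_dir                                    # hadoop-0.23.3-mr1-cdh4b2-SNAPSHOT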