
HUE-4525 [beeswax] Correctly parse Tez jobs and add functionality to test multiple execution engines

This adds a flag that determines which execution engines are available
for testing.
Zach York · commit d553763 · 9 years ago
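
The flag is the AVAILABLE_EXECUTION_ENGINES_FOR_TEST environment variable read in test_base.py below. A minimal sketch of opting a test session into several engines (assumes a Hue development environment where the beeswax app is importable):

import os

# Comma-separated and case-insensitive; when unset, only 'mr' is exercised.
os.environ['AVAILABLE_EXECUTION_ENGINES_FOR_TEST'] = 'mr,tez'

from beeswax.test_base import get_available_execution_engines
print(get_available_execution_engines())  # ['mr', 'tez']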

+ 2 - 2
apps/beeswax/src/beeswax/api.py

@@ -46,7 +46,7 @@ from beeswax.server import dbms
 from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException, QueryServerTimeoutException
 from beeswax.views import authorized_get_design, authorized_get_query_history, make_parameterization_form,\
                           safe_get_design, save_design, massage_columns_for_json, _get_query_handle_and_state, \
-                          _parse_out_hadoop_jobs
+                          parse_out_jobs
 
 
 LOG = logging.getLogger(__name__)
@@ -216,7 +216,7 @@ def watch_query_refresh_json(request, id):
   except Exception, ex:
     log = str(ex)
 
-  jobs = _parse_out_hadoop_jobs(log)
+  jobs = parse_out_jobs(log)
   job_urls = massage_job_urls_for_json(jobs)
 
   result = {

+ 8 - 0
apps/beeswax/src/beeswax/test_base.py

@@ -50,6 +50,7 @@ _SHARED_HIVE_SERVER_PROCESS = None
 _SHARED_HIVE_SERVER = None
 _SHARED_HIVE_SERVER_LOCK = threading.Lock()
 _SHARED_HIVE_SERVER_CLOSER = None
+_SUPPORTED_EXECUTION_ENGINES = ['mr', 'spark', 'tez']
 
 
 LOG = logging.getLogger(__name__)
@@ -58,6 +59,13 @@ LOG = logging.getLogger(__name__)
 def is_hive_on_spark():
   return os.environ.get('ENABLE_HIVE_ON_SPARK', 'false').lower() == 'true'
 
+def get_available_execution_engines():
+  available_engines = os.environ.get('AVAILABLE_EXECUTION_ENGINES_FOR_TEST', 'mr').lower().split(",")
+  if any(engine not in _SUPPORTED_EXECUTION_ENGINES for engine in available_engines):
+    raise ValueError("Unknown available execution engines: %s. Supported engines are: %s"
+                     % (available_engines, _SUPPORTED_EXECUTION_ENGINES))
+  return available_engines
+
 
 def _start_server(cluster):
   args = [beeswax.conf.HIVE_SERVER_BIN.get()]
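
As a quick sanity check of the validation above, a hedged sketch of the failure path (error wording paraphrased in the comment):

import os
from beeswax.test_base import get_available_execution_engines

os.environ['AVAILABLE_EXECUTION_ENGINES_FOR_TEST'] = 'flink'
try:
  get_available_execution_engines()
except ValueError as e:
  print(e)  # unknown engine: only 'mr', 'spark' and 'tez' are supported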

+ 39 - 26
apps/beeswax/src/beeswax/tests.py

@@ -68,7 +68,7 @@ import beeswax.views
 from beeswax import conf, hive_site
 from beeswax.common import apply_natural_sort
 from beeswax.conf import HIVE_SERVER_HOST, AUTH_USERNAME, AUTH_PASSWORD, AUTH_PASSWORD_SCRIPT
-from beeswax.views import collapse_whitespace, _save_design
+from beeswax.views import collapse_whitespace, _save_design, parse_out_jobs
 from beeswax.test_base import make_query, wait_for_query_to_finish, verify_history, get_query_server_config,\
   fetch_query_result_data
 from beeswax.design import hql_query, strip_trailing_semicolon
@@ -79,7 +79,7 @@ from beeswax.server.dbms import QueryServerException
 from beeswax.server.hive_server2_lib import HiveServerClient,\
   PartitionKeyCompatible, PartitionValueCompatible, HiveServerTable,\
   HiveServerTColumnValue2
-from beeswax.test_base import BeeswaxSampleProvider, is_hive_on_spark
+from beeswax.test_base import BeeswaxSampleProvider, is_hive_on_spark, get_available_execution_engines
 from beeswax.hive_site import get_metastore, hiveserver2_jdbc_url
 
 
@@ -362,20 +362,22 @@ for x in sys.stdin:
     """
     Testing query with udf
     """
-    response = _make_query(self.client, "SELECT my_sqrt(foo), my_float(foo) FROM test where foo=4 GROUP BY foo", # Force MR job with GROUP BY
-      udfs=[('my_sqrt', 'org.apache.hadoop.hive.ql.udf.UDFSqrt'),
-            ('my_float', 'org.apache.hadoop.hive.ql.udf.UDFToFloat')], local=False, database=self.db_name)
-    response = wait_for_query_to_finish(self.client, response, max=60.0)
-    content = fetch_query_result_data(self.client, response)
+    execution_engines = get_available_execution_engines()
 
-    assert_equal([2.0, 4.0], content["results"][0])
-    log = content['log']
+    for engine in execution_engines:
+      response = _make_query(self.client, "SELECT my_sqrt(foo), my_float(foo) FROM test where foo=4 GROUP BY foo", # GROUP BY forces a distributed job
+        udfs=[('my_sqrt', 'org.apache.hadoop.hive.ql.udf.UDFSqrt'),
+              ('my_float', 'org.apache.hadoop.hive.ql.udf.UDFToFloat')],
+        local=False, database=self.db_name, settings=[('hive.execution.engine', engine)])
+      response = wait_for_query_to_finish(self.client, response, max=60.0)
+      content = fetch_query_result_data(self.client, response)
 
-    if not is_hive_on_spark():
-      assert_true(search_log_line('map = 100%', log), log)
-      assert_true(search_log_line('reduce = 100%', log), log)
+      assert_equal([2.0, 4.0], content["results"][0])
+      log = content['log']
+
+      assert_true(search_log_line('Completed executing command', log), log)
       # Test job extraction while we're at it
-      assert_equal(1, len(content["hadoop_jobs"]), "Should have started 1 job and extracted it.")
+      assert_equal(1, len(parse_out_jobs(log, engine)), "Should have started 1 job and extracted it.")
 
 
   def test_query_with_remote_udf(self):
@@ -2043,18 +2045,18 @@ for x in sys.stdin:
     """
     Test that the HS2 logs send back the ql.Driver log output with JobID
     """
-    if is_hive_on_spark():
-      raise SkipTest
+    execution_engines = get_available_execution_engines()
 
-    hql = "SELECT foo FROM `%(db)s`.`test` GROUP BY foo" % {'db': self.db_name}  # GROUP BY forces the MR job
-    response = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.db_name)
-    content = fetch_query_result_data(self.client, response)
+    for engine in execution_engines:
+      hql = "SELECT foo FROM `%(db)s`.`test` GROUP BY foo" % {'db': self.db_name}  # GROUP BY forces a distributed job
+      response = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.db_name,
+                             settings=[('hive.execution.engine', engine)])
+      content = fetch_query_result_data(self.client, response)
 
-    log = content['log']
-    assert_true(search_log_line('Starting Job = ', log), log)
-    assert_true(search_log_line('Ended Job = ', log), log)
-    # Test job extraction while we're at it
-    assert_equal(1, len(content["hadoop_jobs"]), "Should have started 1 job and extracted it.")
+      log = content['log']
+      assert_true(search_log_line('Completed executing command', log), log)
+      # Test job extraction while we're at it
+      assert_equal(1, len(parse_out_jobs(log, engine)), "Should have started 1 job and extracted it.")
 
 
 
@@ -2173,8 +2175,8 @@ Starting Job = job_201003191517_0003, Tracking URL = http://localhost:50030/jobd
 """
   assert_equal(
     ["job_201003191517_0002", "job_201003191517_0003", "job_1402420825148_0001"],
-    beeswax.views._parse_out_hadoop_jobs(sample_log))
-  assert_equal([], beeswax.views._parse_out_hadoop_jobs("nothing to see here"))
+    parse_out_jobs(sample_log))
+  assert_equal([], parse_out_jobs("nothing to see here"))
 
   sample_log_no_direct_url = """
 14/06/09 08:40:38 INFO impl.YarnClientImpl: Submitted application application_1402269517321_0003
@@ -2185,7 +2187,18 @@ Starting Job = job_201003191517_0003, Tracking URL = http://localhost:50030/jobd
 """
   assert_equal(
       ["job_1402269517321_0003"],
-      beeswax.views._parse_out_hadoop_jobs(sample_log_no_direct_url))
+      parse_out_jobs(sample_log_no_direct_url))
+
+
+def test_tez_job_extraction():
+  sample_log = """
+16/07/12 05:47:08 INFO SessionState:
+16/07/12 05:47:08 INFO SessionState: Status: Running (Executing on YARN cluster with App id application_1465862139975_0002)
+16/07/12 05:47:08 INFO SessionState: Map 1: -/-	Reducer 2: 0/1
+"""
+
+  assert_equal(["application_1465862139975_0002"], parse_out_jobs(sample_log, 'tez'))
+  assert_equal([], parse_out_jobs("Tez job doesn't exist.", 'tez'))
 
 
 def test_hive_site():

+ 5 - 2
apps/beeswax/src/beeswax/views.py

@@ -57,6 +57,7 @@ LOG = logging.getLogger(__name__)
 # For scraping Job IDs from logs
 HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
 SPARK_APPLICATION_RE = re.compile("Running with YARN Application = (?P<application_id>application_\d+_\d+)")
+TEZ_APPLICATION_RE = re.compile("Executing on YARN cluster with App id ([a-z0-9_]+?)\)")
 
 
 def index(request):
@@ -511,7 +512,7 @@ def view_results(request, id, first_row=0):
     'columns': columns,
     'expected_first_row': first_row,
     'log': log,
-    'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
+    'hadoop_jobs': app_name != 'impala' and parse_out_jobs(log),
     'query_context': query_context,
     'can_save': False,
     'context_param': context_param,
@@ -885,7 +886,7 @@ def parse_query_context(context):
   return pair
 
 
-def _parse_out_hadoop_jobs(log, engine='mr', with_state=False):
+def parse_out_jobs(log, engine='mr', with_state=False):
   """
   Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
 
@@ -897,6 +898,8 @@ def _parse_out_hadoop_jobs(log, engine='mr', with_state=False):
     start_pattern = HADOOP_JOBS_RE
   elif engine.lower() == 'spark':
     start_pattern = SPARK_APPLICATION_RE
+  elif engine.lower() == 'tez':
+    start_pattern = TEZ_APPLICATION_RE
   else:
     raise ValueError(_('Cannot parse job IDs for execution engine %(engine)s') % {'engine': engine})
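
To make the new Tez branch concrete, a small sketch of what TEZ_APPLICATION_RE extracts, using the regex above and the log line from test_tez_job_extraction:

import re

TEZ_APPLICATION_RE = re.compile(r"Executing on YARN cluster with App id ([a-z0-9_]+?)\)")
line = "16/07/12 05:47:08 INFO SessionState: Status: Running (Executing on YARN cluster with App id application_1465862139975_0002)"
print(TEZ_APPLICATION_RE.findall(line))  # ['application_1465862139975_0002']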
 

+ 2 - 2
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -46,7 +46,7 @@ try:
   from beeswax.models import QUERY_TYPES, HiveServerQueryHandle, HiveServerQueryHistory, QueryHistory, Session
   from beeswax.server import dbms
   from beeswax.server.dbms import get_query_server_config, QueryServerException
-  from beeswax.views import _parse_out_hadoop_jobs
+  from beeswax.views import parse_out_jobs
 except ImportError, e:
   LOG.warn('Hive and HiveServer2 interfaces are not enabled')
   hive_settings = None
@@ -360,7 +360,7 @@ class HS2Api(Api):
 
     if snippet['type'] == 'hive':
       engine = self._get_hive_execution_engine(notebook, snippet)
-      jobs_with_state = _parse_out_hadoop_jobs(logs, engine=engine, with_state=True)
+      jobs_with_state = parse_out_jobs(logs, engine=engine, with_state=True)
 
       jobs = [{
         'name': job.get('job_id', ''),