
HUE-3438 [editor] Scrape Spark Application ID during query execution of Hive on Spark

Jenny Kim 9 years ago
parent
commit
c5e673c
2 changed files with 41 additions and 9 deletions
  1. apps/beeswax/src/beeswax/views.py (+13 -4)
  2. desktop/libs/notebook/src/notebook/connectors/hiveserver2.py (+28 -5)

+ 13 - 4
apps/beeswax/src/beeswax/views.py

@@ -54,6 +54,10 @@ from beeswax.server.dbms import expand_exception, get_query_server_config, Query

 LOG = logging.getLogger(__name__)

+# For scraping Job IDs from logs
+HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
+SPARK_APPLICATION_RE = re.compile("Running with YARN Application = (?P<application_id>application_\d+_\d+)")
+

 def index(request):
   return execute_query(request)
@@ -881,15 +885,20 @@ def parse_query_context(context):
   return pair


-HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
-
-def _parse_out_hadoop_jobs(log):
+def _parse_out_hadoop_jobs(log, engine='mr'):
   """
   """
   Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
   Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
   """
   """
   ret = []
   ret = []
 
 
-  for match in HADOOP_JOBS_RE.finditer(log):
+  if engine.lower() == 'mr':
+    pattern = HADOOP_JOBS_RE
+  elif engine.lower() == 'spark':
+    pattern = SPARK_APPLICATION_RE
+  else:
+    raise ValueError(_('Cannot parse job IDs for execution engine %(engine)s') % {'engine': engine})
+
+  for match in pattern.finditer(log):
     job_id = match.group(1)
     if job_id not in ret:
       ret.append(job_id)
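
A quick sketch of the updated helper end to end, assuming a Hue checkout with apps/beeswax on the path; the log string is illustrative, and in the real code the ValueError message goes through Django's translation helper:

from beeswax.views import _parse_out_hadoop_jobs

spark_log = "INFO : Running with YARN Application = application_1455222971596_0014"

print(_parse_out_hadoop_jobs(spark_log, engine='spark'))
# ['application_1455222971596_0014']
print(_parse_out_hadoop_jobs(spark_log))  # default engine is 'mr', so no matches: []

try:
    _parse_out_hadoop_jobs(spark_log, engine='tez')
except ValueError as e:
    print(e)  # Cannot parse job IDs for execution engine tez

Duplicate IDs are dropped by the `if job_id not in ret` check, so a job that logs its ID twice is still reported once.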

+ 28 - 5
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -44,6 +44,9 @@ except ImportError, e:
   LOG.exception('Hive and HiveServer2 interfaces are not enabled')


+DEFAULT_HIVE_ENGINE = 'mr'
+
+
 def query_error_handler(func):
   def decorator(*args, **kwargs):
     try:
@@ -239,12 +242,16 @@ class HS2Api(Api):

   @query_error_handler
   def get_jobs(self, notebook, snippet, logs):
-    job_ids = _parse_out_hadoop_jobs(logs)
+    jobs = []

-    jobs = [{
-      'name': job_id,
-      'url': reverse('jobbrowser.views.single_job', kwargs={'job': job_id})
-    } for job_id in job_ids]
+    if snippet['type'] == 'hive':
+      engine = self._get_hive_execution_engine(notebook, snippet)
+      job_ids = _parse_out_hadoop_jobs(logs, engine=engine)
+
+      jobs = [{
+        'name': job_id,
+        'url': reverse('jobbrowser.views.single_job', kwargs={'job': job_id})
+      } for job_id in job_ids]

     return jobs
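
With a Spark snippet, get_jobs now yields entries whose name is the YARN application ID; the URL path below is an illustrative guess at what reverse('jobbrowser.views.single_job', ...) resolves to:

# Hypothetical return value for a Hive-on-Spark query
[{'name': 'application_1455222971596_0014',
  'url': '/jobbrowser/jobs/application_1455222971596_0014'}]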
 
 
@@ -276,6 +283,22 @@ class HS2Api(Api):
     }


+  def _get_hive_execution_engine(self, notebook, snippet):
+    # Get hive.execution.engine from snippet properties, if none, then get from session
+    properties = snippet['properties']
+    settings = properties.get('settings', [])
+
+    if not settings:
+      session = next((session for session in notebook['sessions'] if session['type'] == 'hive'), None)
+      if not session:
+        raise Exception(_('Cannot get jobs, failed to find active HS2 session for user: %s') % self.user.username)
+      settings = session['properties']
+
+    engine = next((setting['value'] for setting in settings if setting['key'] == 'hive.execution.engine'), DEFAULT_HIVE_ENGINE)
+
+    return engine
+
+
   def _get_statements(self, hql_query):
     hql_query = strip_trailing_semicolon(hql_query)
     statements = []
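
A minimal standalone replay of the _get_hive_execution_engine lookup, using hypothetical notebook and snippet payloads (the real method is an HS2Api instance method, reads self.user, and raises a translated error):

DEFAULT_HIVE_ENGINE = 'mr'

def resolve_engine(notebook, snippet):
    # Prefer snippet-level settings; fall back to the Hive session's properties
    settings = snippet['properties'].get('settings', [])
    if not settings:
        session = next((s for s in notebook['sessions'] if s['type'] == 'hive'), None)
        if session is None:
            raise Exception('Cannot get jobs, failed to find active HS2 session')
        settings = session['properties']
    return next((s['value'] for s in settings if s['key'] == 'hive.execution.engine'),
                DEFAULT_HIVE_ENGINE)

snippet = {'properties': {'settings': [{'key': 'hive.execution.engine', 'value': 'spark'}]}}
print(resolve_engine({'sessions': []}, snippet))  # spark

notebook = {'sessions': [{'type': 'hive', 'properties': []}]}
print(resolve_engine(notebook, {'properties': {}}))  # mr (falls back to the default)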