
HUE-4212 [hive] Also provide if the job is started or finished

Jenny Kim 9 years ago
parent commit 6c1224a

+ 21 - 6
apps/beeswax/src/beeswax/views.py

@@ -885,23 +885,38 @@ def parse_query_context(context):
   return pair
 
 
-def _parse_out_hadoop_jobs(log, engine='mr'):
+def _parse_out_hadoop_jobs(log, engine='mr', with_state=False):
   """
   Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
+
+  with_state: If True, returns a list of dicts with 'job_id', 'started' and 'finished' keys instead of a list of job IDs.
   """
   ret = []
 
   if engine.lower() == 'mr':
-    pattern = HADOOP_JOBS_RE
+    start_pattern = HADOOP_JOBS_RE
   elif engine.lower() == 'spark':
-    pattern = SPARK_APPLICATION_RE
+    start_pattern = SPARK_APPLICATION_RE
   else:
     raise ValueError(_('Cannot parse job IDs for execution engine %(engine)s') % {'engine': engine})
 
-  for match in pattern.finditer(log):
+  for match in start_pattern.finditer(log):
     job_id = match.group(1)
-    if job_id not in ret:
-      ret.append(job_id)
+
+    if with_state:
+      if job_id not in [job['job_id'] for job in ret]:
+        ret.append({'job_id': job_id, 'started': True, 'finished': False})
+      end_pattern = 'Ended Job = %s' % job_id
+
+      if end_pattern in log:
+        job = next((job for job in ret if job['job_id'] == job_id), None)
+        if job is not None:
+          job['finished'] = True
+        else:
+          ret.append({'job_id': job_id, 'started': True, 'finished': True})
+    else:
+      if job_id not in ret:
+        ret.append(job_id)
 
   return ret
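
For illustration, a minimal sketch of what the new flag changes in the return value (assuming a configured Hue environment where beeswax.views is importable; the log lines mirror the "Starting Job = ..." and "Ended Job = ..." markers the parser relies on):

    # Sketch only: assumes beeswax.views is importable in a configured Hue environment.
    from beeswax.views import _parse_out_hadoop_jobs

    log = (
        "INFO  : Starting Job = job_1466630204796_0059, Tracking URL = http://example/proxy/...\n"
        "INFO  : Ended Job = job_1466630204796_0059\n"
    )

    # Default behaviour is unchanged: a flat list of job IDs.
    _parse_out_hadoop_jobs(log, engine='mr')
    # ['job_1466630204796_0059']

    # With with_state=True, each job carries its start/finish flags.
    _parse_out_hadoop_jobs(log, engine='mr', with_state=True)
    # [{'job_id': 'job_1466630204796_0059', 'started': True, 'finished': True}]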
 

+ 3 - 12
desktop/libs/notebook/src/notebook/api.py

@@ -259,20 +259,11 @@ def get_logs(request):
 
   db = get_api(request, snippet)
 
+  full_log = str(request.POST.get('full_log', ''))
   logs = db.get_log(notebook, snippet, startFrom=startFrom, size=size)
+  full_log += logs
 
-  jobs = json.loads(request.POST.get('jobs', '[]'))
-
-  # Get any new jobs from current logs snippet
-  new_jobs = db.get_jobs(notebook, snippet, logs)
-
-  # Append new jobs to known jobs and get the unique set
-  if new_jobs:
-    all_jobs = jobs + new_jobs
-    jobs = dict((job['name'], job) for job in all_jobs).values()
-
-  # Retrieve full log for job progress parsing
-  full_log = request.POST.get('full_log', logs)
+  jobs = db.get_jobs(notebook, snippet, full_log)
 
   response['logs'] = logs.strip()
   response['progress'] = db.progress(snippet, full_log) if snippet['status'] != 'available' and snippet['status'] != 'success' else 100
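
The net effect in get_logs() is that the client now owns log accumulation: every poll posts the full_log it has gathered so far, the server appends the freshly fetched chunk, and job parsing always runs over the concatenated text, so the old jobs/new_jobs merge is no longer needed. A rough sketch of that polling contract, with hypothetical fetch_chunk and parse_jobs helpers standing in for db.get_log and db.get_jobs:

    # Sketch only: fetch_chunk and parse_jobs are stand-ins, not real Hue APIs.
    def poll_logs(fetch_chunk, parse_jobs, polls=3):
        full_log = ''
        for _ in range(polls):
            chunk = fetch_chunk()          # analogous to db.get_log(notebook, snippet, ...)
            full_log += chunk              # mirrors full_log += logs above
            jobs = parse_jobs(full_log)    # analogous to db.get_jobs(notebook, snippet, full_log)
            yield chunk.strip(), jobs      # the 'logs' and 'jobs' fields of the response
        # full_log is what the client sends back as POST['full_log'] on the next call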

+ 6 - 4
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -346,12 +346,14 @@ class HS2Api(Api):
 
     if snippet['type'] == 'hive':
       engine = self._get_hive_execution_engine(notebook, snippet)
-      job_ids = _parse_out_hadoop_jobs(logs, engine=engine)
+      jobs_with_state = _parse_out_hadoop_jobs(logs, engine=engine, with_state=True)
 
       jobs = [{
-        'name': job_id,
-        'url': reverse('jobbrowser.views.single_job', kwargs={'job': job_id})
-      } for job_id in job_ids]
+        'name': job.get('job_id', ''),
+        'url': reverse('jobbrowser.views.single_job', kwargs={'job': job.get('job_id', '')}),
+        'started': job.get('started', False),
+        'finished': job.get('finished', False)
+      } for job in jobs_with_state]
 
     return jobs
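
For reference, a Hive entry in the get_jobs() result now looks roughly like this (the URL shown is only indicative; the real one comes from reverse('jobbrowser.views.single_job', ...)):

    # Illustrative shape only; the exact URL depends on Hue's jobbrowser URL configuration.
    job_entry = {
        'name': 'job_1466630204796_0059',
        'url': '/jobbrowser/jobs/job_1466630204796_0059',   # hypothetical reversed URL
        'started': True,
        'finished': False,   # becomes True once "Ended Job = <job_id>" appears in the log
    }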
 

+ 97 - 0
desktop/libs/notebook/src/notebook/connectors/tests/tests_hiveserver2.py

@@ -330,6 +330,103 @@ class TestHiveserver2Api(object):
     assert_equal(self.api.progress(snippet, logs), 50)
 
 
+  def test_get_jobs(self):
+
+    notebook = json.loads("""
+      {
+        "uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52",
+        "id": 1234,
+        "sessions": [{"type": "hive", "properties": [], "id": "1234"}],
+        "type": "query-hive",
+        "name": "Test Hiveserver2 Editor",
+        "isSaved": false,
+        "parentUuid": null
+      }
+    """)
+
+    snippet = json.loads("""
+        {
+            "status": "running",
+            "database": "default",
+            "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
+            "result": {
+                "type": "table",
+                "handle": {
+                  "statement_id": 0,
+                  "statements_count": 2,
+                  "has_more_statements": true
+                },
+                "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
+            },
+            "statement": "%(statement)s",
+            "type": "hive",
+            "properties": {
+                "files": [],
+                "functions": [],
+                "settings": []
+            }
+        }
+      """ % {'statement': "SELECT * FROM sample_07;"}
+                         )
+
+    logs = """INFO  : Compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
+                     AVG(bytes) AS avg_bytes
+            FROM web_logs
+            GROUP BY  app
+            HAVING app IS NOT NULL
+            ORDER BY avg_bytes DESC
+            INFO  : Semantic Analysis Completed
+            INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
+            INFO  : Completed compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13); Time taken: 0.073 seconds
+            INFO  : Executing command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
+                     AVG(bytes) AS avg_bytes
+            FROM web_logs
+            GROUP BY  app
+            HAVING app IS NOT NULL
+            ORDER BY avg_bytes DESC
+            INFO  : Query ID = hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13
+            INFO  : Total jobs = 2
+            INFO  : Launching Job 1 out of 2
+            INFO  : Starting task [Stage-1:MAPRED] in serial mode
+            INFO  : Number of reduce tasks not specified. Estimated from input data size: 1
+            INFO  : In order to change the average load for a reducer (in bytes):
+            INFO  :   set hive.exec.reducers.bytes.per.reducer=<number>
+            INFO  : In order to limit the maximum number of reducers:
+            INFO  :   set hive.exec.reducers.max=<number>
+            INFO  : In order to set a constant number of reducers:
+            INFO  :   set mapreduce.job.reduces=<number>
+            INFO  : number of splits:1
+            INFO  : Submitting tokens for job: job_1466630204796_0059
+            INFO  : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
+            INFO  : Starting Job = job_1466630204796_0059, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
+            INFO  : Kill Command = /usr/lib/hadoop/bin/hadoop job  -kill job_1466630204796_0059
+    """
+
+    jobs = self.api.get_jobs(notebook, snippet, logs)
+    assert_true(isinstance(jobs, list))
+    assert_equal(len(jobs), 1)
+    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
+    assert_equal(jobs[0]['started'], True)
+    assert_equal(jobs[0]['finished'], False)
+    assert_true('url' in jobs[0])
+
+    logs += """INFO  : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
+        INFO  : 2016-06-24 15:55:51,125 Stage-1 map = 0%,  reduce = 0%
+        INFO  : 2016-06-24 15:56:00,410 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 2.12 sec
+        INFO  : 2016-06-24 15:56:09,709 Stage-1 map = 100%,  reduce = 100%, Cumulative CPU 4.04 sec
+        INFO  : MapReduce Total cumulative CPU time: 4 seconds 40 msec
+        INFO  : Ended Job = job_1466630204796_0059
+        INFO  : Launching Job 2 out of 2
+    """
+
+    jobs = self.api.get_jobs(notebook, snippet, logs)
+    assert_equal(len(jobs), 1)
+    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
+    assert_equal(jobs[0]['started'], True)
+    assert_equal(jobs[0]['finished'], True)
+
+
 class TestHiveserver2ApiWithHadoop(BeeswaxSampleProvider):
 
   @classmethod