
HUE-2678 [jobbrowser] Read Spark job data from Spark History Server API

Also fixes logs link for Spark jobs
Jenny Kim 9 years ago
commit ca6ca1a
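
For context, the Spark History Server exposes its data as JSON under /api/v1 (documented in the Spark monitoring guide), and the executors endpoint is the one this commit reads. A minimal sketch of querying it directly, assuming the default port 18088 and a made-up application/attempt id:

# Sketch only: fetch executor summaries straight from the Spark History Server.
# The host, port and application/attempt ids are placeholders, not real values.
import json
import urllib2

base = 'http://localhost:18088/api/v1'
app_id, attempt_id = 'application_1428611387971_0001', '1'

url = '%s/applications/%s/%s/executors' % (base, app_id, attempt_id)
executors = json.loads(urllib2.urlopen(url).read())
for e in executors:
    # Fields used later in this commit include 'id', 'hostPort', 'memoryUsed',
    # 'diskUsed', 'totalDuration', 'totalInputBytes' and 'executorLogs'.
    print e.get('id'), e.get('hostPort'), e.get('memoryUsed', 0)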

+ 5 - 4
apps/jobbrowser/src/jobbrowser/api.py

@@ -26,8 +26,9 @@ from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundE
 
 import hadoop.yarn.history_server_api as history_server_api
 import hadoop.yarn.mapreduce_api as mapreduce_api
-import hadoop.yarn.resource_manager_api as resource_manager_api
 import hadoop.yarn.node_manager_api as node_manager_api
+import hadoop.yarn.resource_manager_api as resource_manager_api
+import hadoop.yarn.spark_history_server_api as spark_history_server_api
 
 from jobbrowser.conf import SHARE_JOBS
 from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
@@ -173,6 +174,7 @@ class YarnApi(JobBrowserApi):
     self.resource_manager_api = resource_manager_api.get_resource_manager(user.username)
     self.mapreduce_api = mapreduce_api.get_mapreduce_api(user.username)
     self.history_server_api = history_server_api.get_history_server_api()
+    self.spark_history_server_api = spark_history_server_api.get_history_server_api()
 
   def get_job_link(self, job_id):
     return self.get_job(job_id)
@@ -225,11 +227,10 @@ class YarnApi(JobBrowserApi):
 
       if job['state'] == 'ACCEPTED':
         raise ApplicationNotRunning(jobid, job)
+      elif job.get('applicationType') == 'SPARK':
+        job = SparkJob(job, rm_api=self.resource_manager_api, hs_api=self.spark_history_server_api)
       elif job['state'] == 'KILLED':
         return KilledYarnJob(self.resource_manager_api, job)
-
-      if job.get('applicationType') == 'SPARK':
-        job = SparkJob(job, self.resource_manager_api)
       elif job.get('applicationType') == 'MAPREDUCE':
         jobid = jobid.replace('application', 'job')
 

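In api.py above, get_job() now hands Spark applications past the ACCEPTED state to SparkJob together with both clients, so metrics come from the REST API rather than from scraping the tracking page. A rough sketch of the resulting call path, assuming the YarnApi constructor takes the requesting user and using a placeholder application id:

# Sketch only: the job browser path for a finished Spark application. The user
# lookup, constructor call and application id are illustrative assumptions.
from django.contrib.auth.models import User
from jobbrowser.api import YarnApi

user = User.objects.get(username='hue')
api = YarnApi(user)
job = api.get_job('application_1428611387971_0001')
# For applicationType == 'SPARK' in a terminal state, `job` is a SparkJob whose
# metrics were loaded through spark_history_server_api.
print job.logs_url
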
+ 19 - 7
apps/jobbrowser/src/jobbrowser/templates/job.mako

@@ -115,7 +115,7 @@ ${ comps.menubar() }
           <li class="nav-header">${_('Status')}</li>
           <li class="white" id="jobStatus">&nbsp;</li>
           <li class="nav-header">${_('Logs')}</li>
-          <li><a href="${job.trackingUrl }" target="_blank"><i class="fa fa-tasks"></i> ${_('Logs')}</a></li>
+          <li><a href="${job.logs_url }" target="_blank"><i class="fa fa-tasks"></i> ${_('Logs')}</a></li>
           <li class="nav-header">${_('Progress')}</li>
           <li class="white">${job.progress}%</li>
           <li class="nav-header">${_('Duration')}</li>
@@ -131,7 +131,7 @@ ${ comps.menubar() }
         <div class="card-body">
           <ul class="nav nav-tabs">
             <li  class="active"><a href="#metadata" data-toggle="tab">${_('Metadata')}</a></li>
-            % if job.scrapedData.get('metrics'):
+            % if hasattr(job, 'metrics') and job.metrics:
               <li><a href="#metrics" data-toggle="tab">${_('Metrics')}</a></li>
             % endif
           </ul>
@@ -182,22 +182,34 @@ ${ comps.menubar() }
                 </tbody>
               </table>
             </div>
+            % if hasattr(job, 'metrics') and job.metrics:
             <div class="tab-pane" id="metrics">
               <table class="table table-condensed">
                 <thead>
-                  <th>${_('Metric')}</th>
-                  <th>${_('Value')}</th>
+                  % for header in job.metrics.get('headers', []):
+                  <th>${ header }</th>
+                  % endfor
                 </thead>
                 <tbody>
-                % for metric in job.scrapedData.get('metrics', []):
+                % for executor in job.metrics.get('executors', []):
                   <tr>
-                    <td>${_(metric['header'])}</td>
-                    <td>${metric['value']}</td>
+                    % for val in executor:
+                    % if isinstance(val, dict):
+                      <td>
+                      % for name, link in val.items():
+                        <a href="${ link }">${ name }</a>
+                      % endfor
+                      </td>
+                    % else:
+                      <td>${ val }</td>
+                    % endif
+                    % endfor
                   </tr>
                 % endfor
                 </tbody>
               </table>
             </div>
+            % endif
         </div>
       </div>
     </div>

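The metrics tab above iterates over job.metrics, a dict with a 'headers' list and an 'executors' list of rows; a cell that is itself a dict is rendered as one link per (name, url) pair, which is how the executor log links appear. A small sketch of the shape the template expects (values are made up, and the real table has 13 columns):

# Sketch only: the job.metrics structure the template iterates over.
metrics = {
    'headers': ['Executor Id', 'Address', 'Storage Memory', 'Logs'],
    'executors': [
        ['1', 'worker-1:35000', '12.0 MB / 530.0 MB',
         # A dict cell becomes one <a href="..."> per (name, link) pair.
         {'stdout': 'http://worker-1:8042/node/containerlogs/container_01/stdout'}],
    ],
}
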
+ 67 - 36
apps/jobbrowser/src/jobbrowser/yarn_models.py

@@ -16,24 +16,25 @@
 # limitations under the License.
 
 import logging
+import os
 import re
 import time
-import urlparse
 import urllib2
+import urlparse
 
 from lxml import html
 
 from django.utils.translation import ugettext as _
 
 from desktop.lib.rest.resource import Resource
-from desktop.lib.view_util import format_duration_in_millis
+from desktop.lib.view_util import big_filesizeformat, format_duration_in_millis
 
 from hadoop.yarn.clients import get_log_client
 
 from jobbrowser.models import format_unixtime_ms
 
 
-LOGGER = logging.getLogger(__name__)
+LOG = logging.getLogger(__name__)
 
 
 class Application(object):
@@ -99,40 +100,70 @@ class Application(object):
 
 class SparkJob(Application):
 
-  def __init__(self, job, api=None):
-    super(SparkJob, self).__init__(job, api)
-    self._scrape()
-
-  def _history_application_metrics(self, html_doc):
-    metrics = []
-    root = html.fromstring(html_doc)
-    tables = root.findall('.//table')
-    metrics_table = tables[2].findall('.//tr')
-    for tr in metrics_table:
-        header = tr.find('.//th')
-        value = tr.findall('.//td')
-        if value:
-          header = header.text.strip().replace(':', '')
-          value = value[0].text.strip()
-          metrics.append({
-            'header': header,
-            'value': value
-          })
-    return metrics
-
-  def _scrape(self):
-    # XXX: we have to scrape the tracking URL directly because
-    # spark jobs don't have a JSON api via YARN or app server
-    # see YARN-1530, SPARK-1537 for progress on these apis
-    self.scrapedData = {}
+  def __init__(self, job, rm_api=None, hs_api=None):
+    super(SparkJob, self).__init__(job, rm_api)
+    self._resolve_tracking_url()
+    if self.state not in ('NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING') and hs_api:
+      self.history_server_api = hs_api
+      self._get_metrics()
+
+  @property
+  def logs_url(self):
+    return os.path.join(self.trackingUrl, 'executors')
+
+  @property
+  def attempt_id(self):
+    return self.trackingUrl.strip('/').split('/')[-1]
+
+  def _resolve_tracking_url(self):
+    try:
+      resp = urllib2.urlopen(self.trackingUrl)
+      actual_url = resp.url
+      if actual_url.strip('/').split('/')[-1] == 'jobs':
+        actual_url = actual_url.strip('/').replace('jobs', '')
+      self.trackingUrl = actual_url
+    except Exception, e:
+      LOG.warn("Failed to resolve Spark Job's actual tracking URL: %s" % e)
+
+  def _get_metrics(self):
+    self.metrics = {}
     try:
-      res = urllib2.urlopen(self.trackingUrl)
-      html_doc = res.read()
-      if self.trackingUI == 'History':
-        self.scrapedData['metrics'] = self._history_application_metrics(html_doc)
+      executors = self.history_server_api.executors(self.jobId, self.attempt_id)
+      if executors:
+        self.metrics['headers'] = [
+          _('Executor Id'),
+          _('Address'),
+          _('RDD Blocks'),
+          _('Storage Memory'),
+          _('Disk Used'),
+          _('Active Tasks'),
+          _('Failed Tasks'),
+          _('Complete Tasks'),
+          _('Task Time'),
+          _('Input'),
+          _('Shuffle Read'),
+          _('Shuffle Write'),
+          _('Logs')]
+        self.metrics['executors'] = []
+        for e in executors:
+          self.metrics['executors'].append([
+            e.get('id', 'N/A'),
+            e.get('hostPort', ''),
+            e.get('rddBlocks', ''),
+            '%s / %s' % (big_filesizeformat(e.get('memoryUsed', 0)), big_filesizeformat(e.get('maxMemory', 0))),
+            big_filesizeformat(e.get('diskUsed', 0)),
+            e.get('activeTasks', ''),
+            e.get('failedTasks', ''),
+            e.get('completedTasks', ''),
+            format_duration_in_millis(e.get('totalDuration', 0)),
+            big_filesizeformat(e.get('totalInputBytes', 0)),
+            big_filesizeformat(e.get('totalShuffleRead', 0)),
+            big_filesizeformat(e.get('totalShuffleWrite', 0)),
+            e.get('executorLogs', '')
+          ])
     except Exception, e:
+      LOG.error('Failed to get Spark Job executors: %s' % e)
       # Prevent a nosedive. Don't create metrics if api changes or url is unreachable.
-      self.scrapedData['metrics'] = []
 
 
 class Job(object):
@@ -143,7 +174,7 @@ class Job(object):
     for attr in attrs.keys():
       if attr == 'acls':
         # 'acls' are actually not available in the API
-        LOGGER.warn('Not using attribute: %s' % attrs[attr])
+        LOG.warn('Not using attribute: %s' % attrs[attr])
       else:
         setattr(self, attr, attrs[attr])
 
@@ -379,7 +410,7 @@ class Attempt:
         try:
           debug_info = '\nLog Link: %s' % log_link
           debug_info += '\nHTML Response: %s' % response
-          LOGGER.error(debug_info)
+          LOG.error(debug_info)
         except:
           LOG.exception('failed to build debug info')
 

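In yarn_models.py, _resolve_tracking_url() follows the ResourceManager redirect to the history server UI and trims the trailing 'jobs' segment, after which attempt_id and logs_url are simple path manipulations. A sketch of that string handling with a made-up URL:

# Sketch only: what SparkJob derives from the resolved tracking URL
# (the history-server host and ids below are made up).
import os

tracking_url = 'http://historyserver:18088/history/application_1428611387971_0001/1/jobs'
base_url = tracking_url.strip('/').replace('jobs', '')  # _resolve_tracking_url result
attempt_id = base_url.strip('/').split('/')[-1]         # '1', passed to the REST client
logs_url = os.path.join(base_url, 'executors')          # target of the Logs sidebar link
print base_url, attempt_id, logs_url
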
+ 3 - 0
desktop/conf.dist/hue.ini

@@ -714,6 +714,9 @@
       # URL of the HistoryServer API
       ## history_server_api_url=http://localhost:19888
 
+      # URL of the Spark History Server
+      ## spark_history_server_url=http://localhost:18088
+
       # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
       # have to be verified against certificate authority
       ## ssl_cert_ca_verify=True

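The new property sits next to the other YARN URLs; to point the Job Browser at a specific Spark History Server, uncomment it under the cluster's section. A minimal sketch, assuming the standard hue.ini layout and a made-up host:

[hadoop]
  [[yarn_clusters]]
    [[[default]]]
      # REST endpoint of the Spark History Server used by the Job Browser
      spark_history_server_url=http://spark-history.example.com:18088
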
+ 3 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -716,6 +716,9 @@
       # URL of the HistoryServer API
       ## history_server_api_url=http://localhost:19888
 
+      # URL of the Spark History Server
+      ## spark_history_server_url=http://localhost:18088
+
       # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
       # have to be verified against certificate authority
       ## ssl_cert_ca_verify=True

+ 3 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -146,6 +146,9 @@ YARN_CLUSTERS = UnspecifiedConfigSection(
       HISTORY_SERVER_API_URL=Config("history_server_api_url",
                   default='http://localhost:19888',
                   help="URL of the HistoryServer API"),
+      SPARK_HISTORY_SERVER_URL=Config("spark_history_server_url",
+                  default='http://localhost:18088',
+                  help="URL of the Spark History Server"),
       SSL_CERT_CA_VERIFY=Config("ssl_cert_ca_verify",
                   help="In secure mode (HTTPS), if SSL certificates from YARN Rest APIs have to be verified against certificate authority",
                   dynamic_default=default_ssl_validate,

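The Config entry above is how the new module below picks up the URL; reading it back mirrors the get_history_server_api() code further down:

# Sketch only: reading the new setting the same way
# spark_history_server_api.get_history_server_api() does.
from hadoop import cluster

yarn_cluster = cluster.get_cluster_conf_for_job_submission()
print yarn_cluster.SPARK_HISTORY_SERVER_URL.get()  # 'http://localhost:18088' unless overridden
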
+ 95 - 0
desktop/libs/hadoop/src/hadoop/yarn/spark_history_server_api.py

@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import posixpath
+import threading
+
+from desktop.lib.rest.http_client import HttpClient
+from desktop.lib.rest.resource import Resource
+from hadoop import cluster
+
+
+LOG = logging.getLogger(__name__)
+DEFAULT_USER = 'hue'
+
+_API_VERSION = 'v1'
+_JSON_CONTENT_TYPE = 'application/json'
+
+_api_cache = None
+_api_cache_lock = threading.Lock()
+
+
+def get_history_server_api():
+  global _api_cache
+  if _api_cache is None:
+    _api_cache_lock.acquire()
+    try:
+      if _api_cache is None:
+        yarn_cluster = cluster.get_cluster_conf_for_job_submission()
+        _api_cache = SparkHistoryServerApi(yarn_cluster.SPARK_HISTORY_SERVER_URL.get(), yarn_cluster.SECURITY_ENABLED.get(), yarn_cluster.SSL_CERT_CA_VERIFY.get())
+    finally:
+      _api_cache_lock.release()
+  return _api_cache
+
+
+class SparkHistoryServerApi(object):
+
+  def __init__(self, spark_hs_url, security_enabled=False, ssl_cert_ca_verify=False):
+    self._ui_url = spark_hs_url
+    self._url = posixpath.join(spark_hs_url, 'api/%s/' % _API_VERSION)
+    self._client = HttpClient(self._url, logger=LOG)
+    self._root = Resource(self._client)
+    self._security_enabled = security_enabled
+
+    if self._security_enabled:
+      self._client.set_kerberos_auth()
+
+    self._client.set_verify(ssl_cert_ca_verify)
+
+  def __str__(self):
+    return "Spark History Server API at %s" % (self._url,)
+
+  @property
+  def url(self):
+    return self._url
+
+  @property
+  def ui_url(self):
+    return self._ui_url
+
+  @property
+  def headers(self):
+    return {'Accept': _JSON_CONTENT_TYPE}
+
+  def applications(self):
+    return self._root.get('applications', headers=self.headers)
+
+  def application(self, app_id):
+    return self._root.get('applications/%(app_id)s' % {'app_id': app_id}, headers=self.headers)
+
+  def jobs(self, app_id, attempt_id):
+    return self._root.get('applications/%(app_id)s/%(attempt_id)s/jobs' % {'app_id': app_id, 'attempt_id': attempt_id}, headers=self.headers)
+
+  def stages(self, app_id, attempt_id):
+    return self._root.get('applications/%(app_id)s/%(attempt_id)s/stages' % {'app_id': app_id, 'attempt_id': attempt_id}, headers=self.headers)
+
+  def executors(self, app_id, attempt_id):
+    return self._root.get('applications/%(app_id)s/%(attempt_id)s/executors' % {'app_id': app_id, 'attempt_id': attempt_id}, headers=self.headers)
+
+  # TODO: stage attempts, task summaries, task list, storage, download logs
+  # http://spark.apache.org/docs/latest/monitoring.html#rest-api
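
Using the new client takes a couple of lines; a short sketch with placeholder application and attempt ids:

# Sketch only: fetching executor data through the cached client.
# The application and attempt ids below are placeholders.
from hadoop.yarn import spark_history_server_api

api = spark_history_server_api.get_history_server_api()
executors = api.executors('application_1428611387971_0001', '1')
for e in executors:
    print e.get('id'), e.get('hostPort'), e.get('completedTasks', 0)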