
HUE-7258 [jb] Fix fetching Spark job logs in Kerberized environment

Ying Chen 7 years ago
parent commit 9a88856

+ 2 - 2
apps/jobbrowser/src/jobbrowser/apis/job_api.py

@@ -504,11 +504,11 @@ class SparkExecutorApi(Api):
        "logs": executor['logs']
     }
 
-  def logs(self, appid, app_type, log_name, offset=LOG_OFFSET_BYTES, is_embeddable=False):
+  def logs(self, appid, app_type, log_name, is_embeddable=False):
     log = ""
 
     if self._executors and self._executors[0]:
-      log = self.history_server_api.download_executor_logs(self.user, self._executors[0], log_name, offset)
+      log = self.history_server_api.download_executor_logs(self.user, self._executors[0], log_name, LOG_OFFSET_BYTES)
     return {
        "logs": log
     }
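
A minimal usage sketch of the revised method, assuming an already-constructed SparkExecutorApi instance (the variable names `api` and `appid` are hypothetical; only logs() and LOG_OFFSET_BYTES come from the change above):

  # The offset argument is gone from the signature: the call now always reads
  # LOG_OFFSET_BYTES bytes internally rather than a caller-supplied offset.
  response = api.logs(appid, 'SPARK', 'stdout', is_embeddable=True)
  executor_log = response['logs']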

+ 1 - 1
apps/jobbrowser/src/jobbrowser/tests.py

@@ -1087,7 +1087,7 @@ class MockSparkHistoryApi(SparkHistoryServerApi):
 
   def executors(self, job):
     EXECUTORS_LISTS = {
-      u'application_1513618343677_0018': [{
+      u'application_1513618343677_0018/1': [{
         u'diskUsed': 0,
         u'totalShuffleWrite': 0,
         u'totalCores': 0,

+ 22 - 3
apps/jobbrowser/src/jobbrowser/yarn_models.py

@@ -19,16 +19,18 @@ import logging
 import os
 import re
 import time
-import urllib2
 import urlparse
 
 from lxml import html
 
 from django.utils.translation import ugettext as _
 
+from desktop.lib.exceptions_renderable import PopupException
+from desktop.lib.rest.http_client import HttpClient
 from desktop.lib.rest.resource import Resource
 from desktop.lib.view_util import big_filesizeformat, format_duration_in_millis
 
+from hadoop import cluster
 from hadoop.yarn.clients import get_log_client
 
 from itertools import izip
@@ -129,17 +131,34 @@ class SparkJob(Application):
   def _resolve_tracking_url(self):
     resp = None
     try:
-      resp = urllib2.urlopen(self.trackingUrl, timeout=5.0)
-      actual_url = resp.url
+      self._client = HttpClient(self.trackingUrl, logger=LOG)
+      self._root = Resource(self._client)
+      yarn_cluster = cluster.get_cluster_conf_for_job_submission()
+      self._security_enabled = yarn_cluster.SECURITY_ENABLED.get()
+      if self._security_enabled:
+        self._client.set_kerberos_auth()
+
+      self._client.set_verify(yarn_cluster.SSL_CERT_CA_VERIFY.get())
+      actual_url = self._execute(self._root.resolve_redirect_url())
+
       if actual_url.strip('/').split('/')[-1] == 'jobs':
         actual_url = actual_url.strip('/').replace('jobs', '')
       self.trackingUrl = actual_url
+      LOG.debug("SparkJob tracking URL: %s" % self.trackingUrl)
     except Exception, e:
       LOG.warn("Failed to resolve Spark Job's actual tracking URL: %s" % e)
     finally:
       if resp is not None:
         resp.close()
 
+  def _execute(self, function, *args, **kwargs):
+    response = None
+    try:
+      response = function(*args, **kwargs)
+    except Exception, e:
+      LOG.warn('Spark resolve tracking URL returned a failed response: %s' % e)
+    return response
+
   def _get_metrics(self):
     self.metrics = {}
     try:
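
The core of the fix above: the Spark tracking URL is now resolved through Hue's HttpClient/Resource stack, so SPNEGO (Kerberos) authentication and TLS certificate verification follow the YARN cluster configuration instead of going through a plain urllib2.urlopen() call. A condensed sketch of the pattern, using only the helpers that appear in the diff (the standalone function and the LOG setup are illustrative):

  import logging

  from desktop.lib.rest.http_client import HttpClient
  from desktop.lib.rest.resource import Resource
  from hadoop import cluster

  LOG = logging.getLogger(__name__)

  def resolve_tracking_url(tracking_url):
    client = HttpClient(tracking_url, logger=LOG)
    yarn_cluster = cluster.get_cluster_conf_for_job_submission()
    if yarn_cluster.SECURITY_ENABLED.get():
      client.set_kerberos_auth()                              # SPNEGO, required on Kerberized clusters
    client.set_verify(yarn_cluster.SSL_CERT_CA_VERIFY.get())  # honor the configured CA verification setting
    return Resource(client).resolve_redirect_url()            # follow the RM proxy redirect to the final URL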

+ 28 - 1
desktop/core/src/desktop/lib/rest/resource.py

@@ -66,6 +66,19 @@ class Resource(object):
       return resp.content
 
   def invoke(self, method, relpath=None, params=None, data=None, headers=None, files=None, allow_redirects=False, clear_cookies=False, log_response=True):
+    resp = self._invoke(method=method,
+                        relpath=relpath,
+                        params=params,
+                        data=data,
+                        headers=headers,
+                        files=files,
+                        allow_redirects=allow_redirects,
+                        clear_cookies=clear_cookies,
+                        log_response=log_response)
+
+    return self._format_response(resp)
+
+  def _invoke(self, method, relpath=None, params=None, data=None, headers=None, files=None, allow_redirects=False, clear_cookies=False, log_response=True):
     """
     Invoke an API method.
     @return: Raw body or JSON dictionary (if response content type is JSON).
@@ -92,7 +105,8 @@ class Resource(object):
            len(resp.content) > 1000 and "..." or "")
       )
 
-    return self._format_response(resp)
+    return resp
+
 
   def get(self, relpath=None, params=None, headers=None, clear_cookies=False):
     """
@@ -161,3 +175,16 @@ class Resource(object):
       headers.update({'Content-Type': contenttype})
 
     return headers
+
+  def resolve_redirect_url(self, method="GET", relpath=None, params=None, data=None, headers=None, files=None, allow_redirects=True, clear_cookies=False, log_response=True):
+    resp = self._invoke(method=method,
+                        relpath=relpath,
+                        params=params,
+                        data=data,
+                        headers=headers,
+                        files=files,
+                        allow_redirects=allow_redirects,
+                        clear_cookies=clear_cookies,
+                        log_response=log_response)
+
+    return resp.url.encode("utf-8")
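
resolve_redirect_url() reuses _invoke() but, unlike invoke(), follows redirects by default and returns the final response URL rather than a formatted body. A hedged usage sketch (the ResourceManager proxy address below is made up for illustration; set_kerberos_auth() is only needed on a secured cluster):

  client = HttpClient('http://rm-host:8088/proxy/application_1513618343677_0018/', logger=LOG)
  client.set_kerberos_auth()
  actual_url = Resource(client).resolve_redirect_url()
  # actual_url is the URL the proxy ultimately redirects to, e.g. the Spark history server page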

+ 15 - 15
desktop/libs/hadoop/src/hadoop/yarn/spark_history_server_api.py

@@ -100,6 +100,7 @@ class SparkHistoryServerApi(object):
     return self._root.get('applications/%(app_id)s/stages' % {'app_id': app_id}, headers=self.headers)
 
   def executors(self, job):
+    LOG.debug("Getting executors for Spark job %s" % job.jobId)
     app_id = self.get_real_app_id(job)
     if not app_id:
       return []
@@ -174,24 +175,23 @@ class SparkHistoryServerApi(object):
     # When running a job in cluster mode, an attempt number is part of the application ID, but the proxy URL can't be resolved to match
     # the Spark history URL. In the applications list, each job's attempt list shows whether an attempt ID is used and how many attempts exist.
 
-    app_id = job.jobId if job.jobId == job.attempt_id else job.jobId + '/' + job.attempt_id
-    if job.status not in ('SUCCEEDED', 'FAILED'):
-      try:
-        jobs_json = self.applications()
-        job_filtered_json = [x for x in jobs_json if x['id'] == job.jobId]
+    try:
+      jobs_json = self.applications()
+      job_filtered_json = [x for x in jobs_json if x['id'] == job.jobId]
 
-        if not job_filtered_json:
-          return {}
+      if not job_filtered_json:
+        return {}
 
-        attempts = job_filtered_json[0]['attempts']
+      attempts = job_filtered_json[0]['attempts']
 
-        if len(attempts) == 1:
-          app_id = job.jobId if 'attemptId' not in attempts[0] else job.jobId + '/' + attempts[0]['attemptId']
-        else:
-          app_id = job.jobId + '/%d' % len(attempts)
+      if len(attempts) == 1:
+        app_id = job.jobId if 'attemptId' not in attempts[0] else job.jobId + '/' + attempts[0]['attemptId']
+      else:
+        app_id = job.jobId + '/%d' % len(attempts)
 
-      except Exception as e:
-        LOG.error('Cannot get executors %s' % e)
-        app_id = None
+      LOG.debug("Getting real spark app id %s for Spark job %s" % (app_id, job.jobId))
+    except Exception as e:
+      LOG.error('Cannot get real app id %s: %s' % (job.jobId, e))
+      app_id = None
 
     return app_id
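
A worked example of the reworked get_real_app_id() logic (the application ID matches the one used in the updated test; the attempt dictionaries are illustrative):

  # attempts == [{u'attemptId': u'1', ...}]        -> 'application_1513618343677_0018/1'
  # attempts == [{...}] with no 'attemptId' key    -> 'application_1513618343677_0018'
  # attempts == [{...}, {...}] (two attempts)      -> 'application_1513618343677_0018/2'
  #
  # Unlike before, the lookup now also runs for jobs that have already finished
  # (SUCCEEDED/FAILED), which is what lets executor logs be fetched after the
  # application has completed.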