Browse Source

HUE-1178 [jb] Smarter logic for getting MR2 job information

Romain Rigaux 12 years ago
parent
commit
4ba5fe9e8d

+ 12 - 6
apps/filebrowser/src/filebrowser/views.py

@@ -1170,6 +1170,7 @@ def _upload_file(request):
     We just need to rename it to the destination path.
     """
     form = UploadFileForm(request.POST, request.FILES)
+    response = {'status': -1, 'data': ''}
 
     if request.META.get('upload_failed'):
       raise PopupException(request.META.get('upload_failed'))
@@ -1188,6 +1189,7 @@ def _upload_file(request):
         try:
             # Remove tmp suffix of the file
             request.fs.do_as_user(username, request.fs.rename, tmp_file, dest)
+            response['status'] = 0
         except IOError, ex:
             already_exists = False
             try:
@@ -1200,12 +1202,13 @@ def _upload_file(request):
                 msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
             raise PopupException(msg)
 
-        return {
-          'status': 0,
+        response.update({
           'path': dest,
           'result': _massage_stats(request, request.fs.stats(dest)),
           'next': request.GET.get("next")
-          }
+        })
+
+        return response
     else:
         raise PopupException(_("Error in upload form: %s") % (form.errors,))
 
@@ -1248,6 +1251,7 @@ def _upload_archive(request):
     We need to extract it and rename it.
     """
     form = UploadArchiveForm(request.POST, request.FILES)
+    response = {'status': -1, 'data': ''}
 
     if form.is_valid():
         uploaded_file = request.FILES['archive']
@@ -1268,6 +1272,7 @@ def _upload_archive(request):
                 dest = dest[:-4]
                 request.fs.copyFromLocal(temp_path, dest)
                 shutil.rmtree(temp_path)
+                response['status'] = 0
             else:
                 raise PopupException(_('Could not interpret archive type.'))
 
@@ -1283,12 +1288,13 @@ def _upload_archive(request):
                 msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
             raise PopupException(msg)
 
-        return {
-          'status': 0,
+        response.update({
           'path': dest,
           'result': _massage_stats(request, request.fs.stats(dest)),
           'next': request.GET.get("next")
-          }
+        })
+
+        return response
     else:
         raise PopupException(_("Error in upload form: %s") % (form.errors,))
 
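Note: _upload_file now seeds a failure response ({'status': -1, 'data': ''}) up front and only flips 'status' to 0 after the HDFS rename succeeds, so every exit path returns the same JSON shape. A minimal sketch of how a caller might branch on that shape; the handle_upload_response helper is hypothetical:

    import json

    def handle_upload_response(content):
        # Hypothetical helper: parse the JSON body produced by _upload_file.
        response = json.loads(content)
        if response['status'] == 0:
            # Success: 'path', 'result' and 'next' were merged in via update().
            return response['path']
        # Failure: 'status' stayed -1 and 'data' may carry an error message.
        raise ValueError(response.get('data') or 'upload failed')
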

+ 7 - 8
apps/filebrowser/src/filebrowser/views_test.py

@@ -1006,19 +1006,18 @@ def test_upload_file():
     assert_equal(actual, expected)
 
     # Upload again and it fails because the file already exists
-    resp = client.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
+    resp = client.post('/filebrowser/upload/file',
                        dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
     response = json.loads(resp.content)
     assert_equal(-1, response['status'], response)
     assert_true('already exists' in response['data'], response)
 
-    # Upload in tmp and fails because of missing permissions
-    try:
-      resp = client_not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
-                                dict(dest=HDFS_DEST_DIR, hdfs_file=file(LOCAL_FILE)))
-      raise Exception('Should have sent a permissions exception!')
-    except Exception, e:
-      assert_true('Permission denied' in str(e), e)
+    # Upload to / and it fails because of missing permissions
+    resp = client_not_me.post('/filebrowser/upload/file',
+                              dict(dest='/', hdfs_file=file(LOCAL_FILE)))
+    response = json.loads(resp.content)
+    assert_equal(-1, response['status'], response)
+    assert_true('Permission denied' in response['data'], response)
   finally:
     try:
       cluster.fs.remove(HDFS_DEST_DIR)

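Note: because a failed upload now surfaces as a JSON error instead of an exception (see the upload handler change below), the test posts normally and asserts on the decoded body. A hedged sketch of that assertion pattern; assert_upload_denied is a hypothetical wrapper around the code used in test_upload_file():

    import json

    def assert_upload_denied(client, dest, local_file):
        # Hypothetical wrapper: post the form, decode the JSON body, and
        # assert on the error fields instead of expecting the view to raise.
        resp = client.post('/filebrowser/upload/file',
                           dict(dest=dest, hdfs_file=file(local_file)))
        response = json.loads(resp.content)
        assert response['status'] == -1, response
        assert 'Permission denied' in response['data'], response
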
+ 16 - 10
apps/jobbrowser/src/jobbrowser/api.py

@@ -31,6 +31,7 @@ from jobbrowser.conf import SHARE_JOBS
 from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
 from jobbrowser.yarn_models import Application, Job as YarnJob, Container
 from hadoop.cluster import get_next_ha_mrcluster
+from desktop.lib.exceptions_renderable import PopupException
 
 
 LOG = logging.getLogger(__name__)
@@ -229,18 +230,23 @@ class YarnApi(JobBrowserApi):
                   job.user == user.username, jobs)
 
   def get_job(self, jobid):
-    """
-    Try first as if it was a running job, then as a finished job.
-    """
     try:
-      if jobid.startswith('application'):
-        json = self.mapreduce_api.job(self.user, jobid.replace('application', 'job'))
-        return YarnJob(self.mapreduce_api, json['job'])
+      # App id
+      jobid = jobid.replace('job', 'application')
+      job = self.resource_manager_api.app(jobid)['app']
+
+      # MR id
+      jobid = jobid.replace('application', 'job')
+      if job['state'] in ('NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING'):
+        json = self.mapreduce_api.job(self.user, jobid)
+        job = YarnJob(self.mapreduce_api, json['job'])
+      else:
+        json = self.history_server_api.job(self.user, jobid)
+        job = YarnJob(self.history_server_api, json['job'])
     except Exception, e:
-      LOG.info('Job %s not running: %s' % (jobid, e))
-    jobid = jobid.replace('application', 'job')
-    json = self.history_server_api.job(self.user, jobid)
-    return YarnJob(self.history_server_api, json['job'])
+      raise PopupException('Job %s could not be found: %s' % (jobid, e), detail=e)
+
+    return job
 
   def get_tasks(self, jobid, **filters):
     filters.pop('pagenum')

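Note: get_job() no longer guesses from the id prefix; it asks the ResourceManager for the application's state and routes the detail lookup accordingly, wrapping any failure in a PopupException. A condensed sketch of just the routing decision; pick_mr2_api is a hypothetical helper:

    RUNNING_STATES = ('NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING')

    def pick_mr2_api(api, app_state):
        # Hypothetical helper isolating the decision made in get_job():
        # live applications are served by the MapReduce AM API,
        # everything else by the history server.
        if app_state in RUNNING_STATES:
            return api.mapreduce_api
        return api.history_server_api
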
+ 1 - 2
apps/jobbrowser/src/jobbrowser/models.py

@@ -525,8 +525,7 @@ class Cluster(object):
     self.blacklistedTrackerNames = self.status.blacklistedTrackerNames
     self.hostname = self.status.hostname
     self.httpPort = self.status.httpPort
-    # self.currentTimeMs = curtime
-    # self.currentTimeFormatted = format_unixtime_ms(curtime)
+
 
 def get_jobconf(jt, jobid):
   """

+ 2 - 11
apps/jobbrowser/src/jobbrowser/yarn_models.py

@@ -41,11 +41,8 @@ class Application:
     self.is_mr2 = True
     jobid = self.id
     if self.state in ('FINISHED', 'FAILED', 'KILLED'):
-      # When a job is finished
-      jobid = jobid.replace('application', 'job')
       setattr(self, 'status', self.finalStatus)
     else:
-      jobid = jobid.replace('job', 'application')
       setattr(self, 'status', self.state)
     setattr(self, 'jobId', jobid)
     setattr(self, 'jobId_short', re.sub('(application|job)_', '', self.jobId))
@@ -82,13 +79,7 @@ class Job:
   def _fixup(self):
     jobid = self.id
 
-    if self.state in ('SUCCEEDED', 'FAILED', 'KILL_WAIT', 'KILLED', 'ERROR'):
-      setattr(self, 'status', self.state)
-      # When a job is finished, just use 'job' instead of 'application'
-      jobid = jobid.replace('application', 'job')
-    else:
-      jobid = jobid.replace('job', 'application')
-      setattr(self, 'status', self.state)
+    setattr(self, 'status', self.state)
     setattr(self, 'jobId', jobid)
     setattr(self, 'jobId_short', self.jobId.replace('job_', ''))
     setattr(self, 'is_retired', True)
@@ -192,7 +183,7 @@ class Attempt:
   def _fixup(self):
     setattr(self, 'attemptId', self.id)
     setattr(self, 'attemptId_short', self.id)
-    setattr(self, 'taskTrackerId', self.assignedContainerId)
+    setattr(self, 'taskTrackerId', getattr(self, 'assignedContainerId', None))
     setattr(self, 'startTimeFormatted', self.startTime)
     setattr(self, 'finishTimeFormatted', self.finishTime)
     setattr(self, 'outputSize', None)

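Note: the Attempt fix-up now tolerates attempts that were never assigned a container by reading assignedContainerId through getattr() with a None default. A minimal illustration; FakeAttempt is a hypothetical stand-in:

    class FakeAttempt(object):
        # Hypothetical stand-in for an Attempt built from a REST payload
        # that omits 'assignedContainerId' (e.g. an unassigned attempt).
        pass

    attempt = FakeAttempt()
    # Before the change, attempt.assignedContainerId raised AttributeError;
    # the getattr() form now falls back to None instead.
    assert getattr(attempt, 'assignedContainerId', None) is None
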
+ 1 - 1
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -146,7 +146,7 @@ class HDFSfileUploadHandler(FileUploadHandler):
 
   def receive_data_chunk(self, raw_data, start):
     if not self._activated:
-      raise StopUpload()
+      return raw_data
 
     try:
       self._file.write(raw_data)

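Note: in Django's upload pipeline, receive_data_chunk() returning the chunk passes it along to the next handler in the chain, while raising StopUpload aborts the whole request; a non-activated HDFSfileUploadHandler now steps aside instead of killing the upload. A minimal sketch of that contract; PassThroughHandler is hypothetical:

    from django.core.files.uploadhandler import FileUploadHandler

    class PassThroughHandler(FileUploadHandler):
        # Hypothetical handler illustrating the chunk-passing contract:
        # returning the data lets later handlers in the chain see the
        # chunk; returning None would swallow it.
        def receive_data_chunk(self, raw_data, start):
            return raw_data

        def file_complete(self, file_size):
            # This handler never produces the final file object itself.
            return None
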
+ 3 - 0
desktop/libs/hadoop/src/hadoop/yarn/resource_manager_api.py

@@ -67,3 +67,6 @@ class ResourceManagerApi(object):
 
   def apps(self, **kwargs):
     return self._root.get('cluster/apps', params=kwargs, headers={'Accept': _JSON_CONTENT_TYPE})
+
+  def app(self, app_id):
+    return self._root.get('cluster/apps/%(app_id)s' % {'app_id': app_id}, headers={'Accept': _JSON_CONTENT_TYPE})
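Note: the new app() call fetches a single application; assuming _root is anchored at the ResourceManager's /ws/v1 prefix like apps(), this corresponds to GET /ws/v1/cluster/apps/{app_id} in the YARN ResourceManager REST API. A hedged usage sketch; the api instance and application id are made up:

    # Assumed usage; 'api' is a ResourceManagerApi wired to a live
    # ResourceManager, and the application id is fabricated.
    app = api.app('application_1377000000000_0001')['app']
    print app['state'], app['finalStatus']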