
HUE-7803 [jb] Remove Job Browser API for MR1

Romain Rigaux, 8 years ago
parent commit 2ec56041d4

+ 2 - 110
apps/jobbrowser/src/jobbrowser/api.py

@@ -24,9 +24,7 @@ from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.paginator import Paginator
 from desktop.lib.rest.http_client import RestException
 
-from hadoop import cluster
-from hadoop.cluster import jt_ha, rm_ha
-from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
+from hadoop.cluster import rm_ha
 
 import hadoop.yarn.history_server_api as history_server_api
 import hadoop.yarn.mapreduce_api as mapreduce_api
@@ -35,7 +33,6 @@ import hadoop.yarn.resource_manager_api as resource_manager_api
 import hadoop.yarn.spark_history_server_api as spark_history_server_api
 
 from jobbrowser.conf import SHARE_JOBS
-from jobbrowser.models import Job, JobLinkage, TaskList, Tracker
 from jobbrowser.yarn_models import Application, Job as YarnJob, KilledJob as KilledYarnJob, Container, SparkJob
 
 
@@ -45,10 +42,7 @@ _DEFAULT_OBJ_PER_PAGINATION = 10
 
 
 def get_api(user, jt):
-  if cluster.is_yarn():
-    return YarnApi(user)
-  else:
-    return JtApi(jt)
+  return YarnApi(user)
 
 
 class JobBrowserApi(object):
@@ -58,108 +52,6 @@ class JobBrowserApi(object):
     return paginator.page(pagenum)
 
 
-class JtApi(JobBrowserApi):
-  def __init__(self, jt):
-    self.jt = jt
-
-  @jt_ha
-  def get_job_link(self, jobid):
-    return JobLinkage(self.jt, jobid)
-
-  @jt_ha
-  def get_job(self, jobid):
-    return Job.from_id(jt=self.jt, jobid=jobid)
-
-  @jt_ha
-  def get_jobs(self, user, **kwargs):
-    """
-    Returns an array of jobs where the returned
-    jobs are matched by the provided filter arguments.
-
-    If a filter argument is in kwargs it will supersede the same argument
-    in the request object.
-
-    Filter arguments may be jobid, pools, user, tasks, text and state.
-
-    Filter by user ownership if check_permission is set to true.
-    """
-    jobfunc = {
-       "completed" : (self.jt.completed_jobs, ThriftJobState.SUCCEEDED),
-       # Succeeded and completed are synonyms here.
-       "succeeded" : (self.jt.completed_jobs, ThriftJobState.SUCCEEDED),
-       "running" : (self.jt.running_jobs, ThriftJobState.RUNNING),
-       "failed" : (self.jt.failed_jobs, ThriftJobState.FAILED),
-       "killed" : (self.jt.killed_jobs, ThriftJobState.KILLED),
-       "all" : (self.jt.all_jobs, None),
-       None : (self.jt.all_jobs, None)
-    }
-
-    selection = kwargs.pop('state')
-    retired = kwargs.pop('retired')
-
-    jobs = jobfunc[selection][0]().jobs
-
-    if retired:
-      jobs += self.jt.retired_jobs(jobfunc[selection][1]).jobs
-
-    return self.filter_jobs(user, jobs, **kwargs)
-
-  @jt_ha
-  def filter_jobs(self, user, jobs, **kwargs):
-    check_permission = not SHARE_JOBS.get() and not user.is_superuser
-
-    limit = kwargs.pop('limit', 10000)
-
-    return [Job.from_thriftjob(self.jt, j)
-            for j in self._filter_jobs(jobs, **kwargs)
-            if not check_permission or user.is_superuser or j.profile.user == user.username][:limit]
-
-  def _filter_jobs(self, jobs, username=None, text=None):
-    def predicate(job):
-      """
-      Return True if a ThriftJobInProgress structure matches the supplied filters.
-
-      If a filter argument is None, everything matches it.
-      """
-      if username and username not in job.profile.user:
-        return False
-
-      if text:
-        search = text.lower()
-        # These fields are chosen to match those displayed by the JT UI
-        saw_text = False
-        for t in [job.profile.user,
-                  job.profile.name,
-                  job.jobID.asString,
-                  job.profile.queueName,
-                  job.priorityAsString
-                  ]:
-          if search in t.lower():
-            saw_text = True
-            break
-
-        if not saw_text:
-          return False
-
-      return True
-
-    return filter(predicate, jobs)
-
-  @jt_ha
-  def get_tasks(self, jobid, **filters):
-    return TaskList.select(self.jt,
-                           jobid,
-                           filters['task_types'],
-                           filters['task_states'],
-                           filters['task_text'],
-                           _DEFAULT_OBJ_PER_PAGINATION,
-                           _DEFAULT_OBJ_PER_PAGINATION * (filters['pagenum'] - 1))
-
-  @jt_ha
-  def get_tracker(self, trackerid):
-    return Tracker.from_name(self.jt, trackerid)
-
-
 class YarnApi(JobBrowserApi):
   """
   List all the jobs with Resource Manager API.

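With MR1 support removed, get_api no longer dispatches on the cluster type and always returns a YarnApi; the jt parameter survives only so existing call sites keep working. A minimal sketch of the simplified factory, with stubbed classes standing in for the real ones (not the shipped module):

class JobBrowserApi(object):
  pass

class YarnApi(JobBrowserApi):
  """Stub: the real class lists jobs via the Resource Manager API."""
  def __init__(self, user):
    self.user = user

def get_api(user, jt):
  # jt is ignored now that the JtApi (MR1 JobTracker) backend is gone.
  return YarnApi(user)

api = get_api(user='romain', jt=None)  # any jt value is accepted and unused
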
+ 3 - 582
apps/jobbrowser/src/jobbrowser/models.py

@@ -15,30 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import datetime
 import logging
-import lxml.html
-import re
-import urllib2
-
-from urlparse import urlparse, urlunparse
 
 from django.core.urlresolvers import reverse
-from desktop.conf import REST_CONN_TIMEOUT
-from desktop.lib.view_util import format_duration_in_millis
-from desktop.lib import i18n
 from django.utils.html import escape
-from filebrowser.views import location_to_url
-from hadoop import job_tracker
-from hadoop import confparse
-from hadoop.api.jobtracker.ttypes import JobNotFoundException
+from django.utils.translation import ugettext as _
 
-import hadoop.api.jobtracker.ttypes as ttypes
-from desktop.lib.exceptions_renderable import PopupException
+from filebrowser.views import location_to_url
 
-from django.utils.translation import ugettext as _
 from jobbrowser.conf import DISABLE_KILLING_JOBS
 
+
 LOG = logging.getLogger(__name__)
 
 
@@ -70,569 +57,3 @@ def can_kill_job(self, user):
     return True
 
   return user.username == self.user
-
-class JobLinkage(object):
-  """
-  A thin representation of a job, without much of the details.
-  Its purpose is to wrap a JobID to allow us to get further
-  information from Hadoop, without instantiating a full Job object
-  (which requires talking to Hadoop).
-  """
-  def __init__(self, jobtracker, jobid):
-    """
-    JobLinkage(jobtracker, jobid) -> JobLinkage
-    The jobid is the jobid string (not the thrift jobid)
-    """
-    self._jobtracker = jobtracker
-    self.jobId = jobid
-    self.jobId_short = "_".join(jobid.split("_")[-2:])
-    self.is_mr2 = False
-
-  def get_task(self, task_id):
-    """Retrieve a TaskInProgress from hadoop."""
-    ttask = self._jobtracker.get_task(
-                    self._jobtracker.thriftjobid_from_string(self.jobId),
-                    self._jobtracker.thrifttaskid_from_string(task_id))
-    return Task(ttask, self._jobtracker)
-
-class Job(JobLinkage):
-  """
-  Creates a Job instance pulled from the job tracker Thrift interface.
-  """
-
-  def __getitem__(self, item):
-    """
-    For backwards-compatibility, resolve job["foo"] as job.foo
-    """
-    return getattr(self, item)
-
-  @staticmethod
-  def from_id(jt, jobid, is_finished=False):
-    """
-      Returns a Job instance given a job tracker interface and an id. The job tracker interface is typically
-      located in request.jt.
-    """
-    try:
-      thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))
-    except JobNotFoundException:
-      try:
-        thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))
-      except JobNotFoundException, e:
-        raise PopupException(_("Could not find job with id %(jobid)s.") % {'jobid': jobid}, detail=e)
-
-    return Job(jt, thriftjob)
-
-  @staticmethod
-  def from_thriftjob(jt, thriftjob):
-    """
-      Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.
-      The job tracker interface is typically located in request.jt
-    """
-    return Job(jt, thriftjob)
-
-  def __init__(self, jt, thriftJob):
-    """
-    Returns a Job instance given a job tracker interface and a thriftjob object returned from that
-    job tracker interface.  The job tracker interface is typically located in request.jt
-    """
-    JobLinkage.__init__(self, jt, thriftJob.jobID.asString)
-    self.jt = jt
-    self.job = thriftJob
-    self.tasks = []
-    if self.job.tasks is not None:
-      self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)
-
-    self.task_map = dict( (task.taskId, task) for task in self.tasks )
-    self._counters = None
-    self._conf_keys = None
-    self._full_job_conf = None
-    self._init_attributes()
-    self.is_retired = hasattr(thriftJob, 'is_retired')
-    self.is_mr2 = False
-    self.applicationType = 'MAPREDUCE'
-
-  @property
-  def counters(self):
-    if self.is_retired:
-      self._counters = {}
-    elif self._counters is None:
-      rollups = self.jt.get_job_counter_rollups(self.job.jobID)
-      # We get back a structure with counter lists for maps, reduces, and total
-      # and we need to invert this
-
-      def aggregate_counters(ctrs_from_jt, key, target):
-        for group in ctrs_from_jt.groups:
-          if group.name not in target:
-            target[group.name] = {
-              'name': group.name,
-              'displayName': group.displayName,
-              'counters': {}
-              }
-          agg_counters = target[group.name]['counters']
-          for counter in group.counters.itervalues():
-            if counter.name not in agg_counters:
-              agg_counters[counter.name] = {
-                'name': counter.name,
-                'displayName': counter.displayName,
-                }
-            agg_counters[counter.name][key] = counter.value
-
-      self._counters = {}
-      aggregate_counters(rollups.mapCounters, "map", self._counters)
-      aggregate_counters(rollups.reduceCounters, "reduce", self._counters)
-      aggregate_counters(rollups.jobCounters, "total", self._counters)
-    return self._counters
-
-  @property
-  def conf_keys(self):
-    if self._conf_keys is None:
-      self._initialize_conf_keys()
-    return self._conf_keys
-
-  @property
-  def full_job_conf(self):
-    if self._full_job_conf is None:
-      self._initialize_conf_keys()
-    return self._full_job_conf
-
-  def _init_attributes(self):
-    self.queueName = i18n.smart_unicode(self.job.profile.queueName)
-    self.jobName = i18n.smart_unicode(self.job.profile.name)
-    self.user = i18n.smart_unicode(self.job.profile.user)
-    self.mapProgress = self.job.status.mapProgress
-    self.reduceProgress = self.job.status.reduceProgress
-    self.setupProgress = self.job.status.setupProgress
-    self.cleanupProgress = self.job.status.cleanupProgress
-
-    if self.job.desiredMaps == 0:
-      maps_percent_complete = 0
-    else:
-      maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))
-
-    self.desiredMaps = self.job.desiredMaps
-
-    if self.job.desiredReduces == 0:
-      reduces_percent_complete = 0
-    else:
-      reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))
-
-    self.desiredReduces = self.job.desiredReduces
-    self.maps_percent_complete = maps_percent_complete
-    self.finishedMaps = self.job.finishedMaps
-    self.finishedReduces = self.job.finishedReduces
-    self.reduces_percent_complete = reduces_percent_complete
-    self.startTimeMs = self.job.startTime
-    self.startTimeFormatted = format_unixtime_ms(self.job.startTime)
-    self.launchTimeMs = self.job.launchTime
-    self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)
-
-    self.finishTimeMs = self.job.finishTime
-    self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)
-    self.status = self.job.status.runStateAsString
-    self.priority = self.job.priorityAsString
-    self.jobFile = self.job.profile.jobFile
-
-    finishTime = self.job.finishTime
-    if finishTime == 0:
-      finishTime = datetime.datetime.now()
-    else:
-      finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)
-    self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)
-
-    diff = int(finishTime.strftime("%s")) * 1000 - self.startTimeMs
-    self.durationFormatted = format_duration_in_millis(diff)
-    self.durationInMillis = diff
-
-  def kill(self):
-    self.jt.kill_job(self.job.jobID)
-
-  def get_task(self, id):
-    try:
-      return self.task_map[id]
-    except KeyError:
-      return JobLinkage.get_task(self, id)
-
-  def filter_tasks(self, task_types=None, task_states=None, task_text=None):
-    """
-    Filters the tasks of the job.
-    Pass in task_type and task_state as sets; None for "all".
-    task_text is used to search in the state, mostRecentState, and the ID.
-    """
-    assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
-    assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
-
-    def is_good_match(t):
-      if task_types is not None:
-        if t.task.taskID.taskTypeAsString.lower() not in task_types:
-          return False
-
-      if task_states is not None:
-        if t.state.lower() not in task_states:
-          return False
-
-      if task_text is not None:
-        tt_lower = task_text.lower()
-        if tt_lower not in t.state.lower() and tt_lower not in t.mostRecentState.lower() and tt_lower not in t.task.taskID.asString.lower():
-          return False
-
-      return True
-
-    return [ t for t in self.tasks if is_good_match(t) ]
-
-  def _initialize_conf_keys(self):
-    if self.is_retired:
-      self._conf_keys = {}
-      self._full_job_conf = {}
-    else:
-      conf_keys = [
-        'mapred.mapper.class',
-        'mapred.reducer.class',
-        'mapred.input.format.class',
-        'mapred.output.format.class',
-        'mapred.input.dir',
-        'mapred.output.dir',
-        ]
-      jobconf = get_jobconf(self.jt, self.jobId)
-      self._full_job_conf = jobconf
-      self._conf_keys = {}
-      for k, v in jobconf.iteritems():
-        if k in conf_keys:
-          self._conf_keys[dots_to_camel_case(k)] = v
-
-
-class TaskList(object):
-  @staticmethod
-  def select(jt, jobid, task_types, task_states, text, count, offset):
-    """
-    select(jt, jobid, task_types, task_states, text, count, offset) -> TaskList
-
-    Retrieve a TaskList from Hadoop according to the given criteria.
-    task_types is a set of job_tracker.VALID_TASK_TYPES. A value of None means everything.
-    task_states is a set of job_tracker.VALID_TASK_STATES. A value of None means everything.
-    """
-    assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
-    assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
-
-    if task_types is None:
-      task_types = job_tracker.VALID_TASK_TYPES
-    if task_states is None:
-      task_states = job_tracker.VALID_TASK_STATES
-
-    tjobid = jt.thriftjobid_from_string(jobid)
-    thrift_list = jt.get_task_list(tjobid, task_types, task_states, text, count, offset)
-    return TaskList.from_thriftTaskList(thrift_list, jt)
-
-  @staticmethod
-  def from_thriftTaskList(thrift_task_list, jobtracker):
-    """TaskList.from_thriftTaskList(thrift_task_list, jobtracker) -> TaskList
-    """
-    if thrift_task_list is None:
-      return None
-    return TaskList(thrift_task_list, jobtracker)
-
-  def __init__(self, tasklist, jobtracker):
-    self.__tasklist = tasklist                  # The thrift task list
-    self.__jt = jobtracker
-    self.__init_attributes()
-
-  def __init_attributes(self):
-    self.__tasksSoFar = [ Task(t, self.__jt) for t in self.__tasklist.tasks ]
-    self.__nTotalTasks = self.__tasklist.numTotalTasks
-
-  def __iter__(self):
-    return self.__tasksSoFar.__iter__()
-
-  def __len__(self):
-    return len(self.__tasksSoFar)
-
-  def __getitem__(self, key):
-    return self.__tasksSoFar[key]
-
-  @property
-  def tasks(self):
-    return self.__tasksSoFar
-
-  @property
-  def numTotalTasks(self):
-    return self.__nTotalTasks
-
-
-class Task(object):
-
-  def __getitem__(self, item):
-    """
-    For backwards-compatibility, resolve job["foo"] as job.foo
-    """
-    return getattr(self, item)
-
-  def __init__(self, task, jt):
-    self.task = task
-    self.jt = jt
-    self._init_attributes()
-
-    self.attempt_map = {}
-    for id, attempt in self.task.taskStatuses.iteritems():
-      ta = TaskAttempt(attempt, task=self)
-      self.attempt_map[id] = ta
-
-  @property
-  def attempts(self):
-    return self.attempt_map.values()
-
-  def _init_attributes(self):
-    self.taskType = self.task.taskID.taskTypeAsString
-    self.taskId = self.task.taskID.asString
-    self.taskId_short = "_".join(self.taskId.split("_")[-2:])
-    self.startTimeMs = self.task.startTime
-    self.startTimeFormatted = format_unixtime_ms(self.task.startTime)
-    self.execStartTimeMs = self.task.execStartTime
-    self.execStartTimeFormatted = format_unixtime_ms(self.task.execStartTime)
-    self.execFinishTimeMs = self.task.execFinishTime
-    self.execFinishTimeFormatted = format_unixtime_ms(self.task.execFinishTime)
-    self.state = self.task.state
-    assert self.state in job_tracker.VALID_TASK_STATES
-    self.progress = self.task.progress
-    self.taskId = self.task.taskID.asString
-    self.jobId = self.task.taskID.jobID.asString
-    self.taskAttemptIds = self.task.taskStatuses.keys()
-    self.mostRecentState = self.task.mostRecentState
-    self.diagnosticMap = self.task.taskDiagnosticData
-    self.counters = self.task.counters
-    self.failed = self.task.failed
-    self.complete = self.task.complete
-    self.is_mr2 = False
-
-  def get_attempt(self, id):
-    """
-    Returns a TaskAttempt for a given id.
-    """
-    return self.attempt_map[id]
-
-
-class TaskAttempt(object):
-
-  def __getitem__(self, item):
-    """
-    For backwards-compatibility, resolve task["foo"] as task.foo.
-    """
-    return getattr(self, item)
-
-  def __init__(self, task_attempt, task):
-    assert task_attempt is not None
-    self.task_attempt = task_attempt
-    self.task = task
-    self._init_attributes();
-
-  def _init_attributes(self):
-    self.taskType = self.task_attempt.taskID.taskID.taskTypeAsString
-    self.attemptId = self.task_attempt.taskID.asString
-    self.attemptId_short = "_".join(self.attemptId.split("_")[-2:])
-    self.startTimeMs = self.task_attempt.startTime
-    self.startTimeFormatted = format_unixtime_ms(self.task_attempt.startTime)
-    self.finishTimeMs = self.task_attempt.finishTime
-    self.finishTimeFormatted = format_unixtime_ms(self.task_attempt.finishTime)
-    self.state = self.task_attempt.stateAsString.lower()
-    self.taskTrackerId = self.task_attempt.taskTracker
-    self.phase = self.task_attempt.phaseAsString
-    self.progress = self.task_attempt.progress
-    self.outputSize = self.task_attempt.outputSize
-    self.shuffleFinishTimeMs = self.task_attempt.shuffleFinishTime
-    self.shuffleFinishTimeFormatted = format_unixtime_ms(self.task_attempt.shuffleFinishTime)
-    self.sortFinishTimeMs = self.task_attempt.sortFinishTime
-    self.sortFinishTimeFormatted = format_unixtime_ms(self.task_attempt.sortFinishTime)
-    self.mapFinishTimeMs = self.task_attempt.mapFinishTime # DO NOT USE, NOT VALID IN 0.20
-    self.mapFinishTimeFormatted = format_unixtime_ms(self.task_attempt.mapFinishTime)
-    self.counters = self.task_attempt.counters
-    self.is_mr2 = False
-
-  def get_tracker(self):
-    try:
-      tracker = Tracker.from_name(self.task.jt, self.taskTrackerId)
-      return tracker
-    except ttypes.TaskTrackerNotFoundException, e:
-      LOG.warn("Tracker %s not found: %s" % (self.taskTrackerId, e))
-      if LOG.isEnabledFor(logging.DEBUG):
-        all_trackers = self.task.jt.all_task_trackers()
-        for t in all_trackers.trackers:
-          LOG.debug("Available tracker: %s" % (t.trackerName,))
-      raise ttypes.TaskTrackerNotFoundException(
-                          _("Cannot look up TaskTracker %(id)s.") % {'id': self.taskTrackerId})
-
-  def get_task_log(self):
-    """
-    get_task_log(task_id) -> (stdout_text, stderr_text, syslog_text)
-
-    Retrieve the task log from the TaskTracker, at this url:
-      http://<tracker_host>:<port>/tasklog?taskid=<attempt_id>
-    Optional query string:
-      &filter=<source>  : where <source> is 'syslog', 'stdout', or 'stderr'.
-      &start=<offset>   : specify the start offset of the log section, when using a filter.
-      &end=<offset>     : specify the end offset of the log section, when using a filter.
-    """
-    tracker = self.get_tracker()
-    url = urlunparse(('http',
-                      '%s:%s' % (tracker.host, tracker.httpPort),
-                      'tasklog',
-                      None,
-                      'attemptid=%s' % (self.attemptId,),
-                      None))
-    LOG.info('Retrieving %s' % (url,))
-    try:
-      data = urllib2.urlopen(url, timeout=REST_CONN_TIMEOUT.get())
-    except urllib2.URLError:
-      raise urllib2.URLError(_("Cannot retrieve logs from TaskTracker %(id)s.") % {'id': self.taskTrackerId})
-
-    et = lxml.html.parse(data)
-    log_sections = et.findall('body/pre')
-    logs = [section.text or '' for section in log_sections]
-    if len(logs) < 3:
-      LOG.warn('Error parsing task attempt log for %s at "%s". Found %d (not 3) log sections' %
-                  (self.attemptId, url, len(log_sections)))
-      err = _("Hue encountered an error while retrieving logs from '%s'.") % (url,)
-      logs += [err] * (3 - len(logs))
-    return logs
-
-
-class Tracker(object):
-
-  def __getitem__(self, item):
-    """
-    For backwards-compatibility, resolve job["foo"] as job.foo.
-    """
-    return getattr(self, item)
-
-  @staticmethod
-  def from_name(jt, trackername):
-    return Tracker(jt.task_tracker(trackername))
-
-  def __init__(self, thrifttracker):
-    self.tracker = thrifttracker
-    self._init_attributes();
-
-  def _init_attributes(self):
-    self.trackerId = self.tracker.trackerName
-    self.httpPort = self.tracker.httpPort
-    self.host = self.tracker.host
-    self.lastSeenMs = self.tracker.lastSeen
-    self.lastSeenFormatted = format_unixtime_ms(self.tracker.lastSeen)
-    self.totalVirtualMemory = self.tracker.totalVirtualMemory
-    self.totalPhysicalMemory = self.tracker.totalPhysicalMemory
-    self.availableSpace = self.tracker.availableSpace
-    self.failureCount = self.tracker.failureCount
-    self.mapCount = self.tracker.mapCount
-    self.reduceCount = self.tracker.reduceCount
-    self.maxMapTasks = self.tracker.maxMapTasks
-    self.maxReduceTasks = self.tracker.maxReduceTasks
-    self.taskReports = self.tracker.taskReports
-    self.is_mr2 = False
-
-
-class Cluster(object):
-
-  def __getitem__(self, item):
-    """
-    For backwards-compatibility, resolve job["foo"] as job.foo
-    """
-    return getattr(self, item)
-
-  def __init__(self, jt):
-    self.status = jt.cluster_status()
-    self._init_attributes();
-
-  def _init_attributes(self):
-    self.mapTasksInProgress = self.status.mapTasks
-    self.reduceTasksInProgress = self.status.reduceTasks
-    self.maxMapTasks = self.status.maxMapTasks
-    self.maxReduceTasks = self.status.maxReduceTasks
-    self.usedHeapMemory = self.status.usedMemory
-    self.maxHeapMemory = self.status.maxMemory
-    self.clusterStartTimeMs = self.status.startTime
-    self.clusterStartTimeFormatted = format_unixtime_ms(self.status.startTime)
-    self.identifier = self.status.identifier
-    self.taskTrackerExpiryInterval = self.status.taskTrackerExpiryInterval
-    self.totalJobSubmissions = self.status.totalSubmissions
-    self.state = self.status.stateAsString
-    self.numActiveTrackers = self.status.numActiveTrackers
-    self.activeTrackerNames = self.status.activeTrackerNames
-    self.numBlackListedTrackers = self.status.numBlacklistedTrackers
-    self.blacklistedTrackerNames = self.status.blacklistedTrackerNames
-    self.hostname = self.status.hostname
-    self.httpPort = self.status.httpPort
-
-
-class LinkJobLogs(object):
-
-  @classmethod
-  def _make_hdfs_links(cls, log):
-    escaped_logs = escape(log)
-    return re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
-
-  @classmethod
-  def _make_mr_links(cls, log):
-    escaped_logs = escape(log)
-    return re.sub('(job_[0-9]{12,}_[0-9]+)', LinkJobLogs._replace_mr_link, escaped_logs)
-
-  @classmethod
-  def _make_links(cls, log):
-    escaped_logs = escape(log)
-    hdfs_links = re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
-    return re.sub('(job_[0-9]{12,}_[0-9]+)', LinkJobLogs._replace_mr_link, hdfs_links)
-
-  @classmethod
-  def _replace_hdfs_link(self, match):
-    try:
-      return '<a href="%s">%s</a>' % (location_to_url(match.group(0), strict=False), match.group(0))
-    except:
-      LOG.exception('failed to replace hdfs links: %s' % (match.groups(),))
-      return match.group(0)
-
-  @classmethod
-  def _replace_mr_link(self, match):
-    try:
-      return '<a href="%s">%s</a>' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))
-    except:
-      LOG.exception('failed to replace mr links: %s' % (match.groups(),))
-      return match.group(0)
-
-
-def get_jobconf(jt, jobid):
-  """
-  Returns a dict representation of the jobconf for the job corresponding
-  to jobid. filter_keys is an optional list of configuration keys to filter on.
-  """
-  jid = jt.thriftjobid_from_string(jobid)
-  # This will throw if the jobconf can't be found
-  xml_data = jt.get_job_xml(jid)
-  return confparse.ConfParse(xml_data)
-
-def format_unixtime_ms(unixtime):
-  """
-  Format a unix timestamp in ms to a human readable string
-  """
-  if unixtime:
-    return str(datetime.datetime.fromtimestamp(unixtime/1000).strftime("%x %X %Z"))
-  else:
-    return ""
-
-DOTS = re.compile("\.([a-z])")
-def dots_to_camel_case(dots):
-  """
-  Takes a string delimited with periods and returns a camel-case string.
-  Example: dots_to_camel_case("foo.bar.baz") //returns fooBarBaz
-  """
-  def return_upper(match):
-    return match.groups()[0].upper()
-  return str(DOTS.sub(return_upper, dots))
-
-def get_path(hdfs_url):
-  """
-  Returns the path component of an HDFS url.
-  """
-  # urlparse is lame, and only "uses_netloc" for a certain
-  # set of protocols.  So we replace hdfs with gopher:
-  if hdfs_url.startswith("hdfs://"):
-    gopher_url = "gopher://" + hdfs_url[7:]
-    path = urlparse(gopher_url)[2] # path
-    return path
-  else:
-    return hdfs_url

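Most of what models.py loses above is MR1 Thrift plumbing, but two of the deleted helpers (dots_to_camel_case and format_unixtime_ms) are generic string/time utilities. A self-contained Python 3 re-sketch, assumed behaviour-equivalent to the removed Python 2 originals:

import datetime
import re

DOTS = re.compile(r"\.([a-z])")

def dots_to_camel_case(dots):
  """dots_to_camel_case("foo.bar.baz") -> "fooBarBaz"."""
  return DOTS.sub(lambda m: m.group(1).upper(), dots)

def format_unixtime_ms(unixtime):
  """Format a unix timestamp in milliseconds as a human-readable string."""
  if unixtime:
    return datetime.datetime.fromtimestamp(unixtime / 1000).strftime("%x %X %Z")
  return ""

assert dots_to_camel_case("mapred.output.dir") == "mapredOutputDir"
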
+ 27 - 26
desktop/libs/hadoop/src/hadoop/conf.py

@@ -90,32 +90,33 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
   )
 )
 
-# MR_CLUSTERS = UnspecifiedConfigSection(
-#   "mapred_clusters",
-#   help="One entry for each MapReduce cluster",
-#   each=ConfigSection(
-#     help="Information about a single MapReduce cluster",
-#     members=dict(
-#       HOST=Config("jobtracker_host", help="Host/IP for JobTracker"),
-#       PORT=Config("jobtracker_port",
-#                   default=8021,
-#                   help="Service port for the JobTracker",
-#                   type=int),
-#       LOGICAL_NAME=Config('logical_name',
-#                           default="",
-#                           type=str,
-#                           help=_t('JobTracker logical name.')),
-#       JT_THRIFT_PORT=Config("thrift_port", help="Thrift port for JobTracker", default=9290,
-#                             type=int),
-#       JT_KERBEROS_PRINCIPAL=Config("jt_kerberos_principal", help="Kerberos principal for JobTracker",
-#                                    default="mapred", type=str),
-#       SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
-#                               default=False, type=coerce_bool),
-#       SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
-#                        default=True, type=coerce_bool), # True here for backward compatibility
-#     )
-#   )
-# )
+# Deprecated and not used.
+MR_CLUSTERS = UnspecifiedConfigSection(
+  "mapred_clusters",
+  help="One entry for each MapReduce cluster",
+  each=ConfigSection(
+    help="Information about a single MapReduce cluster",
+    members=dict(
+      HOST=Config("jobtracker_host", help="Host/IP for JobTracker"),
+      PORT=Config("jobtracker_port",
+                  default=8021,
+                  help="Service port for the JobTracker",
+                  type=int),
+      LOGICAL_NAME=Config('logical_name',
+                          default="",
+                          type=str,
+                          help=_t('JobTracker logical name.')),
+      JT_THRIFT_PORT=Config("thrift_port", help="Thrift port for JobTracker", default=9290,
+                            type=int),
+      JT_KERBEROS_PRINCIPAL=Config("jt_kerberos_principal", help="Kerberos principal for JobTracker",
+                                   default="mapred", type=str),
+      SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
+                              default=False, type=coerce_bool),
+      SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
+                       default=True, type=coerce_bool), # True here for backward compatibility
+    )
+  )
+)
 
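The hunk above resurrects MR_CLUSTERS, which had been commented out, and marks it deprecated: old hue.ini files with a [[mapred_clusters]] section keep parsing, but nothing submits jobs to it anymore. A sketch of how such a section would be read, following the access pattern Hue uses for the other UnspecifiedConfigSection blocks in this file (illustrative, not code from this commit):

from hadoop import conf

for name in conf.MR_CLUSTERS.keys():
  cluster = conf.MR_CLUSTERS[name]
  # Each member declared above is exposed as a Config with a .get() accessor.
  print(name, cluster.HOST.get(), cluster.PORT.get(), cluster.SUBMIT_TO.get())
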
 YARN_CLUSTERS = UnspecifiedConfigSection(
   "yarn_clusters",