
HUE-1631 [oozie] Support JobTracker HA in workflows

Abraham Elmahrek 12 years ago
parent commit 13516c1

+ 0 - 1
apps/oozie/src/oozie/conf.py

@@ -55,7 +55,6 @@ OOZIE_JOBS_COUNT = Config(
   type=int,
  help=_t('Maximum number of Oozie workflows, coordinators, or bundles to retrieve in one API call.'))
 
-
 def config_validator(user):
   res = []
 

+ 2 - 2
apps/oozie/src/oozie/models.py

@@ -247,10 +247,10 @@ class WorkflowManager(models.Manager):
     else:
       perms = 0711
 
-    Submission(workflow.owner, workflow, fs, {})._create_dir(workflow.deployment_dir, perms=perms)
+    Submission(workflow.owner, workflow, fs, None, {})._create_dir(workflow.deployment_dir, perms=perms)
 
   def destroy(self, workflow, fs):
-    Submission(workflow.owner, workflow, fs, {}).remove_deployment_dir()
+    Submission(workflow.owner, workflow, fs, None, {}).remove_deployment_dir()
     try:
       workflow.coordinator_set.update(workflow=None) # In Django 1.3 could do ON DELETE set NULL
     except:
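
Every positional caller of Submission now has to account for the new fourth parameter: WorkflowManager passes an explicit None because creating or removing a deployment directory needs HDFS but no JobTracker. A condensed sketch of the widened constructor (the real one is in the submittion.py hunk further down):

# Condensed sketch of the widened Submission constructor; the real class
# lives in desktop/libs/liboozie/src/liboozie/submittion.py (hunk below).
class Submission(object):
    def __init__(self, user, job=None, fs=None, jt=None, properties=None, oozie_id=None):
        self.user = user
        self.job = job
        self.fs = fs
        self.jt = jt  # new in this commit: JobTracker handle, may be None
        self.oozie_id = oozie_id
        self.properties = properties if properties is not None else {}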

+ 3 - 3
apps/oozie/src/oozie/views/dashboard.py

@@ -365,7 +365,7 @@ def rerun_oozie_job(request, job_id, app_path):
 
 def _rerun_workflow(request, oozie_id, run_args, mapping):
   try:
-    submission = Submission(user=request.user, fs=request.fs, properties=mapping, oozie_id=oozie_id)
+    submission = Submission(user=request.user, fs=request.fs, jt=request.jt, properties=mapping, oozie_id=oozie_id)
     job_id = submission.rerun(**run_args)
     return job_id
   except RestException, ex:
@@ -419,7 +419,7 @@ def rerun_oozie_coordinator(request, job_id, app_path):
 
 def _rerun_coordinator(request, oozie_id, args, params, properties):
   try:
-    submission = Submission(user=request.user, fs=request.fs, oozie_id=oozie_id, properties=properties)
+    submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
     job_id = submission.rerun_coord(params=params, **args)
     return job_id
   except RestException, ex:
@@ -482,7 +482,7 @@ def rerun_oozie_bundle(request, job_id, app_path):
 
 def _rerun_bundle(request, oozie_id, args, params, properties):
   try:
-    submission = Submission(user=request.user, fs=request.fs, oozie_id=oozie_id, properties=properties)
+    submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
     job_id = submission.rerun_bundle(params=params, **args)
     return job_id
   except RestException, ex:
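
All three rerun helpers change the same way: the view's request object, which Hue decorates with fs and jt cluster handles, now supplies both connections. A stand-in showing the call shape, reusing the Submission sketch above (Request here is a made-up stub, not Django's class):

from collections import namedtuple

# Hypothetical stub: in Hue, middleware attaches fs and jt to the real request.
Request = namedtuple('Request', ['user', 'fs', 'jt'])

def rerun(request, oozie_id, mapping):
    return Submission(user=request.user, fs=request.fs, jt=request.jt,
                      properties=mapping, oozie_id=oozie_id)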

+ 8 - 8
apps/oozie/src/oozie/views/editor.py

@@ -300,7 +300,7 @@ def submit_workflow(request, workflow):
     if params_form.is_valid():
       mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
 
-      job_id = _submit_workflow(request.user, request.fs, workflow, mapping)
+      job_id = _submit_workflow(request.user, request.fs, request.jt, workflow, mapping)
 
       request.info(_('Workflow submitted'))
       return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
@@ -318,9 +318,9 @@ def submit_workflow(request, workflow):
   return HttpResponse(json.dumps(popup), mimetype="application/json")
 
 
-def _submit_workflow(user, fs, workflow, mapping):
+def _submit_workflow(user, fs, jt, workflow, mapping):
   try:
-    submission = Submission(user, workflow, fs, mapping)
+    submission = Submission(user, workflow, fs, jt, mapping)
     job_id = submission.run()
     History.objects.create_from_submission(submission)
     return job_id
@@ -601,12 +601,12 @@ def submit_coordinator(request, coordinator):
 
 def _submit_coordinator(request, coordinator, mapping):
   try:
-    wf_dir = Submission(request.user, coordinator.workflow, request.fs, mapping).deploy()
+    wf_dir = Submission(request.user, coordinator.workflow, request.fs, request.jt, mapping).deploy()
 
     properties = {'wf_application_path': request.fs.get_hdfs_path(wf_dir)}
     properties.update(mapping)
 
-    submission = Submission(request.user, coordinator, request.fs, properties=properties)
+    submission = Submission(request.user, coordinator, request.fs, request.jt, properties=properties)
     job_id = submission.run()
 
     History.objects.create_from_submission(submission)
@@ -813,13 +813,13 @@ def _submit_bundle(request, bundle, properties):
     deployment_dirs = {}
 
     for bundled in bundle.coordinators.all():
-      wf_dir = Submission(request.user, bundled.coordinator.workflow, request.fs, properties).deploy()
+      wf_dir = Submission(request.user, bundled.coordinator.workflow, request.fs, request.jt, properties).deploy()
       deployment_dirs['wf_%s_dir' % bundled.coordinator.workflow.id] = request.fs.get_hdfs_path(wf_dir)
-      coord_dir = Submission(request.user, bundled.coordinator, request.fs, properties).deploy()
+      coord_dir = Submission(request.user, bundled.coordinator, request.fs, request.jt, properties).deploy()
       deployment_dirs['coord_%s_dir' % bundled.coordinator.id] = coord_dir
 
     properties.update(deployment_dirs)
-    submission = Submission(request.user, bundle, request.fs, properties=properties)
+    submission = Submission(request.user, bundle, request.fs, request.jt, properties=properties)
     job_id = submission.run()
 
     History.objects.create_from_submission(submission)
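
The coordinator and bundle paths deploy their dependent artifacts first and then submit the parent job, so each step builds its own Submission with the same (user, job, fs, jt) quartet. The coordinator flow, condensed from the hunk above (this assumes Hue's real Submission and request objects, not the stubs sketched earlier):

def submit_coordinator(request, coordinator, mapping):
    # 1. Deploy the underlying workflow and record where it landed.
    wf_dir = Submission(request.user, coordinator.workflow,
                        request.fs, request.jt, mapping).deploy()
    properties = {'wf_application_path': request.fs.get_hdfs_path(wf_dir)}
    properties.update(mapping)
    # 2. Submit the coordinator itself, pointing at the deployed workflow.
    return Submission(request.user, coordinator, request.fs,
                      request.jt, properties=properties).run()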

+ 6 - 0
desktop/conf.dist/hue.ini

@@ -296,6 +296,9 @@
       # Enter the filesystem uri
       fs_defaultfs=hdfs://localhost:8020
 
+      # NameNode logical name.
+      ## logical_name=
+
       # Use WebHdfs/HttpFs as the communication mechanism.
       # This should be the web service root URL, such as
       # http://namenode:50070/webhdfs/v1
@@ -365,6 +368,9 @@
       # The port where the JobTracker IPC listens on
       ## jobtracker_port=8021
 
+      # JobTracker logical name.
+      ## logical_name=
+
       # Thrift plug-in port for the JobTracker
       ## thrift_port=9290
 

+ 6 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -301,6 +301,9 @@
       # Enter the filesystem uri
       fs_defaultfs=hdfs://localhost:8020
 
+      # NameNode logical name.
+      ## logical_name=
+
       # Use WebHdfs/HttpFs as the communication mechanism.
       # This should be the web service root URL, such as
       # http://namenode:50070/webhdfs/v1
@@ -370,6 +373,9 @@
       # The port where the JobTracker IPC listens on
       ## jobtracker_port=8021
 
+      # JobTracker logical name.
+      ## logical_name=
+
       # Thrift plug-in port for the JobTracker
       ## thrift_port=9290
 

+ 7 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from django.utils.translation import ugettext_lazy as _t
 from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, validate_path, coerce_bool
 import fnmatch
 import logging
@@ -82,6 +83,8 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
       # End deprecation
       FS_DEFAULTFS=Config("fs_defaultfs", help="The equivalent of fs.defaultFS (aka fs.default.name)",
                           default="hdfs://localhost:8020"),
+      LOGICAL_NAME = Config("logical_name", default="",
+                            type=str, help=_t('NameNode logical name.')),
       WEBHDFS_URL=Config("webhdfs_url",
                          help="The URL to WebHDFS/HttpFS service. Defaults to " +
                          "the WebHDFS URL on the NameNode.",
@@ -134,6 +137,10 @@ MR_CLUSTERS = UnspecifiedConfigSection(
                   default=8021,
                   help="Service port for the JobTracker",
                   type=int),
+      LOGICAL_NAME = Config('logical_name',
+                            default="",
+                            type=str,
+                            help=_t('JobTracker logical name.')),
       JT_THRIFT_PORT=Config("thrift_port", help="Thrift port for JobTracker", default=9290,
                             type=int),
       JT_KERBEROS_PRINCIPAL=Config("jt_kerberos_principal", help="Kerberos principal for JobTracker",
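
Both keys default to the empty string, so downstream code can test plain truthiness to see whether a logical name was configured. A hypothetical read inside a Hue environment, mirroring how the tests below set the same values:

from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS

nn_logical = HDFS_CLUSTERS['default'].LOGICAL_NAME.get()  # '' unless set in hue.ini
jt_logical = MR_CLUSTERS['default'].LOGICAL_NAME.get()
if not nn_logical:
    print('No NameNode logical name configured; falling back to fs_defaultfs')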

+ 7 - 0
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -55,6 +55,7 @@ class WebHdfs(Hdfs):
 
   def __init__(self, url,
                fs_defaultfs,
+               logical_name=None,
                hdfs_superuser=None,
                security_enabled=False,
                temp_dir="/tmp"):
@@ -63,6 +64,7 @@ class WebHdfs(Hdfs):
     self._security_enabled = security_enabled
     self._temp_dir = temp_dir
     self._fs_defaultfs = fs_defaultfs
+    self._logical_name = logical_name
 
     self._client = self._make_client(url, security_enabled)
     self._root = resource.Resource(self._client)
@@ -78,6 +80,7 @@ class WebHdfs(Hdfs):
     fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
     return cls(url=_get_service_url(hdfs_config),
                fs_defaultfs=fs_defaultfs,
+               logical_name=hdfs_config.LOGICAL_NAME.get(),
                security_enabled=hdfs_config.SECURITY_ENABLED.get(),
                temp_dir=hdfs_config.TEMP_DIR.get())
 
@@ -95,6 +98,10 @@ class WebHdfs(Hdfs):
   def uri(self):
     return self._url
 
+  @property
+  def logical_name(self):
+    return self._logical_name
+
   @property
   def fs_defaultfs(self):
     return self._fs_defaultfs
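
Surfacing the value as a read-only property lets submitters write fs.logical_name or fs.fs_defaultfs without reaching into privates. A minimal stand-in (not Hue's WebHdfs) demonstrating the fallback:

class FsStub(object):
    """Stand-in exposing just the two attributes Submission relies on."""
    def __init__(self, fs_defaultfs, logical_name=None):
        self._fs_defaultfs = fs_defaultfs
        self._logical_name = logical_name

    @property
    def logical_name(self):
        return self._logical_name

    @property
    def fs_defaultfs(self):
        return self._fs_defaultfs

fs = FsStub('hdfs://localhost:8020')       # no logical name configured
print(fs.logical_name or fs.fs_defaultfs)  # -> hdfs://localhost:8020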

+ 5 - 1
desktop/libs/hadoop/src/hadoop/job_tracker.py

@@ -64,7 +64,9 @@ class LiveJobTracker(object):
   In particular, if Thrift returns None for anything, this will throw.
   """
 
-  def __init__(self, host, thrift_port,
+  def __init__(self, host,
+               thrift_port,
+               logical_name=None,
                security_enabled=False,
                kerberos_principal="mapred"):
     self.client = thrift_util.get_client(
@@ -75,6 +77,7 @@ class LiveJobTracker(object):
       timeout_seconds=JT_THRIFT_TIMEOUT)
     self.host = host
     self.thrift_port = thrift_port
+    self.logical_name = logical_name
     self.security_enabled = security_enabled
     # We allow a single LiveJobTracker to be used across multiple
     # threads by restricting the stateful components to a thread
@@ -87,6 +90,7 @@ class LiveJobTracker(object):
     return cls(
       conf.HOST.get(),
       conf.JT_THRIFT_PORT.get(),
+      conf.LOGICAL_NAME.get(),
       security_enabled=conf.SECURITY_ENABLED.get(),
       kerberos_principal=conf.JT_KERBEROS_PRINCIPAL.get())
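
Note that from_conf() passes LOGICAL_NAME positionally, so logical_name must stay the third parameter of __init__. A condensed stand-in (not the Thrift-backed class) showing why the order matters:

class JtStub(object):
    def __init__(self, host, thrift_port, logical_name=None,
                 security_enabled=False):
        self.host = host
        self.thrift_port = thrift_port
        self.logical_name = logical_name
        self.security_enabled = security_enabled

jt = JtStub('jt1-host', 9290, 'ha-jt')  # mirrors from_conf()'s positional call
assert jt.logical_name == 'ha-jt'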
 

+ 0 - 1
desktop/libs/liboozie/src/liboozie/conf.py

@@ -39,7 +39,6 @@ REMOTE_DEPLOYMENT_DIR = Config(
   default="/user/hue/oozie/deployments",
   help=_t("Location on HDFS where the workflows/coordinators are deployed when submitted by a non-owner."))
 
-
 def get_oozie_status():
   from liboozie.oozie_api import get_oozie
 

+ 8 - 6
desktop/libs/liboozie/src/liboozie/submittion.py

@@ -39,10 +39,11 @@ class Submission(object):
   - submit
   - rerun
   """
-  def __init__(self, user, job=None, fs=None, properties=None, oozie_id=None):
+  def __init__(self, user, job=None, fs=None, jt=None, properties=None, oozie_id=None):
     self.job = job
     self.user = user
     self.fs = fs
+    self.jt = jt
     self.oozie_id = oozie_id
 
     if properties is not None:
@@ -157,16 +158,17 @@ class Submission(object):
        # Don't support shared sub-workflow
         if action.node_type == 'subworkflow':
           node = action.get_full_node()
-          sub_deploy = Submission(self.user, node.sub_workflow, self.fs, self.properties)
+          sub_deploy = Submission(self.user, node.sub_workflow, self.fs, self.jt, self.properties)
           sub_deploy.deploy()
 
     return deployment_dir
 
   def _update_properties(self, jobtracker_addr, deployment_dir):
-    self.properties.update({
-      'jobTracker': jobtracker_addr,
-      'nameNode': self.fs.fs_defaultfs,
-    })
+    if self.fs and self.jt:
+      self.properties.update({
+        'jobTracker': self.jt.logical_name or jobtracker_addr,
+        'nameNode': self.fs.logical_name or self.fs.fs_defaultfs,
+      })
 
     if self.job:
       self.properties.update({
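
The new guard means a Submission built without cluster handles (as in the first test case below) leaves jobTracker/nameNode untouched, and when both handles are present a configured logical name wins over the concrete address. A runnable distillation of the precedence rule:

def resolve_endpoints(jt_logical, jobtracker_addr, nn_logical, fs_defaultfs):
    """Logical names take precedence; an empty/None name falls back to the address."""
    return {
        'jobTracker': jt_logical or jobtracker_addr,
        'nameNode': nn_logical or fs_defaultfs,
    }

assert resolve_endpoints('', 'jt1:8021', '', 'hdfs://nn1:8020') == \
    {'jobTracker': 'jt1:8021', 'nameNode': 'hdfs://nn1:8020'}
assert resolve_endpoints('ha-jt', 'jt1:8021', 'ha-nn', 'hdfs://nn1:8020') == \
    {'jobTracker': 'ha-jt', 'nameNode': 'ha-nn'}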

+ 51 - 0
desktop/libs/liboozie/src/liboozie/tests.py

@@ -22,7 +22,10 @@ from oozie.tests import MockOozieApi
 
 from desktop.lib.test_utils import reformat_xml
 
+from hadoop import cluster
+from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
 from liboozie.types import WorkflowAction, Coordinator
+from liboozie.submittion import Submission
 from liboozie.utils import config_gen
 
 
@@ -56,3 +59,51 @@ def test_config_gen():
   <value><![CDATA[hue]]></value>
 </property>
 </configuration>"""), reformat_xml(config_gen(properties)))
+
+
+def test_update_properties():
+  finish = []
+  finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
+  finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
+  try:
+    properties = {
+      'user.name': 'hue',
+      'test.1': 'http://localhost/test?test1=test&test2=test'
+    }
+
+    final_properties = properties.copy()
+    submission = Submission(None, properties=properties, oozie_id='test')
+    assert_equal(properties, submission.properties)
+    submission._update_properties('jtaddress', 'deployment-directory')
+    assert_equal(final_properties, submission.properties)
+
+    cluster.clear_caches()
+    fs = cluster.get_hdfs()
+    jt = cluster.get_next_ha_mrcluster()[1]
+    final_properties = properties.copy()
+    final_properties.update({
+      'jobTracker': 'jtaddress',
+      'nameNode': fs.fs_defaultfs
+    })
+    submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
+    assert_equal(properties, submission.properties)
+    submission._update_properties('jtaddress', 'deployment-directory')
+    assert_equal(final_properties, submission.properties)
+
+    finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
+    finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
+    cluster.clear_caches()
+    fs = cluster.get_hdfs()
+    jt = cluster.get_next_ha_mrcluster()[1]
+    final_properties = properties.copy()
+    final_properties.update({
+      'jobTracker': 'jobtracker',
+      'nameNode': 'namenode'
+    })
+    submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
+    assert_equal(properties, submission.properties)
+    submission._update_properties('jtaddress', 'deployment-directory')
+    assert_equal(final_properties, submission.properties)
+  finally:
+    for reset in finish:
+      reset()
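
Read together, the three blocks pin down the precedence chain end to end. Feeding the test's literal inputs through the resolve_endpoints sketch from the submittion.py section gives the same answers (assuming the default fs_defaultfs of hdfs://localhost:8020):

# Same scenarios as the test, via the sketch above:
assert resolve_endpoints('', 'jtaddress', '', 'hdfs://localhost:8020') == \
    {'jobTracker': 'jtaddress', 'nameNode': 'hdfs://localhost:8020'}
assert resolve_endpoints('jobtracker', 'jtaddress', 'namenode', 'hdfs://localhost:8020') == \
    {'jobTracker': 'jobtracker', 'nameNode': 'namenode'}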