
[oozie] hive-site.xml created for spark action has permission 700, which causes an issue when the shared workflow is copied by user b (#2484)

PR: 2484
Jira: CDPD-29135
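
The generated hive-site.xml is written into the workflow's lib/ directory. With mode 0o700 only the owning user can read it, so when user b copies the shared workflow the Spark action cannot pick the file up; the patch below relaxes the mode to 0o755 in Submission.deploy(). For reference, a minimal standalone sketch (illustration only, not part of the patch) of what the two permission modes mean:

import stat

# Hypothetical illustration of the permission change in deploy():
# 0o700 -> only the workflow owner can read lib/hive-site.xml,
# 0o755 -> group and others can also read it, so a copied/shared
#          workflow still works for a second user.
for mode in (0o700, 0o755):
    print(oct(mode), stat.filemode(stat.S_IFREG | mode))
# 0o700 -rwx------
# 0o755 -rwxr-xr-x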
Mahesh Balakrishnan, 4 years ago
parent commit 9a666b8ac1

+ 63 - 61
desktop/libs/liboozie/src/liboozie/submission2.py

@@ -48,7 +48,6 @@ if sys.version_info[0] > 2:
 else:
   from django.utils.translation import ugettext as _

-
 LOG = logging.getLogger(__name__)


@@ -60,13 +59,14 @@ def submit_dryrun(run_func):
     jt_address = cluster.get_cluster_addr_for_job_submission()

     if deployment_dir is None:
-      self._update_properties(jt_address) # Needed as we need to set some properties like Credentials before
+      self._update_properties(jt_address)  # Needed as we need to set some properties like Credentials before
       deployment_dir = self.deploy()

     self._update_properties(jt_address, deployment_dir)
     if self.properties.get('dryrun'):
       self.api.dryrun(self.properties)
     return run_func(self, deployment_dir)
+
   return wraps(run_func)(decorate)


@@ -78,11 +78,12 @@ class Submission(object):
   - submit
   - rerun
   """
+
   def __init__(self, user, job=None, fs=None, jt=None, properties=None, oozie_id=None, local_tz=None):
     self.job = job
     self.user = user
     self.fs = fs
-    self.jt = jt # Deprecated with YARN, we now use logical names only for RM
+    self.jt = jt  # Deprecated with YARN, we now use logical names only for RM
     self.oozie_id = oozie_id
     self.api = get_oozie(self.user)

@@ -152,7 +153,7 @@ class Submission(object):
     if fail_nodes:
       self.properties.update({'oozie.wf.rerun.failnodes': fail_nodes})
     elif not skip_nodes:
-      self.properties.update({'oozie.wf.rerun.failnodes': 'false'}) # Case empty 'skip_nodes' list
+      self.properties.update({'oozie.wf.rerun.failnodes': 'false'})  # Case empty 'skip_nodes' list
     else:
       self.properties.update({'oozie.wf.rerun.skip.nodes': skip_nodes})

@@ -162,7 +163,6 @@ class Submission(object):

     return self.oozie_id

-
   def rerun_coord(self, deployment_dir, params):
     jt_address = cluster.get_cluster_addr_for_job_submission()

@@ -191,7 +191,6 @@ class Submission(object):

     return self.oozie_id

-
   def deploy(self, deployment_dir=None):
     try:
       if not deployment_dir:
@@ -203,7 +202,7 @@ class Submission(object):

     if self.api.security_enabled:
       jt_address = cluster.get_cluster_addr_for_job_submission()
-      self._update_properties(jt_address) # Needed for coordinator deploying workflows with credentials
+      self._update_properties(jt_address)  # Needed for coordinator deploying workflows with credentials

     if hasattr(self.job, 'nodes'):
       for action in self.job.nodes:
@@ -215,12 +214,12 @@ class Submission(object):
           sub_deploy = Submission(self.user, workflow, self.fs, self.jt, self.properties)
           workspace = sub_deploy.deploy()

-          self.job.override_subworkflow_id(action, workflow.id) # For displaying the correct graph
-          self.properties['workspace_%s' % workflow.uuid] = workspace # For pointing to the correct workspace
+          self.job.override_subworkflow_id(action, workflow.id)  # For displaying the correct graph
+          self.properties['workspace_%s' % workflow.uuid] = workspace  # For pointing to the correct workspace

         elif action.data['type'] == 'altus' or \
-            (action.data['type'] == 'spark-document' and 'altus' in self.properties.get('cluster', '')) or \
-            (self.properties.get('auto-cluster') and 'document' in action.data['type']):
+          (action.data['type'] == 'spark-document' and 'altus' in self.properties.get('cluster', '')) or \
+          (self.properties.get('auto-cluster') and 'document' in action.data['type']):
           is_altus_job = 'altus' in self.properties.get('cluster', '') and action.data['type'] != 'altus'
           is_scheduled_altus_job = self.properties.get('auto-cluster')

@@ -236,27 +235,27 @@ python altus.py

           if is_altus_job:
             shell_script = self._generate_altus_job_action_script(
-                service='dataeng',
-                cluster=self.properties['cluster'],
-                jobs=[{
-                    'sparkJob': {
-                        'jars': [u's3a://datawarehouse-customer360/ETL/spark-examples.jar'],
-                        'mainClass': u'org.apache.spark.examples.SparkPi ',
-                        'applicationArguments': [u'10']
-                      },
-                    'name': None,
-                    'failureAction': 'NONE'
-                }],
-                auth_key_id=ALTUS.AUTH_KEY_ID.get(),
-                auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
+              service='dataeng',
+              cluster=self.properties['cluster'],
+              jobs=[{
+                'sparkJob': {
+                  'jars': [u's3a://datawarehouse-customer360/ETL/spark-examples.jar'],
+                  'mainClass': u'org.apache.spark.examples.SparkPi ',
+                  'applicationArguments': [u'10']
+                },
+                'name': None,
+                'failureAction': 'NONE'
+              }],
+              auth_key_id=ALTUS.AUTH_KEY_ID.get(),
+              auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
             )
           elif is_scheduled_altus_job:
             shell_script = self._generate_altus_job_action_script(
-                service='dataeng',
-                cluster=self.properties['auto-cluster'],
-                jobs=[],
-                auth_key_id=ALTUS.AUTH_KEY_ID.get(),
-                auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
+              service='dataeng',
+              cluster=self.properties['auto-cluster'],
+              jobs=[],
+              auth_key_id=ALTUS.AUTH_KEY_ID.get(),
+              auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
             )
           else:
             if action.data['properties'].get('service', '').lower().strip().startswith('query'):
@@ -268,11 +267,11 @@ python altus.py
              )
            else:
              shell_script = self._generate_altus_action_script(
-                  service=action.data['properties'].get('service'),
-                  command=action.data['properties'].get('command'),
-                  arguments=dict([arg.split('=', 1) for arg in action.data['properties'].get('arguments', [])]),
-                  auth_key_id=ALTUS.AUTH_KEY_ID.get(),
-                  auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
+                service=action.data['properties'].get('service'),
+                command=action.data['properties'].get('command'),
+                arguments=dict([arg.split('=', 1) for arg in action.data['properties'].get('arguments', [])]),
+                auth_key_id=ALTUS.AUTH_KEY_ID.get(),
+                auth_key_secret=ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n')
              )

           self._create_file(deployment_dir, 'altus.py', shell_script)
@@ -322,12 +321,12 @@ export PYTHON_EGG_CACHE=./myeggs
 %(kinit)s

 impala-shell %(kerberos_option)s %(ssl_option)s -i %(impalad_host)s -f %(query_file)s""" % {
-  'impalad_host': action.data['properties'].get('impalad_host') or _get_impala_url(),
-  'kerberos_option': '-k' if self.api.security_enabled else '',
-  'ssl_option': '--ssl' if get_ssl_server_certificate() else '',
-  'query_file': script_name,
-  'kinit': kinit
-  }
+            'impalad_host': action.data['properties'].get('impalad_host') or _get_impala_url(),
+            'kerberos_option': '-k' if self.api.security_enabled else '',
+            'ssl_option': '--ssl' if get_ssl_server_certificate() else '',
+            'query_file': script_name,
+            'kinit': kinit
+          }

           self._create_file(deployment_dir, action.data['name'] + '.sh', shell_script)

@@ -348,7 +347,8 @@ WITH SERDEPROPERTIES (
   "quoteChar"     = "'",
   "escapeChar"    = "\\"
 )
-STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.join([snippet['statement_raw'] for snippet in notebook.get_data()['snippets']]))
+STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'),
+            '\n\n\n'.join([snippet['statement_raw'] for snippet in notebook.get_data()['snippets']]))

           if statements is not None:
             self._create_file(deployment_dir, action.data['name'] + '.sql', statements)
@@ -379,7 +379,8 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
             hive_site_lib = Hdfs.join(deployment_dir + '/lib/', 'hive-site.xml')
             hive_site_content = get_hive_site_content()
             if not self.fs.do_as_user(self.user, self.fs.exists, hive_site_lib) and hive_site_content:
-              self.fs.do_as_user(self.user, self.fs.create, hive_site_lib, overwrite=True, permission=0o700, data=smart_str(hive_site_content))
+              self.fs.do_as_user(self.user, self.fs.create, hive_site_lib, overwrite=True, permission=0o755,
+                data=smart_str(hive_site_content))
           if action.data['type'] in ('sqoop', 'sqoop-document'):
             if CONFIG_JDBC_LIBS_PATH.get() and CONFIG_JDBC_LIBS_PATH.get() not in self.properties.get('oozie.libpath', ''):
               LOG.debug("Adding to oozie.libpath %s" % CONFIG_JDBC_LIBS_PATH.get())
@@ -395,9 +396,9 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo

   def _check_sqoop_statement(self, action):
     statement = ''
-    if action.data['type'] == 'sqoop' and 'command' in action.data['properties']:         # Sqoop Workflow
+    if action.data['type'] == 'sqoop' and 'command' in action.data['properties']:  # Sqoop Workflow
       statement = action.data['properties']['command']
-    elif action.data['type'] == 'sqoop-document' and 'uuid' in action.data['properties']: # Sqoop Editor
+    elif action.data['type'] == 'sqoop-document' and 'uuid' in action.data['properties']:  # Sqoop Editor
       from notebook.models import Notebook
       notebook = Notebook(document=Document2.objects.get_by_uuid(user=self.user, uuid=action.data['properties']['uuid']))
       statement = notebook.get_data()['snippets'][0]['statement_raw']
@@ -406,11 +407,11 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
   def get_external_parameters(self, application_path):
     """From XML and job.properties HDFS files"""
     deployment_dir = os.path.dirname(application_path)
-    xml = self.fs.do_as_user(self.user, self.fs.read, application_path, 0, 1 * 1024**2)
+    xml = self.fs.do_as_user(self.user, self.fs.read, application_path, 0, 1 * 1024 ** 2)

     properties_file = deployment_dir + '/job.properties'
     if self.fs.do_as_user(self.user, self.fs.exists, properties_file):
-      properties = self.fs.do_as_user(self.user, self.fs.read, properties_file, 0, 1 * 1024**2)
+      properties = self.fs.do_as_user(self.user, self.fs.read, properties_file, 0, 1 * 1024 ** 2)
     else:
       properties = None

@@ -418,11 +419,12 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo

   def _get_external_parameters(self, xml, properties=None):
     from oozie.models import DATASET_FREQUENCY
-    parameters = dict([(var, '') for var in find_variables(xml, include_named=False) if not self._is_coordinator() or var not in DATASET_FREQUENCY])
+    parameters = dict(
+      [(var, '') for var in find_variables(xml, include_named=False) if not self._is_coordinator() or var not in DATASET_FREQUENCY])

     if properties:
       parameters.update(dict([line.strip().split('=')
-                              for line in properties.split('\n') if not line.startswith('#') and len(line.strip().split('=')) == 2]))
+        for line in properties.split('\n') if not line.startswith('#') and len(line.strip().split('=')) == 2]))
     return parameters

   def _update_properties(self, jobtracker_addr, deployment_dir=None):
@@ -455,7 +457,6 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo

       self._update_credentials_from_hive_action(credentials)

-
   def _update_credentials_from_hive_action(self, credentials):
     """
     Hive JDBC url from conf should be replaced when URL is set in hive action. Use _HOST from
@@ -464,15 +465,16 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
     if hasattr(self.job, 'nodes'):
       for action in self.job.nodes:
         if action.data['type'] in ('hive2', 'hive-document') and \
-                        credentials.hiveserver2_name in self.properties['credentials'] and \
-                        action.data['properties']['jdbc_url'] and \
-                        len(action.data['properties']['jdbc_url'].split('//')) > 1:
+          credentials.hiveserver2_name in self.properties['credentials'] and \
+          action.data['properties']['jdbc_url'] and \
+          len(action.data['properties']['jdbc_url'].split('//')) > 1:
           try:
             hive_jdbc_url = action.data['properties']['jdbc_url']
             hive_host_from_action = hive_jdbc_url.split('//')[1].split(':')[0]

             hive_principal_from_conf = self.properties['credentials'][credentials.hiveserver2_name]['properties'][1][1]
-            updated_hive_principal = hive_principal_from_conf.split('/')[0] + '/' + hive_host_from_action + '@' + hive_principal_from_conf.split('@')[1]
+            updated_hive_principal = hive_principal_from_conf.split('/')[0] + '/' + hive_host_from_action + '@' + \
+                                     hive_principal_from_conf.split('@')[1]

             self.properties['credentials'][credentials.hiveserver2_name]['properties'] = [
               ('hive2.jdbc.url', hive_jdbc_url),
@@ -483,14 +485,13 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
             LOG.error(msg)
             raise PopupException(message=_(msg), detail=str(ex))

-
-
   def _create_deployment_dir(self):
     """
     Return the job deployment directory in HDFS, creating it if necessary.
     The actual deployment dir should be 0711 owned by the user
     """
-    remote_deployment_dir = REMOTE_DEPLOYMENT_DIR.get().replace('$USER', self.user.username).replace('$TIME', str(time.time())).replace('$JOBID', str(self.job.id))
+    remote_deployment_dir = REMOTE_DEPLOYMENT_DIR.get().replace('$USER', self.user.username).replace('$TIME', str(time.time())).replace(
+      '$JOBID', str(self.job.id))
     # Automatic setup of the required directories if needed
     create_directories(self.fs, [], remote_deployment_dir)

@@ -553,9 +554,9 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
       for node in self.job.nodes:
         jar_path = node.data['properties'].get('jar_path')
         if jar_path:
-          if not jar_path.startswith('/'): # If workspace relative path
+          if not jar_path.startswith('/'):  # If workspace relative path
             jar_path = self.fs.join(self.job.deployment_dir, jar_path)
-          if not jar_path.startswith(lib_path): # If not already in lib
+          if not jar_path.startswith(lib_path):  # If not already in lib
             files.append(jar_path)

     if USE_LIBPATH_FOR_JARS.get():
@@ -592,8 +593,8 @@ STORED AS TEXTFILE %s""" % (self.properties.get('send_result_path'), '\n\n\n'.jo
     """Delete the workflow deployment directory."""
     try:
       path = self.job.deployment_dir
-      if self._do_as(self.user.username , self.fs.exists, path):
-        self._do_as(self.user.username , self.fs.rmtree, path)
+      if self._do_as(self.user.username, self.fs.exists, path):
+        self._do_as(self.user.username, self.fs.rmtree, path)
     except Exception as ex:
       LOG.warning("Failed to clean up workflow deployment directory for %s (owner %s). Caused by: %s", self.job.name, self.user, ex)

@@ -815,6 +816,7 @@ print _exec('%(service)s', 'submitHueQuery', {'clusterCrn': cluster_crn, 'payloa
             }'''
     }

+
 def create_directories(fs, directory_list=[], remote_deployment_dir=REMOTE_DEPLOYMENT_DIR.get()):
   # If needed, create the remote home, deployment and data directories
   directories = [remote_deployment_dir] + directory_list
@@ -827,4 +829,4 @@ def create_directories(fs, directory_list=[], remote_deployment_dir=REMOTE_DEPLO
         fs.do_as_user(fs.DEFAULT_USER, fs.create_home_dir, remote_home_dir)
       # Shared by all the users
       fs.do_as_user(fs.DEFAULT_USER, fs.mkdir, directory, 0o1777)
-      fs.do_as_user(fs.DEFAULT_USER, fs.chmod, directory, 0o1777) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
+      fs.do_as_user(fs.DEFAULT_USER, fs.chmod, directory, 0o1777)  # To remove after https://issues.apache.org/jira/browse/HDFS-3491