
HUE-6094 [editor] Convert jobsub shell document types to new editor

Jenny Kim · 8 years ago · commit e534486
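
For context: this change teaches the document converter to migrate old Job Designer shell actions into Notebook documents, following the path already used for MapReduce jobs. A minimal sketch of the snippet the new importer produces, with values taken from the test fixture below (the snippet type follows editor_type='shell'; the real Notebook envelope carries more fields, so treat this as illustrative only):

    # Illustrative shape only; field names mirror import_saved_shell_job below
    converted_snippet = {
      'type': 'shell',
      'status': 'ready',
      'properties': {
        'command_path': 'hello.py',
        'arguments': ['baz'],
        'env_var': ['foo=bar'],
        'hadoopProperties': ['mapred.job.queue.name=test'],
        'files': [{'type': 'file', 'path': 'hello.py'}],
        'archives': ['test.zip'],
        'capture_output': True,
      },
    }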

desktop/core/src/desktop/converter_tests.py  +60 -0

@@ -358,6 +358,66 @@ class TestDocumentConverter(object):
       wf.delete()
 
 
+  def test_convert_shell(self):
+    wf = Workflow.objects.new_workflow(self.user)
+    wf.save()
+    Workflow.objects.initialize(wf)
+    Link.objects.filter(parent__workflow=wf).delete()
+    action = add_node(wf, 'action-name-1', 'shell', [wf.start], {
+      u'job_xml': 'my-job.xml',
+      u'files': '["hello.py"]',
+      u'name': 'Shell',
+      u'job_properties': '[{"name": "mapred.job.queue.name", "value": "test"}]',
+      u'capture_output': 'on',
+      u'command': 'hello.py',
+      u'archives': '[{"dummy": "", "name": "test.zip"}]',
+      u'prepares': '[]',
+      u'params': '[{"type": "argument", "value": "baz"}, {"type": "env-var", "value": "foo=bar"}]',
+      u'description': 'Execute a Python script printing its arguments'
+    })
+    Link(parent=action, child=wf.end, name="ok").save()
+
+    # Set doc.last_modified to an older date so we can verify it carries over on conversion
+    doc = Document.objects.get(id=wf.doc.get().id)
+    Document.objects.filter(id=doc.id).update(last_modified=datetime.strptime('2000-01-01T00:00:00Z', '%Y-%m-%dT%H:%M:%SZ'))
+    doc = Document.objects.get(id=doc.id)
+
+    try:
+      if IS_HUE_4.get():
+        # Test that corresponding doc2 is created after convert
+        assert_false(Document2.objects.filter(owner=self.user, type='query-shell').exists())
+
+        converter = DocumentConverter(self.user)
+        converter.convert()
+
+        doc2 = Document2.objects.get(owner=self.user, type='query-shell')
+
+        # Verify snippet values
+        assert_equal('ready', doc2.data_dict['snippets'][0]['status'])
+        assert_equal('hello.py', doc2.data_dict['snippets'][0]['properties']['command_path'])
+        assert_equal(['baz'], doc2.data_dict['snippets'][0]['properties']['arguments'])
+        assert_equal(['foo=bar'], doc2.data_dict['snippets'][0]['properties']['env_var'])
+        assert_equal(['mapred.job.queue.name=test'], doc2.data_dict['snippets'][0]['properties']['hadoopProperties'])
+        assert_equal(['test.zip'], doc2.data_dict['snippets'][0]['properties']['archives'])
+        assert_equal([{'type': 'file', 'path': 'hello.py'}], doc2.data_dict['snippets'][0]['properties']['files'])
+        assert_equal(True, doc2.data_dict['snippets'][0]['properties']['capture_output'])
+      else:
+        # Test that corresponding doc2 is created after convert
+        assert_false(Document2.objects.filter(owner=self.user, type='link-workflow').exists())
+
+        converter = DocumentConverter(self.user)
+        converter.convert()
+
+        doc2 = Document2.objects.get(owner=self.user, type='link-workflow')
+
+        # Verify absolute_url
+        response = self.client.get(doc2.get_absolute_url())
+        assert_equal(200, response.status_code)
+        assert_equal(doc.last_modified.strftime('%Y-%m-%dT%H:%M:%S'), doc2.last_modified.strftime('%Y-%m-%dT%H:%M:%S'))
+    finally:
+      wf.delete()
+
+
   def test_convert_pig_script(self):
     attrs = {
       'user': self.user,

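The params fixture above exercises the split that import_saved_shell_job performs further down in this commit: entries typed 'argument' become positional arguments, and everything else is treated as an environment variable. A standalone sketch of that mapping, using only the json module:

    import json

    params = json.loads('[{"type": "argument", "value": "baz"}, {"type": "env-var", "value": "foo=bar"}]')
    arguments = [p['value'] for p in params if p['type'] == 'argument']
    env_var = [p['value'] for p in params if p['type'] != 'argument']
    # arguments == ['baz'], env_var == ['foo=bar']
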
desktop/core/src/desktop/converters.py  +15 -11

@@ -26,7 +26,8 @@ from desktop.conf import IS_HUE_4
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.models import Document, DocumentPermission, DocumentTag, Document2, Directory, Document2Permission
 from notebook.api import _historify
-from notebook.models import import_saved_beeswax_query, import_saved_pig_script, import_saved_mapreduce_job
+from notebook.models import import_saved_beeswax_query, import_saved_pig_script, import_saved_mapreduce_job, \
+  import_saved_shell_job
 
 
 LOG = logging.getLogger(__name__)
@@ -152,20 +153,23 @@ class DocumentConverter(object):
         try:
           if doc.content_object:
             node = doc.content_object.start.get_child('to')
+            notebook = None
+
             if IS_HUE_4.get():
-              notebook = None
               if node.node_type == 'mapreduce':
                 notebook = import_saved_mapreduce_job(doc.content_object)
+              elif node.node_type == 'shell':
+                notebook = import_saved_shell_job(doc.content_object)
 
-              if notebook:
-                data = notebook.get_data()
-                doc2 = self._create_doc2(
-                  document=doc,
-                  doctype=data['type'],
-                  name=doc.name,
-                  description=data['description'],
-                  data=notebook.get_json()
-                )
+            if notebook:
+              data = notebook.get_data()
+              doc2 = self._create_doc2(
+                document=doc,
+                doctype=data['type'],
+                name=doc.name,
+                description=data['description'],
+                data=notebook.get_json()
+              )
             else:
               data = doc.content_object.data_dict
               data.update({'content_type': doc.content_type.model, 'object_id': doc.object_id})

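Note the dedent in the hunk above: notebook = None and the if notebook: block now sit outside the IS_HUE_4 branch, so any workflow type without a dedicated importer, and any non-Hue-4 deployment, falls through to the existing link-workflow path. Reduced to its control flow (bodies elided; helper names are from the diff), the refactored step reads roughly:

    notebook = None
    if IS_HUE_4.get():
      if node.node_type == 'mapreduce':
        notebook = import_saved_mapreduce_job(doc.content_object)
      elif node.node_type == 'shell':
        notebook = import_saved_shell_job(doc.content_object)

    if notebook:
      pass  # create an editor doc2 from notebook.get_data()
    else:
      pass  # fall back to creating a link-workflow doc2
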
desktop/libs/notebook/src/notebook/models.py  +65 -1

@@ -246,7 +246,6 @@ def import_saved_pig_script(pig_script):
 
 def import_saved_mapreduce_job(wf):
   snippet_properties = {}
-
   node = wf.start.get_child('to')
 
   try:
@@ -294,6 +293,71 @@ def import_saved_mapreduce_job(wf):
   return notebook
 
 
+def import_saved_shell_job(wf):
+  snippet_properties = {}
+  node = wf.start.get_child('to')
+
+  snippet_properties['command_path'] = node.command
+
+  snippet_properties['arguments'] = []
+  snippet_properties['env_var'] = []
+  try:
+    params = json.loads(node.params)
+    if params:
+      for param in params:
+        if param['type'] == 'argument':
+          snippet_properties['arguments'].append(param['value'])
+        else:
+          snippet_properties['env_var'].append(param['value'])
+  except ValueError, e:
+    LOG.warn('Failed to parse parameters for shell job design "%s".' % wf.name)
+
+  snippet_properties['hadoopProperties'] = []
+  try:
+    properties = json.loads(node.job_properties)
+    if properties:
+      for prop in properties:
+        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
+  except ValueError, e:
+    LOG.warn('Failed to parse job properties for shell job design "%s".' % wf.name)
+
+  snippet_properties['files'] = []
+  try:
+    files = json.loads(node.files)
+    for filepath in files:
+      snippet_properties['files'].append({'type': 'file', 'path': filepath})
+  except ValueError, e:
+    LOG.warn('Failed to parse files for shell job design "%s".' % wf.name)
+
+  snippet_properties['archives'] = []
+  try:
+    archives = json.loads(node.archives)
+    for archive in archives:
+      snippet_properties['archives'].append(archive['name'])
+  except ValueError, e:
+    LOG.warn('Failed to parse archives for shell job design "%s".' % wf.name)
+
+  snippet_properties['capture_output'] = node.capture_output
+
+  notebook = make_notebook(
+    name=wf.name,
+    description=wf.description,
+    editor_type='shell',
+    statement='',
+    status='ready',
+    snippet_properties=snippet_properties,
+    is_saved=True
+  )
+
+  # Remove functions and settings from snippet properties; they do not apply to shell snippets
+  data = notebook.get_data()
+  data['snippets'][0]['properties'].pop('functions')
+  data['snippets'][0]['properties'].pop('settings')
+
+  notebook.data = json.dumps(data)
+  return notebook
+
+
 def _convert_type(btype, bdata):
   from beeswax.models import HQL, IMPALA, RDBMS, SPARK
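
A minimal usage sketch of the new importer, assuming wf is a saved Job Designer Workflow whose start node points at a shell action (the test fixture earlier in this commit builds exactly that):

    from notebook.models import import_saved_shell_job

    notebook = import_saved_shell_job(wf)
    data = notebook.get_data()
    props = data['snippets'][0]['properties']

    print props['command_path']      # e.g. 'hello.py'
    print props['hadoopProperties']  # e.g. ['mapred.job.queue.name=test']
    assert 'functions' not in props and 'settings' not in props

The popped functions and settings keys are generic editor defaults that make_notebook fills in; the importer strips them because shell snippets never use them.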