HUE-6094 [editor] Convert jobsub mapreduce document types to new editor

[converter] Refactor converter logic by type and add mapreduce test
Jenny Kim, 8 years ago
parent commit 2d437f2

+ 7 - 0
apps/jobsub/src/jobsub/templates/designs.mako

@@ -57,6 +57,13 @@ ${ commonheader(None, "jobsub", user, request) | n,unicode }
 
 <div class="container-fluid">
   <div class="card card-small">
+  % if is_hue_4:
+    <div class="alert">
+      ${ _('This is the old Job Editor. It is recommended to use the new ') }
+        <a href="${ url('notebook:editor') }?type=shell" target="_blank">${_('Editor')}</a>
+    </div>
+  % endif
+
   <h1 class="card-heading simple">${_('Designs')}</h1>
 
   <%actionbar:render>

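The banner above only renders when the view passes is_hue_4 into the Mako
context. A minimal sketch of how the jobsub view could supply that flag,
reusing the IS_HUE_4 conf option this commit already imports in converters.py
(the view name and render wiring here are illustrative, not part of this diff):

    from desktop.conf import IS_HUE_4
    from desktop.lib.django_util import render

    def list_designs(request):
      # Hypothetical wiring: expose the Hue 4 flag so designs.mako can
      # show the deprecation banner pointing at the new editor.
      return render('designs.mako', request, {'is_hue_4': IS_HUE_4.get()})
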
+ 53 - 3
desktop/core/src/desktop/converter_tests.py

@@ -31,6 +31,8 @@ from librdbms.design import SQLdesign
 
 from beeswax.models import SavedQuery
 from beeswax.design import hql_query
+from oozie.models import Link, Workflow
+from oozie.tests import add_node
 from pig.models import create_or_update_script
 from useradmin.models import get_default_user_group
 
@@ -42,6 +44,7 @@ class TestDocumentConverter(object):
     self.user = User.objects.get(username="doc2")
     grant_access("doc2", "doc2", "beeswax")
     grant_access("doc2", "doc2", "pig")
+    grant_access("doc2", "doc2", "jobsub")
 
     # This creates the user directories for the new user
     response = self.client.get('/desktop/api2/doc/')
@@ -160,6 +163,7 @@ class TestDocumentConverter(object):
       query.delete()
       query2.delete()
 
+
   def test_convert_hive_query_with_special_chars(self):
     sql = 'SELECT * FROM sample_07'
     settings = [
@@ -303,9 +307,55 @@ class TestDocumentConverter(object):
       query.delete()
 
 
-  def test_convert_workflow(self):
-    # TODO: write me
-    pass
+  def test_convert_mapreduce(self):
+    wf = Workflow.objects.new_workflow(self.user)
+    wf.save()
+    Workflow.objects.initialize(wf)
+    Link.objects.filter(parent__workflow=wf).delete()
+    action = add_node(wf, 'action-name-1', 'mapreduce', [wf.start], {
+      'description': 'Test MR job design',
+      'files': '[]',
+      'jar_path': '/user/hue/oozie/examples/lib/hadoop-examples.jar',
+      'job_properties': '[{"name": "sleep.job.map.sleep.time", "value": "5"}, {"name": "sleep.job.reduce.sleep.time", "value": "10"}]',
+      'prepares': '[{"value":"${output}","type":"delete"},{"value":"/test","type":"mkdir"}]',
+      'archives': '[]',
+    })
+    Link(parent=action, child=wf.end, name="ok").save()
+
+    # Setting doc.last_modified to older date
+    doc = Document.objects.get(id=wf.doc.get().id)
+    Document.objects.filter(id=doc.id).update(last_modified=datetime.strptime('2000-01-01T00:00:00Z', '%Y-%m-%dT%H:%M:%SZ'))
+    doc = Document.objects.get(id=doc.id)
+
+    try:
+      if IS_HUE_4.get():
+        # Test that corresponding doc2 is created after convert
+        assert_false(Document2.objects.filter(owner=self.user, type='query-mapreduce').exists())
+
+        converter = DocumentConverter(self.user)
+        converter.convert()
+
+        doc2 = Document2.objects.get(owner=self.user, type='query-mapreduce')
+
+        # Verify snippet values
+        assert_equal('ready', doc2.data_dict['snippets'][0]['status'])
+        assert_equal('/user/hue/oozie/examples/lib/hadoop-examples.jar', doc2.data_dict['snippets'][0]['properties']['app_jar'])
+        assert_equal(['sleep.job.map.sleep.time=5', 'sleep.job.reduce.sleep.time=10'], doc2.data_dict['snippets'][0]['properties']['hadoopProperties'])
+      else:
+        # Test that corresponding doc2 is created after convert
+        assert_false(Document2.objects.filter(owner=self.user, type='link-workflow').exists())
+
+        converter = DocumentConverter(self.user)
+        converter.convert()
+
+        doc2 = Document2.objects.get(owner=self.user, type='link-workflow')
+
+        # Verify absolute_url
+        response = self.client.get(doc2.get_absolute_url())
+        assert_equal(200, response.status_code)
+        assert_equal(doc.last_modified.strftime('%Y-%m-%dT%H:%M:%S'), doc2.last_modified.strftime('%Y-%m-%dT%H:%M:%S'))
+    finally:
+      wf.delete()
 
 
   def test_convert_pig_script(self):

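For reference, the Hue 4 branch of the new test pins down this shape for the
converted document (values copied from the assertions above; an excerpt, not
the full Document2 data schema):

    expected_snippet = {
        'status': 'ready',
        'properties': {
            'app_jar': '/user/hue/oozie/examples/lib/hadoop-examples.jar',
            'hadoopProperties': [
                'sleep.job.map.sleep.time=5',
                'sleep.job.reduce.sleep.time=10',
            ],
        },
    }
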
+ 67 - 34
desktop/core/src/desktop/converters.py

@@ -26,7 +26,7 @@ from desktop.conf import IS_HUE_4
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.models import Document, DocumentPermission, DocumentTag, Document2, Directory, Document2Permission
 from notebook.api import _historify
-from notebook.models import import_saved_beeswax_query, import_saved_pig_script
+from notebook.models import import_saved_beeswax_query, import_saved_pig_script, import_saved_mapreduce_job
 
 
 LOG = logging.getLogger(__name__)
@@ -47,6 +47,38 @@ class DocumentConverter(object):
 
 
   def convert(self):
+    self._convert_saved_queries()
+
+    self._convert_query_histories()
+
+    self._convert_job_designs()
+
+    self._convert_pig_scripts()
+
+    # Log the number of successfully imported docs
+    if self.imported_doc_count:
+      LOG.info('Successfully imported %d documents for user: %s' % (self.imported_doc_count, self.user.username))
+
+    # Log docs that failed to import
+    if self.failed_doc_ids:
+      LOG.error('Failed to import %d document(s) for user: %s - %s' % (len(self.failed_doc_ids), self.user.username, self.failed_doc_ids))
+
+    # Set is_trashed field for old documents with is_trashed=None
+    docs = Document2.objects.filter(owner=self.user, is_trashed=None).exclude(is_history=True)
+    for doc in docs:
+      try:
+        if doc.path and doc.path != '/.Trash':
+          doc_last_modified = doc.last_modified
+          doc.is_trashed = doc.path.startswith('/.Trash')
+          doc.save()
+
+          # save() updates the last_modified to current time. Resetting it using update()
+          Document2.objects.filter(id=doc.id).update(last_modified=doc_last_modified)
+      except Exception, e:
+        LOG.exception("Failed to set is_trashed field with exception: %s" % e)
+
+
+  def _convert_saved_queries(self):
     # Convert SavedQuery documents
     try:
       from beeswax.models import SavedQuery, HQL, IMPALA, RDBMS
@@ -59,11 +91,11 @@ class DocumentConverter(object):
             data = notebook.get_data()
 
             doc2 = self._create_doc2(
-                document=doc,
-                doctype=data['type'],
-                name=data['name'],
-                description=data['description'],
-                data=notebook.get_json()
+              document=doc,
+              doctype=data['type'],
+              name=data['name'],
+              description=data['description'],
+              data=notebook.get_json()
             )
 
             self.imported_doc_count += 1
@@ -73,11 +105,14 @@ class DocumentConverter(object):
     except ImportError:
       LOG.warn('Cannot convert Saved Query documents: beeswax app is not installed')
 
+
+  def _convert_query_histories(self):
     # Convert SQL Query history documents
     try:
       from beeswax.models import SavedQuery, HQL, IMPALA, RDBMS
 
-      docs = self._get_unconverted_docs(SavedQuery, only_history=True).filter(extra__in=[HQL, IMPALA, RDBMS]).order_by('-last_modified')
+      docs = self._get_unconverted_docs(SavedQuery, only_history=True).filter(extra__in=[HQL, IMPALA, RDBMS]).order_by(
+        '-last_modified')
 
       for doc in docs:
         try:
@@ -107,23 +142,40 @@ class DocumentConverter(object):
       LOG.warn('Cannot convert history documents: beeswax app is not installed')
 
 
+  def _convert_job_designs(self):
     # Convert Job Designer documents
     try:
       from oozie.models import Workflow
 
-      # TODO: Change this logic to actually embed the workflow data in Doc2 instead of linking to old job design
       docs = self._get_unconverted_docs(Workflow)
       for doc in docs:
         try:
           if doc.content_object:
-            data = doc.content_object.data_dict
-            data.update({'content_type': doc.content_type.model, 'object_id': doc.object_id})
-            doc2 = self._create_doc2(
+            node = doc.content_object.start.get_child('to')
+            if IS_HUE_4.get():
+              notebook = None
+              if node.node_type == 'mapreduce':
+                notebook = import_saved_mapreduce_job(doc.content_object)
+
+              if notebook:
+                data = notebook.get_data()
+                doc2 = self._create_doc2(
+                  document=doc,
+                  doctype=data['type'],
+                  name=doc.name,
+                  description=data['description'],
+                  data=notebook.get_json()
+                )
+            else:
+              data = doc.content_object.data_dict
+              data.update({'content_type': doc.content_type.model, 'object_id': doc.object_id})
+              doc2 = self._create_doc2(
                 document=doc,
                 doctype='link-workflow',
+                name=doc.name,
                 description=doc.description,
                 data=json.dumps(data)
-            )
+              )
             self.imported_doc_count += 1
         except Exception, e:
           self.failed_doc_ids.append(doc.id)
@@ -131,6 +183,8 @@ class DocumentConverter(object):
     except ImportError, e:
       LOG.warn('Cannot convert Job Designer documents: oozie app is not installed')
 
+
+  def _convert_pig_scripts(self):
     # Convert PigScript documents
     try:
       from pig.models import PigScript
@@ -157,6 +211,7 @@ class DocumentConverter(object):
               doc2 = self._create_doc2(
                 document=doc,
                 doctype='link-pigscript',
+                name=data['name'],
                 description=doc.description,
                 data=json.dumps(data)
               )
@@ -168,28 +223,6 @@ class DocumentConverter(object):
     except ImportError, e:
       LOG.warn('Cannot convert Pig documents: pig app is not installed')
 
-    # Add converted docs to root directory
-    if self.imported_doc_count:
-      LOG.info('Successfully imported %d documents for user: %s' % (self.imported_doc_count, self.user.username))
-
-    # Log docs that failed to import
-    if self.failed_doc_ids:
-      LOG.error('Failed to import %d document(s) for user: %s - %s' % (len(self.failed_doc_ids), self.user.username, self.failed_doc_ids))
-
-    # Set is_trashed field for old documents with is_trashed=None
-    docs = Document2.objects.filter(owner=self.user, is_trashed=None).exclude(is_history=True)
-    for doc in docs:
-      try:
-        if doc.path and doc.path != '/.Trash':
-          doc_last_modified = doc.last_modified
-          doc.is_trashed = doc.path.startswith('/.Trash')
-          doc.save()
-
-          # save() updates the last_modified to current time. Resetting it using update()
-          Document2.objects.filter(id=doc.id).update(last_modified=doc_last_modified)
-      except Exception, e:
-        LOG.exception("Failed to set is_trashed field with exception: %s" % e)
-
 
   def _get_unconverted_docs(self, content_type, only_history=False):
     docs = Document.objects.get_docs(self.user, content_type).filter(owner=self.user)

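The refactor does not change the entry point: callers still construct a
DocumentConverter and call convert(), which now fans out to the per-type
helpers (_convert_saved_queries, _convert_query_histories,
_convert_job_designs, _convert_pig_scripts). A minimal usage sketch, mirroring
the test setup above, where user is a Django User:

    from desktop.converters import DocumentConverter

    # Convert all of a user's old Document objects (saved queries, query
    # histories, job designs, pig scripts) into Document2 entries.
    converter = DocumentConverter(user)
    converter.convert()
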
+ 55 - 0
desktop/libs/notebook/src/notebook/models.py

@@ -16,6 +16,7 @@
 # limitations under the License.
 
 import json
+import logging
 import math
 import numbers
 import uuid
@@ -27,6 +28,9 @@ from desktop.lib.i18n import smart_unicode
 from notebook.connectors.base import Notebook
 
 
+LOG = logging.getLogger(__name__)
+
+
 # Materialize and HTML escape results
 def escape_rows(rows, nulls_only=False):
   data = []
@@ -240,6 +244,57 @@ def import_saved_pig_script(pig_script):
   return notebook
 
 
+def import_saved_mapreduce_job(wf):
+  snippet_properties = {}
+
+  node = wf.start.get_child('to')
+
+  snippet_properties['files'] = []
+  try:
+    files = json.loads(node.files)
+    for filepath in files:
+      snippet_properties['files'].append({'type': 'file', 'path': filepath})
+  except ValueError, e:
+    LOG.warn('Failed to parse files for mapreduce job design "%s".' % wf.name)
+
+  snippet_properties['archives'] = []
+  try:
+    archives = json.loads(node.archives)
+    for filepath in archives:
+      snippet_properties['archives'].append(filepath)
+  except ValueError, e:
+    LOG.warn('Failed to parse archives for mapreduce job design "%s".' % wf.name)
+
+  snippet_properties['hadoopProperties'] = []
+  try:
+    properties = json.loads(node.job_properties)
+    if properties:
+      for prop in properties:
+        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
+  except ValueError, e:
+    LOG.warn('Failed to parse job properties for mapreduce job design "%s".' % wf.name)
+
+  snippet_properties['app_jar'] = node.jar_path
+
+  notebook = make_notebook(
+    name=wf.name,
+    description=wf.description,
+    editor_type='mapreduce',
+    statement='',
+    status='ready',
+    snippet_properties=snippet_properties,
+    is_saved=True
+  )
+
+  # Remove functions, settings from snippet properties
+  data = notebook.get_data()
+  data['snippets'][0]['properties'].pop('functions')
+  data['snippets'][0]['properties'].pop('settings')
+
+  notebook.data = json.dumps(data)
+  return notebook
+
+
 def _convert_type(btype, bdata):
   from beeswax.models import HQL, IMPALA, RDBMS, SPARK
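
A quick sanity-check sketch for the new import helper, assuming wf is an
old-style mapreduce Workflow design like the one built in the test above:

    from notebook.models import import_saved_mapreduce_job

    notebook = import_saved_mapreduce_job(wf)
    data = notebook.get_data()

    # The snippet carries the design's jar path and the flattened name=value
    # Hadoop properties; 'functions' and 'settings' were popped above.
    node = wf.start.get_child('to')
    assert data['snippets'][0]['status'] == 'ready'
    assert data['snippets'][0]['properties']['app_jar'] == node.jar_path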