
HUE-5090 [notebook] Refactor creation of a notebook with Hive or MR snippets

Romain Rigaux 9 years ago
Parent commit: d760de1eb7

+ 36 - 58
desktop/libs/indexer/src/indexer/smart_indexer.py

@@ -24,20 +24,17 @@ from django.utils.translation import ugettext as _
 from mako.lookup import TemplateLookup
 from mako.template import Template
 
-
 from desktop.models import Document2
 from notebook.api import _execute_notebook
-from notebook.models import make_notebook2
-from oozie.models2 import Job
+from notebook.connectors.base import get_api
+from notebook.models import Notebook
 
-from indexer.fields import get_field_type
-from indexer.operations import get_checked_args
-from indexer.file_format import get_file_format_instance, get_file_format_class
 from indexer.conf import CONFIG_INDEXING_TEMPLATES_PATH
 from indexer.conf import CONFIG_INDEXER_LIBS_PATH
 from indexer.conf import zkensemble
-
-from notebook.connectors.base import get_api
+from indexer.fields import get_field_type
+from indexer.file_format import get_file_format_instance, get_file_format_class
+from indexer.operations import get_checked_args
 
 
 LOG = logging.getLogger(__name__)
@@ -52,6 +49,8 @@ class Indexer(object):
     self.user = User.objects.get(username=username) # To clean
 
   def _upload_workspace(self, morphline):
+    from oozie.models2 import Job
+
     hdfs_workspace_path = Job.get_workspace(self.username)
     hdfs_morphline_path = os.path.join(hdfs_workspace_path, "morphline.conf")
     hdfs_log4j_properties_path = os.path.join(hdfs_workspace_path, "log4j.properties")
@@ -69,12 +68,11 @@ class Indexer(object):
   def run_morphline(self, request, collection_name, morphline, input_path, query=None):
     workspace_path = self._upload_workspace(morphline)
 
-    snippets = []
+    notebook = Notebook(name='Indexer job for %s' % collection_name)
 
     if query:
-      from notebook.models import Notebook
-      notebook = Notebook(document=Document2.objects.get_by_uuid(user=self.user, uuid=query))
-      notebook_data = notebook.get_data()
+      q = Notebook(document=Document2.objects.get_by_uuid(user=self.user, uuid=query))
+      notebook_data = q.get_data()
       snippet = notebook_data['snippets'][0]
 
       api = get_api(request, snippet)
@@ -82,57 +80,37 @@ class Indexer(object):
       destination = '__hue_%s' % notebook_data['uuid'][:4]
       location = '/user/%s/__hue-%s' % (request.user,  notebook_data['uuid'][:4])
       sql, success_url = api.export_data_as_table(notebook_data, snippet, destination, is_temporary=True, location=location)
-
-      statement = sql
       input_path = '${nameNode}%s' % location
 
-      snippets.append({
-         'status': 'running',
-         'statement_raw': statement,
-         'statement': statement,
-         'type': 'query-hive',
-         'properties': {
-#             'files': [] if files is None else files,
-#             'functions': [] if functions is None else functions,
-#             'settings': [] if settings is None else settings
-         },
-         'database': snippet['database'],
-      }
-    )
-
-    snippets.append({
-        u'type': u'java',
-        u'status': u'running',
-        u'properties':  {
-          u'files': [
-              {u'path': u'%s/log4j.properties' % workspace_path, u'type': u'file'},
-              {u'path': u'%s/morphline.conf' % workspace_path, u'type': u'file'}
-          ],
-          u'class': u'org.apache.solr.hadoop.MapReduceIndexerTool',
-          u'app_jar': CONFIG_INDEXER_LIBS_PATH.get(),
-          u'arguments': [
-              u'--morphline-file',
-              u'morphline.conf',
-              u'--output-dir',
-              u'${nameNode}/user/%s/indexer' % self.username,
-              u'--log4j',
-              u'log4j.properties',
-              u'--go-live',
-              u'--zk-host',
-              zkensemble(),
-              u'--collection',
-              collection_name,
-              input_path,
-          ],
-          u'archives': [],
-        }
-      }
+      notebook.add_hive_snippet(snippet['database'], sql)
+
+    notebook.add_java_snippet(
+      clazz='org.apache.solr.hadoop.MapReduceIndexerTool',
+      app_jar=CONFIG_INDEXER_LIBS_PATH.get(),
+      arguments=[
+          u'--morphline-file',
+          u'morphline.conf',
+          u'--output-dir',
+          u'${nameNode}/user/%s/indexer' % self.username,
+          u'--log4j',
+          u'log4j.properties',
+          u'--go-live',
+          u'--zk-host',
+          zkensemble(),
+          u'--collection',
+          collection_name,
+          input_path,
+      ],
+      files=[
+          {u'path': u'%s/log4j.properties' % workspace_path, u'type': u'file'},
+          {u'path': u'%s/morphline.conf' % workspace_path, u'type': u'file'}
+      ]
     )
 
-    notebook = make_notebook2(name='Indexer job for %s' % collection_name, snippets=snippets).get_data()
-    snippet = {'wasBatchExecuted': True, 'type': 'oozie', 'id': notebook['snippets'][0]['id'], 'statement': ''}
+    notebook_data = notebook.get_data()
+    snippet = {'wasBatchExecuted': True, 'type': 'oozie', 'id': notebook_data['snippets'][0]['id'], 'statement': ''}
 
-    job_handle = _execute_notebook(request, notebook, snippet) # To set as managed
+    job_handle = _execute_notebook(request, notebook_data, snippet) # To set as managed
 
     return job_handle
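
What this hunk boils down to: the optional Hive CTAS snippet and the MapReduceIndexerTool Java snippet are now appended through the new Notebook helpers instead of being assembled as raw dictionaries. A condensed, self-contained sketch of the resulting composition (the collection name, input path and app_jar value are placeholders; only Notebook and its methods come from this commit, and _add_session() needs the Hue runtime to resolve HS2Api):

    from notebook.connectors.base import Notebook

    collection_name = 'tweets'              # hypothetical collection
    input_path = '/user/demo/tweets'        # hypothetical HDFS input directory

    notebook = Notebook(name='Indexer job for %s' % collection_name)
    notebook.add_java_snippet(
        clazz='org.apache.solr.hadoop.MapReduceIndexerTool',
        app_jar='/user/hue/indexer/libs',   # placeholder for CONFIG_INDEXER_LIBS_PATH.get()
        arguments=['--collection', collection_name, input_path],
        files=[])

    data = notebook.get_data()
    assert data['snippets'][0]['type'] == 'java'   # snippet built by _make_snippet()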
 

+ 72 - 6
desktop/libs/notebook/src/notebook/connectors/base.py

@@ -17,6 +17,7 @@
 
 import json
 import logging
+import uuid
 
 from django.utils.translation import ugettext as _
 
@@ -57,19 +58,24 @@ class QueryError(Exception):
 
 class Notebook(object):
 
-  def __init__(self, document=None):
+  def __init__(self, document=None, **options):
     self.document = None
 
     if document is not None:
       self.data = document.data
       self.document = document
     else:
-      self.data = json.dumps({
+      _data = {
           'name': 'My Notebook',
+          'uuid': str(uuid.uuid4()),
           'description': '',
           'type': 'notebook',
+          'isSaved': False,
+          'sessions': [],
           'snippets': [],
-      })
+      }
+      _data.update(options)
+      self.data = json.dumps(_data)
 
   def get_json(self):
     _data = self.get_data()
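
The constructor now seeds a fuller default payload (uuid, isSaved, sessions) and lets callers override any top-level key through **options. A minimal sketch of the override behavior (name and description are keys of the default dict above; the values here are made up):

    from notebook.connectors.base import Notebook

    nb = Notebook(name='Indexer job for tweets', description='batch indexing demo')
    data = nb.get_data()

    assert data['name'] == 'Indexer job for tweets'   # overridden via **options
    assert data['isSaved'] is False                    # default preserved
    assert data['sessions'] == []                      # empty until a snippet is added
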
@@ -89,6 +95,69 @@ class Notebook(object):
     return '\n\n\n'.join(['USE %s;\n\n%s' % (snippet['database'], snippet['statement_raw']) for snippet in self.get_data()['snippets']])
 
 
+  def add_hive_snippet(self, database, sql):
+    _data = json.loads(self.data)
+
+    _data['snippets'].append(self._make_snippet({
+       'status': 'running',
+       'statement_raw': sql,
+       'statement': sql,
+       'type': 'query-hive',
+       'properties': {
+            'files': [],
+            'functions': [],
+            'settings': [],
+       },
+       'database': database,
+    }))
+    self._add_session(_data, 'query-hive')
+
+    self.data = json.dumps(_data)
+
+  def add_java_snippet(self, clazz, app_jar, arguments, files):
+    _data = json.loads(self.data)
+
+    _data['snippets'].append(self._make_snippet({
+        u'type': u'java',
+        u'status': u'running',
+        u'properties':  {
+          u'files': files,
+          u'class': clazz,
+          u'app_jar': app_jar,
+          u'arguments': arguments,
+          u'archives': [],
+        }
+    }))
+    self._add_session(_data, 'java')
+
+    self.data = json.dumps(_data)
+
+  def _make_snippet(self, _snippet):
+    return {
+         'status': _snippet.get('status', 'ready'),
+         'id': str(uuid.uuid4()),
+         'statement_raw': _snippet.get('statement', ''),
+         'statement': _snippet.get('statement', ''),
+         'type': _snippet.get('type'),
+         'properties': _snippet['properties'],
+         'name': _snippet.get('name', '%(type)s snippet' % _snippet),
+         'database': _snippet.get('database'),
+         'result': {},
+         'variables': []
+    }
+
+  def _add_session(self, data, snippet_type):
+    from notebook.connectors.hiveserver2 import HS2Api # Cyclic dependency
+
+    if snippet_type not in [_s['type'] for _s in data['sessions']]:
+      data['sessions'].append({
+         'type': snippet_type,
+         'properties': HS2Api.get_properties(snippet_type),
+         'id': None
+      }
+    )
+
+
 def get_api(request, snippet):
   from notebook.connectors.hiveserver2 import HS2Api
   from notebook.connectors.jdbc import JdbcApi
@@ -183,6 +252,3 @@ class Api(object):
   def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None): raise NotImplementedError()
 
   def export_large_data_to_hdfs(self, notebook, snippet, destination): raise NotImplementedError()
-
-  def fetch_result_size(self, notebook, snippet):
-    pass
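
A quick way to sanity-check the new helpers end to end (a sketch; the assertions restate what _make_snippet() and _add_session() produce above, and _add_session() requires the Hue runtime for HS2Api.get_properties()):

    from notebook.connectors.base import Notebook

    nb = Notebook(name='Demo')
    nb.add_hive_snippet('default', 'SELECT 1')
    nb.add_hive_snippet('default', 'SELECT 2')

    data = nb.get_data()
    assert len(data['snippets']) == 2
    assert len(data['sessions']) == 1                           # sessions deduplicated by type
    assert data['snippets'][0]['name'] == 'query-hive snippet'  # default name from _make_snippet()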

+ 0 - 7
desktop/libs/notebook/src/notebook/models.py

@@ -140,13 +140,6 @@ def make_notebook2(name='Browse', description='', is_saved=False, snippets=None)
     default_properties.update(snippet['properties'])
     snippet['properties'] = default_properties
 
-    if snippet['type'] == 'hive':
-      pass
-    elif snippet['type'] == 'impala':
-      pass
-    elif snippet['type'] == 'java':
-      pass
-
     _snippets.append(snippet)
 
   data = {
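
The removed if/elif ladder was dead code (every branch was a pass), so make_notebook2() behaves exactly as before: for each snippet it merges the snippet's own properties over the type's defaults. In isolation that merge works like this (toy stand-ins for the real default_properties, which are not shown in this hunk):

    # Toy restatement of the surviving merge logic:
    default_properties = {'files': [], 'functions': [], 'settings': []}
    snippet = {'type': 'java', 'properties': {'files': ['morphline.conf']}}

    default_properties.update(snippet['properties'])   # snippet values win
    snippet['properties'] = default_properties

    print(snippet['properties'])
    # {'files': ['morphline.conf'], 'functions': [], 'settings': []}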