HUE-5304 [indexer] Move SQL indexer into its own lib

Romain Rigaux 8 years ago
parent
commit
d6401e4

+ 6 - 179
desktop/libs/indexer/src/indexer/api3.py

@@ -32,7 +32,8 @@ from notebook.models import make_notebook
 from indexer.controller import CollectionManagerController
 from indexer.file_format import HiveFormat
 from indexer.fields import Field
-from indexer.indexers.morphline import Indexer
+from indexer.indexers.morphline import MorhlineIndexer
+from indexer.indexers.sql import SQLIndexer
 from indexer.solr_client import SolrClient, SolrClientException
 
 
@@ -73,7 +74,7 @@ def guess_format(request):
   file_format = json.loads(request.POST.get('fileFormat', '{}'))
 
   if file_format['inputFormat'] == 'file':
-    indexer = Indexer(request.user, request.fs)
+    indexer = MorhlineIndexer(request.user, request.fs)
     if not request.fs.isfile(file_format["path"]):
       raise PopupException(_('Path %(path)s is not a file') % file_format)
 
@@ -107,7 +108,7 @@ def guess_field_types(request):
   file_format = json.loads(request.POST.get('fileFormat', '{}'))
 
   if file_format['inputFormat'] == 'file':
-    indexer = Indexer(request.user, request.fs)
+    indexer = MorhlineIndexer(request.user, request.fs)
     stream = request.fs.open(file_format["path"])
     _convert_format(file_format["format"], inverse=True)
 
@@ -212,186 +213,12 @@ def create_database(request, source, destination, start_time):
 
 
 def _create_table(request, source, destination, start_time=-1):
-  notebook = _create_table_from_a_file(request, source, destination, start_time)
+  notebook = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination, start_time)
   return notebook.execute(request, batch=False)
 
 
-def _create_table_from_a_file(request, source, destination, start_time=-1):
-  if '.' in destination['name']:
-    database, table_name = destination['name'].split('.', 1)
-  else:
-    database = 'default'
-    table_name = destination['name']
-  final_table_name = table_name
-
-  table_format = destination['tableFormat']
-
-  columns = destination['columns']
-  partition_columns = destination['partitionColumns']
-  kudu_partition_columns = destination['kuduPartitionColumns']
-  comment = destination['description']
-
-  source_path = source['path']
-  external = not destination['useDefaultLocation']
-  external_path = destination['nonDefaultLocation']
-
-  load_data = destination['importData']
-  skip_header = destination['hasHeader']
-
-  primary_keys = destination['primaryKeys']
-
-  if destination['useCustomDelimiters']:
-    field_delimiter = destination['customFieldDelimiter']
-    collection_delimiter = destination['customCollectionDelimiter']
-    map_delimiter = destination['customMapDelimiter']
-  else:
-    field_delimiter = ','
-    collection_delimiter = r'\002'
-    map_delimiter = r'\003'
-  regexp_delimiter = destination['customRegexp']
-
-  file_format = 'TextFile'
-  row_format = 'Delimited'
-  serde_name = ''
-  serde_properties = ''
-  extra_create_properties = ''
-  sql = ''
-
-  if source['inputFormat'] == 'manual':
-    load_data = False
-    source['format'] = {
-      'quoteChar': '"',
-      'fieldSeparator': ','
-    }
-
-  if table_format == 'json':
-    row_format = 'serde'
-    serde_name = 'org.apache.hive.hcatalog.data.JsonSerDe'
-  elif table_format == 'regexp':
-    row_format = 'serde'
-    serde_name = 'org.apache.hadoop.hive.serde2.RegexSerDe'
-    serde_properties = '"input.regex" = "%s"' % regexp_delimiter
-  elif table_format == 'csv':
-    if source['format']['quoteChar'] == '"':
-      source['format']['quoteChar'] = '\\"'
-    row_format = 'serde'
-    serde_name = 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
-    serde_properties = '''"separatorChar" = "%(fieldSeparator)s",
-  "quoteChar"     = "%(quoteChar)s",
-  "escapeChar"    = "\\\\"
-  ''' % source['format']
-
-
-  if table_format in ('parquet', 'kudu'):
-    if load_data:
-      table_name, final_table_name = 'hue__tmp_%s' % table_name, table_name
-
-      sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
-          'database': database,
-          'table_name': table_name
-      }
-    else: # Manual
-      row_format = ''
-      file_format = table_format
-      skip_header = False
-      if table_format == 'kudu':
-        columns = [col for col in columns if col['name'] in primary_keys] + [col for col in columns if col['name'] not in primary_keys]
-
-  if table_format == 'kudu':
-    collection_delimiter = None
-    map_delimiter = None
-
-  if external or (load_data and table_format in ('parquet', 'kudu')):
-    if not request.fs.isdir(external_path): # File selected
-      external_path, external_file_name = request.fs.split(external_path)
-
-      if len(request.fs.listdir(external_path)) > 1:
-        external_path = external_path + '/%s_table' % external_file_name # If dir not just the file, create data dir and move file there.
-        request.fs.mkdir(external_path)
-        request.fs.rename(source_path, external_path)
-
-  sql += django_mako.render_to_string("gen/create_table_statement.mako", {
-      'table': {
-          'name': table_name,
-          'comment': comment,
-          'row_format': row_format,
-          'field_terminator': field_delimiter,
-          'collection_terminator': collection_delimiter, # Only if Hive
-          'map_key_terminator': map_delimiter, # Only if Hive
-          'serde_name': serde_name,
-          'serde_properties': serde_properties,
-          'file_format': file_format,
-          'external': external or load_data and table_format in ('parquet', 'kudu'),
-          'path': external_path,
-          'skip_header': skip_header,
-          'primary_keys': primary_keys if table_format == 'kudu' and not load_data else [],
-       },
-      'columns': columns,
-      'partition_columns': partition_columns,
-      'kudu_partition_columns': kudu_partition_columns,
-      'database': database
-    }
-  )
-
-  if table_format in ('text', 'json', 'csv', 'regexp') and not external and load_data:
-    form_data = {
-      'path': source_path,
-      'overwrite': False,
-      'partition_columns': [(partition['name'], partition['partitionValue']) for partition in partition_columns],
-    }
-    db = dbms.get(request.user)
-    sql += "\n\n%s;" % db.load_data(database, table_name, form_data, None, generate_ddl_only=True)
-
-  if load_data and table_format in ('parquet', 'kudu'):
-    file_format = table_format
-    if table_format == 'kudu':
-      columns_list = ['`%s`' % col for col in primary_keys + [col['name'] for col in destination['columns'] if col['name'] not in primary_keys]]
-      extra_create_properties = """PRIMARY KEY (%(primary_keys)s)
-      PARTITION BY HASH PARTITIONS 16
-      STORED AS %(file_format)s
-      TBLPROPERTIES(
-      'kudu.num_tablet_replicas' = '1'
-      )""" % {
-        'file_format': file_format,
-        'primary_keys': ', '.join(primary_keys)
-      }
-    else:
-      columns_list = ['*']
-      extra_create_properties = 'STORED AS %(file_format)s' % {'file_format': file_format}
-    sql += '''\n\nCREATE TABLE `%(database)s`.`%(final_table_name)s`%(comment)s
-      %(extra_create_properties)s
-      AS SELECT %(columns_list)s
-      FROM `%(database)s`.`%(table_name)s`;''' % {
-        'database': database,
-        'final_table_name': final_table_name,
-        'table_name': table_name,
-        'extra_create_properties': extra_create_properties,
-        'columns_list': ', '.join(columns_list),
-        'comment': ' COMMENT "%s"' % comment if comment else ''
-    }
-    sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
-        'database': database,
-        'table_name': table_name
-    }
-
-  editor_type = 'impala' if table_format == 'kudu' else destination['apiHelperType']
-
-  on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': table_name})
-
-  return make_notebook(
-      name=_('Creating table %(database)s.%(table)s') % {'database': database, 'table': table_name},
-      editor_type=editor_type,
-      statement=sql.strip(),
-      status='ready',
-      database=database,
-      on_success_url=on_success_url,
-      last_executed=start_time,
-      is_task=True
-  )
-
-
 def _index(request, file_format, collection_name, query=None, start_time=None):
-  indexer = Indexer(request.user, request.fs)
+  indexer = MorhlineIndexer(request.user, request.fs)
 
   unique_field = indexer.get_unique_field(file_format)
   is_unique_generated = indexer.is_unique_generated(file_format)

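After this hunk, api3.py keeps only thin entry points: the Solr flow instantiates MorhlineIndexer and the SQL flow delegates to the new SQLIndexer class. A minimal sketch of the two call shapes, assuming a Django-style `request` and the `source`/`destination` dicts the views above deserialize from POST data:

  from indexer.indexers.morphline import MorhlineIndexer
  from indexer.indexers.sql import SQLIndexer

  # Solr/morphline path: same constructor the old Indexer class had.
  indexer = MorhlineIndexer(request.user, request.fs)

  # SQL path: build a CREATE TABLE notebook, then execute it synchronously.
  notebook = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination)
  notebook.execute(request, batch=False)
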
+ 1 - 1
desktop/libs/indexer/src/indexer/file_format.py

@@ -26,7 +26,7 @@ from desktop.lib import i18n
 from indexer.argument import CheckboxArgument, TextDelimiterArgument
 from indexer.conf import ENABLE_NEW_INDEXER
 from indexer.fields import Field, guess_field_type_from_samples
-from indexer.morphline_operations import get_operator
+from indexer.indexers.morphline_operations import get_operator
 
 
 LOG = logging.getLogger(__name__)

+ 3 - 3
desktop/libs/indexer/src/indexer/indexers/morphline.py

@@ -33,13 +33,13 @@ from indexer.conf import CONFIG_INDEXING_TEMPLATES_PATH
 from indexer.conf import CONFIG_INDEXER_LIBS_PATH
 from indexer.fields import get_field_type
 from indexer.file_format import get_file_format_instance, get_file_format_class
-from indexer.morphline_operations import get_checked_args
+from indexer.indexers.morphline_operations import get_checked_args
 
 
 LOG = logging.getLogger(__name__)
 
 
-class Indexer(object):
+class MorhlineIndexer(object):
 
   def __init__(self, username, fs=None, jt=None):
     self.fs = fs
@@ -68,7 +68,7 @@ class Indexer(object):
     workspace_path = self._upload_workspace(morphline)
 
     task = make_notebook(
-      name=_('Indexer job for %s') % collection_name,
+      name=_('MorhlineIndexer job for %s') % collection_name,
       editor_type='notebook',
       on_success_url=reverse('search:browse', kwargs={'name': collection_name}),
       is_task=True,

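The rename is mechanical: MorhlineIndexer keeps the old Indexer constructor (username, plus optional fs and jt). A minimal usage sketch of the format-guessing API under that assumption; the CSV content is illustrative, and the same pattern is exercised in the tests below:

  import StringIO  # Python 2, as used by morphline_tests.py

  from indexer.indexers.morphline import MorhlineIndexer

  indexer = MorhlineIndexer("test")
  stream = StringIO.StringIO("id,msg\n1,hello\n")  # sample data, not from the commit
  guessed_format = indexer.guess_format({'file': {'stream': stream, 'name': 'sample.csv'}})
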
+ 7 - 7
desktop/libs/indexer/src/indexer/indexers/morphline_tests.py

@@ -31,14 +31,14 @@ from indexer.controller import CollectionManagerController
 from indexer.file_format import ApacheCombinedFormat, RubyLogFormat, HueLogFormat
 from indexer.fields import Field
 from indexer.indexers.morphline_operations import get_operator
-from indexer.indexers.morphline import Indexer
+from indexer.indexers.morphline import MorhlineIndexer
 
 
 LOG = logging.getLogger(__name__)
 
 
 def _test_fixed_type_format_generate_morphline(format_):
-  indexer = Indexer("test")
+  indexer = MorhlineIndexer("test")
   format_instance = format_()
 
   morphline = indexer.generate_morphline_config("test_collection", {
@@ -52,7 +52,7 @@ def _test_generate_field_operation_morphline(operation_format):
   fields = TestIndexer.simpleCSVFields[:]
   fields[0]['operations'].append(operation_format)
 
-  indexer = Indexer("test")
+  indexer = MorhlineIndexer("test")
   morphline = indexer.generate_morphline_config("test_collection", {
       "columns": fields,
       "format": TestIndexer.simpleCSVFormat
@@ -125,7 +125,7 @@ class TestIndexer():
 
   def test_guess_csv_format(self):
     stream = StringIO.StringIO(TestIndexer.simpleCSVString)
-    indexer = Indexer("test")
+    indexer = MorhlineIndexer("test")
 
     guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
 
@@ -143,7 +143,7 @@ class TestIndexer():
         assert_equal(expected[key], actual[key])
 
   def test_guess_format_invalid_csv_format(self):
-    indexer = Indexer("test")
+    indexer = MorhlineIndexer("test")
     stream = StringIO.StringIO(TestIndexer.simpleCSVString)
 
     guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
@@ -170,7 +170,7 @@ class TestIndexer():
     assert_equal(fields, [])
 
   def test_generate_csv_morphline(self):
-    indexer = Indexer("test")
+    indexer = MorhlineIndexer("test")
     morphline = indexer.generate_morphline_config("test_collection", {
         "columns": self.simpleCSVFields,
         "format": self.simpleCSVFormat
@@ -258,7 +258,7 @@ class TestIndexer():
     make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
     user = User.objects.get(username="test")
     collection_name = "test_collection"
-    indexer = Indexer("test", fs=fs, jt=cluster.jt)
+    indexer = MorhlineIndexer("test", fs=fs, jt=cluster.jt)
     input_loc = "/tmp/test.csv"
 
     # upload the test file to hdfs

+ 213 - 0
desktop/libs/indexer/src/indexer/indexers/sql.py

@@ -0,0 +1,213 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from django.contrib.auth.models import User
+from django.core.urlresolvers import reverse
+from django.utils.translation import ugettext as _
+
+from desktop.lib import django_mako
+from notebook.models import make_notebook
+
+
+LOG = logging.getLogger(__name__)
+
+
+try:
+  from beeswax.server import dbms
+except ImportError, e:
+  LOG.warn('Hive and HiveServer2 interfaces are not enabled')
+
+
+class SQLIndexer(object):
+
+  def __init__(self, user, fs):
+    self.fs = fs
+    self.user = user
+
+  def create_table_from_a_file(self, source, destination, start_time=-1):
+    if '.' in destination['name']:
+      database, table_name = destination['name'].split('.', 1)
+    else:
+      database = 'default'
+      table_name = destination['name']
+    final_table_name = table_name
+  
+    table_format = destination['tableFormat']
+  
+    columns = destination['columns']
+    partition_columns = destination['partitionColumns']
+    kudu_partition_columns = destination['kuduPartitionColumns']
+    comment = destination['description']
+  
+    source_path = source['path']
+    external = not destination['useDefaultLocation']
+    external_path = destination['nonDefaultLocation']
+  
+    load_data = destination['importData']
+    skip_header = destination['hasHeader']
+  
+    primary_keys = destination['primaryKeys']
+  
+    if destination['useCustomDelimiters']:
+      field_delimiter = destination['customFieldDelimiter']
+      collection_delimiter = destination['customCollectionDelimiter']
+      map_delimiter = destination['customMapDelimiter']
+    else:
+      field_delimiter = ','
+      collection_delimiter = r'\002'
+      map_delimiter = r'\003'
+    regexp_delimiter = destination['customRegexp']
+  
+    file_format = 'TextFile'
+    row_format = 'Delimited'
+    serde_name = ''
+    serde_properties = ''
+    extra_create_properties = ''
+    sql = ''
+  
+    if source['inputFormat'] == 'manual':
+      load_data = False
+      source['format'] = {
+        'quoteChar': '"',
+        'fieldSeparator': ','
+      }
+  
+    if table_format == 'json':
+      row_format = 'serde'
+      serde_name = 'org.apache.hive.hcatalog.data.JsonSerDe'
+    elif table_format == 'regexp':
+      row_format = 'serde'
+      serde_name = 'org.apache.hadoop.hive.serde2.RegexSerDe'
+      serde_properties = '"input.regex" = "%s"' % regexp_delimiter
+    elif table_format == 'csv':
+      if source['format']['quoteChar'] == '"':
+        source['format']['quoteChar'] = '\\"'
+      row_format = 'serde'
+      serde_name = 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
+      serde_properties = '''"separatorChar" = "%(fieldSeparator)s",
+    "quoteChar"     = "%(quoteChar)s",
+    "escapeChar"    = "\\\\"
+    ''' % source['format']
+  
+  
+    if table_format in ('parquet', 'kudu'):
+      if load_data:
+        table_name, final_table_name = 'hue__tmp_%s' % table_name, table_name
+  
+        sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
+            'database': database,
+            'table_name': table_name
+        }
+      else: # Manual
+        row_format = ''
+        file_format = table_format
+        skip_header = False
+        if table_format == 'kudu':
+          columns = [col for col in columns if col['name'] in primary_keys] + [col for col in columns if col['name'] not in primary_keys]
+  
+    if table_format == 'kudu':
+      collection_delimiter = None
+      map_delimiter = None
+  
+    if external or (load_data and table_format in ('parquet', 'kudu')):
+      if not self.fs.isdir(external_path): # File selected
+        external_path, external_file_name = self.fs.split(external_path)
+  
+        if len(self.fs.listdir(external_path)) > 1:
+          external_path = external_path + '/%s_table' % external_file_name # Dir holds more than this file: create a data dir and move the file there.
+          self.fs.mkdir(external_path)
+          self.fs.rename(source_path, external_path)
+  
+    sql += django_mako.render_to_string("gen/create_table_statement.mako", {
+        'table': {
+            'name': table_name,
+            'comment': comment,
+            'row_format': row_format,
+            'field_terminator': field_delimiter,
+            'collection_terminator': collection_delimiter, # Only if Hive
+            'map_key_terminator': map_delimiter, # Only if Hive
+            'serde_name': serde_name,
+            'serde_properties': serde_properties,
+            'file_format': file_format,
+            'external': external or load_data and table_format in ('parquet', 'kudu'),
+            'path': external_path,
+            'skip_header': skip_header,
+            'primary_keys': primary_keys if table_format == 'kudu' and not load_data else [],
+         },
+        'columns': columns,
+        'partition_columns': partition_columns,
+        'kudu_partition_columns': kudu_partition_columns,
+        'database': database
+      }
+    )
+  
+    if table_format in ('text', 'json', 'csv', 'regexp') and not external and load_data:
+      form_data = {
+        'path': source_path,
+        'overwrite': False,
+        'partition_columns': [(partition['name'], partition['partitionValue']) for partition in partition_columns],
+      }
+      db = dbms.get(self.user)
+      sql += "\n\n%s;" % db.load_data(database, table_name, form_data, None, generate_ddl_only=True)
+  
+    if load_data and table_format in ('parquet', 'kudu'):
+      file_format = table_format
+      if table_format == 'kudu':
+        columns_list = ['`%s`' % col for col in primary_keys + [col['name'] for col in destination['columns'] if col['name'] not in primary_keys]]
+        extra_create_properties = """PRIMARY KEY (%(primary_keys)s)
+        PARTITION BY HASH PARTITIONS 16
+        STORED AS %(file_format)s
+        TBLPROPERTIES(
+        'kudu.num_tablet_replicas' = '1'
+        )""" % {
+          'file_format': file_format,
+          'primary_keys': ', '.join(primary_keys)
+        }
+      else:
+        columns_list = ['*']
+        extra_create_properties = 'STORED AS %(file_format)s' % {'file_format': file_format}
+      sql += '''\n\nCREATE TABLE `%(database)s`.`%(final_table_name)s`%(comment)s
+        %(extra_create_properties)s
+        AS SELECT %(columns_list)s
+        FROM `%(database)s`.`%(table_name)s`;''' % {
+          'database': database,
+          'final_table_name': final_table_name,
+          'table_name': table_name,
+          'extra_create_properties': extra_create_properties,
+          'columns_list': ', '.join(columns_list),
+          'comment': ' COMMENT "%s"' % comment if comment else ''
+      }
+      sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
+          'database': database,
+          'table_name': table_name
+      }
+  
+    editor_type = 'impala' if table_format == 'kudu' else destination['apiHelperType']
+  
+    on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': table_name})
+  
+    return make_notebook(
+        name=_('Creating table %(database)s.%(table)s') % {'database': database, 'table': table_name},
+        editor_type=editor_type,
+        statement=sql.strip(),
+        status='ready',
+        database=database,
+        on_success_url=on_success_url,
+        last_executed=start_time,
+        is_task=True
+    )

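For reference, a hedged sketch of the payloads create_table_from_a_file() consumes, reconstructed from the keys it reads above. Every value is illustrative, and the column shape is an assumption (columns are rendered by the gen/create_table_statement.mako template, which is not part of this diff):

  from indexer.indexers.sql import SQLIndexer

  source = {
    'path': '/user/demo/data.csv',  # hypothetical HDFS path
    'inputFormat': 'file',
    'format': {'quoteChar': '"', 'fieldSeparator': ','},
  }
  destination = {
    'name': 'default.demo',    # 'db.table', or a bare name that falls back to 'default'
    'tableFormat': 'csv',      # one of: text, csv, json, regexp, parquet, kudu
    'columns': [{'name': 'id', 'type': 'bigint'}, {'name': 'msg', 'type': 'string'}],
    'partitionColumns': [],    # entries need 'name' and 'partitionValue' when importing data
    'kuduPartitionColumns': [],
    'description': '',
    'primaryKeys': [],
    'useDefaultLocation': True,
    'nonDefaultLocation': '',
    'importData': True,
    'hasHeader': True,
    'useCustomDelimiters': False,
    'customRegexp': '',        # read unconditionally, even for non-regexp tables
    'apiHelperType': 'hive',   # editor type used unless tableFormat is 'kudu'
  }

  notebook = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination)

Here `request.user` and `request.fs` stand in for the Django request attributes used in api3.py.
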
File diff suppressed because it is too large
+ 1 - 1
desktop/libs/indexer/src/indexer/indexers/sql_tests.py


+ 1 - 1
desktop/libs/indexer/src/indexer/views.py

@@ -28,7 +28,7 @@ from indexer.solr_client import SolrClient
 from indexer.fields import FIELD_TYPES, Field
 from indexer.file_format import get_file_indexable_format_types
 from indexer.management.commands import indexer_setup
-from indexer.morphline_operations import OPERATORS
+from indexer.indexers.morphline_operations import OPERATORS
 
 
 LOG = logging.getLogger(__name__)

Some files were not shown because too many files changed in this diff