
HUE-8888 [importer] List Kafka streams as input

Romain, 5 years ago (commit 4fcfd00846)

+ 115 - 32
desktop/libs/indexer/src/indexer/api3.py

@@ -53,10 +53,11 @@ from indexer.controller import CollectionManagerController
 from indexer.file_format import HiveFormat
 from indexer.fields import Field
 from indexer.indexers.envelope import EnvelopeIndexer
-from indexer.models import _save_pipeline
+from indexer.indexers.flink_sql import FlinkIndexer
 from indexer.indexers.morphline import MorphlineIndexer
 from indexer.indexers.rdbms import run_sqoop, _get_api
 from indexer.indexers.sql import SQLIndexer
+from indexer.models import _save_pipeline
 from indexer.solr_client import SolrClient, MAX_UPLOAD_SIZE
 from indexer.indexers.flume import FlumeIndexer

@@ -144,7 +145,11 @@ def guess_format(request):
           storage[delim['data_type']] = delim['comment']
     if table_metadata.details['properties']['format'] == 'text':
       format_ = {
-        "quoteChar": "\"", "recordSeparator": '\\n', "type": "csv", "hasHeader": False, "fieldSeparator": storage.get('field.delim', ',')
+        "quoteChar": "\"",
+        "recordSeparator": '\\n',
+        "type": "csv",
+        "hasHeader": False,
+        "fieldSeparator": storage.get('field.delim', ',')
       }
     elif table_metadata.details['properties']['format'] == 'parquet':
       format_ = {"type": "parquet", "hasHeader": False,}
@@ -157,7 +162,12 @@ def guess_format(request):
   elif file_format['inputFormat'] == 'stream':
     if file_format['streamSelection'] == 'kafka':
       format_ = {
-        "type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n", 'topics': get_topics()
+        "type": "csv",
+        "fieldSeparator": ",",
+        "hasHeader": True,
+        "quoteChar": "\"",
+        "recordSeparator": "\\n",
+        'topics': get_topics()
       }
     elif file_format['streamSelection'] == 'flume':
       format_ = {"type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n"}
@@ -263,23 +273,20 @@ def guess_field_types(request):
     }
   elif file_format['inputFormat'] == 'stream':
     if file_format['streamSelection'] == 'kafka':
-      if file_format.get('kafkaSelectedTopics') == 'NavigatorAuditEvents':
+      if file_format.get('kafkaSelectedTopics') == 'user_behavior':
         kafkaFieldNames = [
-          'id',
-          'additionalInfo', 'allowed', 'collectionName', 'databaseName', 'db',
-          'DELEGATION_TOKEN_ID', 'dst', 'entityId', 'family', 'impersonator',
-          'ip', 'name', 'objectType', 'objType', 'objUsageType',
-          'operationParams', 'operationText', 'op', 'opText', 'path',
-          'perms', 'privilege', 'qualifier', 'QUERY_ID', 'resourcePath',
-          'service', 'SESSION_ID', 'solrVersion', 'src', 'status',
-          'subOperation', 'tableName', 'table', 'time', 'type',
-          'url', 'user'
+          'user_id',
+          'item_id',
+          'category_id',
+          'behavior',
+          'ts'
         ]
-        kafkaFieldTypes = [
-          'string'
-        ] * len(kafkaFieldNames)
-        kafkaFieldNames.append('timeDate')
-        kafkaFieldTypes.append('date')
+        kafkaFieldTypes = ['BIGINT'] * len(kafkaFieldNames)
+
+        kafkaFieldNames.append('proctime')
+        kafkaFieldTypes.append('TIMESTAMP')
+        kafkaFieldNames.append('WATERMARK')
+        kafkaFieldTypes.append('WATERMARK')
       else:
         # Note: mocked here, should come from SFDC or Kafka API or sampling job
         kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
@@ -303,7 +310,11 @@ def guess_field_types(request):
         },
         "format": file_format['format']
       })
-      type_mapping = dict(list(zip(kafkaFieldNames, kafkaFieldTypes)))
+      type_mapping = dict(
+        list(
+          zip(kafkaFieldNames, kafkaFieldTypes)
+        )
+      )

       for col in format_['columns']:
         col['keyType'] = type_mapping[col['name']]
@@ -373,7 +384,7 @@ def importer_submit(request):
   source = json.loads(request.POST.get('source', '{}'))
   outputFormat = json.loads(request.POST.get('destination', '{}'))['outputFormat']
   destination = json.loads(request.POST.get('destination', '{}'))
-  destination['ouputFormat'] = outputFormat # Workaround a very weird bug
+  destination['ouputFormat'] = outputFormat  # Workaround a very weird bug
   start_time = json.loads(request.POST.get('start_time', '-1'))

   if source['inputFormat'] == 'file':
@@ -383,7 +394,7 @@ def importer_submit(request):

   if destination['ouputFormat'] in ('database', 'table'):
     destination['nonDefaultLocation'] = request.fs.netnormpath(destination['nonDefaultLocation']) \
-                                        if destination['nonDefaultLocation'] else destination['nonDefaultLocation']
+        if destination['nonDefaultLocation'] else destination['nonDefaultLocation']

   if destination['ouputFormat'] == 'index':
     source['columns'] = destination['columns']
@@ -392,23 +403,66 @@ def importer_submit(request):
     if destination['indexerRunJob'] or source['inputFormat'] == 'stream':
       _convert_format(source["format"], inverse=True)
       job_handle = _large_indexing(
-        request, source, index_name, start_time=start_time, lib_path=destination['indexerJobLibPath'], destination=destination
+          request,
+          source,
+          index_name,
+          start_time=start_time,
+          lib_path=destination['indexerJobLibPath'],
+          destination=destination
       )
     else:
       client = SolrClient(request.user)
-      job_handle = _small_indexing(request.user, request.fs, client, source, destination, index_name)
+      job_handle = _small_indexing(
+          request.user,
+          request.fs,
+          client,
+          source,
+          destination, index_name
+      )
   elif source['inputFormat'] in ('stream', 'connector') or destination['ouputFormat'] == 'stream':
-    job_handle = _envelope_job(request, source, destination, start_time=start_time, lib_path=destination['indexerJobLibPath'])
+    args = {
+      'source': source,
+      'destination': destination,
+      'start_time': start_time,
+      'dry_run': request.POST.get('show_command')
+    }
+    api = FlinkIndexer(
+      request.user,
+      request.fs
+    )
+
+    job_handle = api.create_table_from_kafka(**args)
+
+    if request.POST.get('show_command'):
+      job_handle = {
+        'status': 0,
+        'commands': job_handle
+      }
   elif source['inputFormat'] == 'altus':
     # BDR copy or DistCP + DDL + Sentry DDL copy
     pass
   elif source['inputFormat'] == 'rdbms':
     if destination['outputFormat'] in ('database', 'file', 'table', 'hbase'):
-      job_handle = run_sqoop(request, source, destination, start_time)
+      job_handle = run_sqoop(
+        request,
+        source,
+        destination,
+        start_time
+      )
   elif destination['ouputFormat'] == 'database':
-    job_handle = _create_database(request, source, destination, start_time)
+    job_handle = _create_database(
+      request,
+      source,
+      destination,
+      start_time
+    )
   else:
-    job_handle = _create_table(request, source, destination, start_time)
+    job_handle = _create_table(
+      request,
+      source,
+      destination,
+      start_time
+    )

   request.audit = {
     'operation': 'EXPORT',
@@ -458,8 +512,20 @@ def _small_indexing(user, fs, client, source, destination, index_name):
       searcher = CollectionManagerController(user)
       columns = [field['name'] for field in fields if field['name'] != 'hue_id']
       # Assumes handle still live
-      fetch_handle = lambda rows, start_over: get_api(request, snippet).fetch_result(notebook, snippet, rows=rows, start_over=start_over)
-      rows = searcher.update_data_from_hive(index_name, columns, fetch_handle=fetch_handle, indexing_options=kwargs)
+      fetch_handle = lambda rows, start_over: get_api(
+          request, snippet
+      ).fetch_result(
+          notebook,
+          snippet,
+          rows=rows,
+          start_over=start_over
+      )
+      rows = searcher.update_data_from_hive(
+          index_name,
+          columns,
+          fetch_handle=fetch_handle,
+          indexing_options=kwargs
+      )
       # TODO if rows == MAX_ROWS truncation warning
     elif source['inputFormat'] == 'manual':
       pass # No need to do anything
@@ -501,7 +567,10 @@ def _create_database(request, source, destination, start_time):
   )

   editor_type = destination['apiHelperType']
-  on_success_url = reverse('metastore:show_tables', kwargs={'database': database}) + "?source_type=" + source.get('sourceType', 'hive')
+  on_success_url = reverse(
+      'metastore:show_tables',
+      kwargs={'database': database}) + "?source_type=" + source.get('sourceType', 'hive'
+  )

   notebook = make_notebook(
       name=_('Creating database %(name)s') % destination,
@@ -567,7 +636,15 @@ def _large_indexing(request, file_format, collection_name, query=None, start_tim

   morphline = indexer.generate_morphline_config(collection_name, file_format, unique_field, lib_path=lib_path)

-  return indexer.run_morphline(request, collection_name, morphline, input_path, query, start_time=start_time, lib_path=lib_path)
+  return indexer.run_morphline(
+      request,
+      collection_name,
+      morphline,
+      input_path,
+      query,
+      start_time=start_time,
+      lib_path=lib_path
+  )


 def _envelope_job(request, file_format, destination, start_time=None, lib_path=None):
@@ -647,7 +724,13 @@ def _envelope_job(request, file_format, destination, start_time=None, lib_path=N
         properties['kafkaFieldNames'] = properties['kafkaFieldNames'].lower() # Kudu names should be all lowercase
       # Create table
       if not request.POST.get('show_command'):
-        SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(file_format, destination).execute(request)
+        SQLIndexer(
+            user=request.user,
+            fs=request.fs
+        ).create_table_from_a_file(
+            file_format,
+            destination
+        ).execute(request)

     if destination['tableFormat'] == 'kudu':
       manager = ManagerApi()

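Note on the importer change above: stream and connector sources now go through the new FlinkIndexer instead of _envelope_job, and posting show_command returns the generated SQL rather than submitting it. A minimal sketch of that dispatch (the helper name is illustrative; it assumes a Django request with the usual user and fs attributes):

from indexer.indexers.flink_sql import FlinkIndexer

def submit_kafka_stream(request, source, destination, start_time=-1):
  # Mirrors the new elif branch: build the Flink CREATE TABLE and either
  # return the raw SQL (dry run) or a submitted notebook task handle.
  api = FlinkIndexer(request.user, request.fs)
  job_handle = api.create_table_from_kafka(
      source,
      destination,
      start_time=start_time,
      dry_run=request.POST.get('show_command')
  )
  if request.POST.get('show_command'):
    job_handle = {'status': 0, 'commands': job_handle}
  return job_handle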
+ 92 - 0
desktop/libs/indexer/src/indexer/indexers/flink_sql.py

@@ -0,0 +1,92 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+import uuid
+
+from django.urls import reverse
+from django.utils.translation import ugettext as _
+
+from notebook.models import make_notebook
+from useradmin.models import User
+
+from desktop.lib import django_mako
+from desktop.lib.exceptions_renderable import PopupException
+
+if sys.version_info[0] > 2:
+  from urllib.parse import urlparse, unquote as urllib_unquote
+else:
+  from urllib import unquote as urllib_unquote
+  from urlparse import urlparse
+
+
+LOG = logging.getLogger(__name__)
+
+
+class FlinkIndexer():
+
+  def __init__(self, user, fs):
+    self.fs = fs
+    self.user = user
+
+  def create_table_from_kafka(self, source, destination, start_time=-1, dry_run=False):
+    if '.' in destination['name']:
+      database, table_name = destination['name'].split('.', 1)
+    else:
+      database = 'default'
+      table_name = destination['name']
+    final_table_name = table_name
+
+    source_type = source['sourceType']
+    editor_type = '51'  # destination['sourceType']
+
+    sql = '''CREATE TABLE %(table_name)s (
+    user_id BIGINT,
+    item_id BIGINT,
+    category_id BIGINT,
+    behavior STRING,
+    ts TIMESTAMP(3),
+    proctime AS PROCTIME(),   -- generates processing-time attribute using computed column
+    WATERMARK FOR ts AS ts - INTERVAL '5' SECOND  -- defines watermark on ts column, marks ts as event-time attribute
+) WITH (
+    'connector' = 'kafka',  -- using kafka connector
+    'topic' = 'user_behavior',  -- kafka topic
+    'scan.startup.mode' = 'earliest-offset',  -- reading from the beginning
+    'properties.bootstrap.servers' = 'kafka:9094',  -- kafka broker address
+    'format' = 'json'  -- the data format is json
+);''' % {
+          'database': database,
+          'table_name': table_name
+      }
+
+    if dry_run:
+      return sql
+    else:
+      on_success_url = reverse(
+          'metastore:describe_table', kwargs={'database': database, 'table': final_table_name}
+      ) + '?source_type=' + source_type
+
+      return make_notebook(
+          name=_('Creating table %(database)s.%(table)s') % {'database': database, 'table': final_table_name},
+          editor_type=editor_type,
+          statement=sql.strip(),
+          status='ready',
+          database=database,
+          on_success_url=on_success_url,
+          last_executed=start_time,
+          is_task=True
+      )

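Usage sketch for the new indexer (hedged: user and fs stand in for a Hue User and a filesystem handle, and the dicts only carry the keys create_table_from_kafka actually reads; the DDL itself is currently hard-coded to the user_behavior schema above):

from indexer.indexers.flink_sql import FlinkIndexer

source = {'sourceType': 'flink'}                 # only used to build the success URL
destination = {'name': 'default.user_behavior'}  # 'database.table' or a bare table name

# dry_run=True returns the rendered CREATE TABLE statement;
# otherwise a notebook task is created and submitted.
ddl = FlinkIndexer(user, fs).create_table_from_kafka(source, destination, dry_run=True)
print(ddl)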
+ 2 - 2
desktop/libs/kafka/src/kafka/conf.py

@@ -36,7 +36,7 @@ KAFKA = ConfigSection(
   key='kafka',
   help=_t("""Configuration options for Kafka API integration"""),
   members=dict(
-    IS_ENABLED = Config(
+    IS_ENABLED=Config(
       key="is_enabled",
       help=_t("Enable the Kafka integration."),
       type=coerce_bool,
@@ -50,7 +50,7 @@ KAFKA = ConfigSection(
     ),
     KSQL_API_URL=Config(
       key='ksql_api_url',
-      help=_t('Base URL of Kafka Ksql API.'),
+      help=_t('Base URL of ksqlDB API.'),
       default='http://127.0.0.1:8088'),
   )
 )

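Both edits here are cosmetic: PEP 8 spacing on the keyword argument and a reworded help string. Reading the section is unchanged; a small sketch, assuming the standard desktop.lib.conf accessors:

from kafka.conf import KAFKA

if KAFKA.IS_ENABLED.get():                 # [kafka] is_enabled in hue.ini
  ksql_api_url = KAFKA.KSQL_API_URL.get()  # defaults to http://127.0.0.1:8088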
+ 1 - 2
desktop/libs/kafka/src/kafka/kafka_api.py

@@ -108,8 +108,7 @@ def get_topics():
       broker_host = manager.get_kafka_brokers().split(',')[0].split(':')[0]
       return [name for name in list(manager.get_kafka_topics(broker_host).keys()) if not name.startswith('__')]
     except Exception as e:
-      print(e)
-      return ["traffic", "hueAccessLogs"]
+      return ['user_behavior']


 def get_topic(name):
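The mocked fallback now returns the single user_behavior topic the rest of this commit expects. Removing print(e) also silences the swallowed exception entirely; a hedged variant that logs it instead (manager stands in for the broker client the real get_topics() already builds, and LOG is the module-level logger most Hue modules define, both assumptions here):

import logging

LOG = logging.getLogger(__name__)

def list_topics(manager):
  # Same logic as the hunk above, but the failure is logged before
  # falling back to the mocked topic list.
  try:
    broker_host = manager.get_kafka_brokers().split(',')[0].split(':')[0]
    return [name for name in manager.get_kafka_topics(broker_host).keys() if not name.startswith('__')]
  except Exception as e:
    LOG.warning('Could not list Kafka topics, falling back to mocked list: %s' % e)
    return ['user_behavior']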