
HUE-8330 [importer] Example of going from Kafka to Kudu via Spark

Romain Rigaux 7 years ago
parent commit 602f50f94e

+ 89 - 0
desktop/libs/indexer/src/data/morphline/navigator_topic.morphline.conf

@@ -0,0 +1,89 @@
+SOLR_LOCATOR : {
+  # Name of solr collection
+  collection : nav-audit
+
+  # ZooKeeper ensemble
+  zkHost : "spark2-envelope515-1.gce.cloudera.com:2181/solr"
+
+  # The maximum number of documents to send to Solr per network batch (throughput knob)
+  # batchSize : 100
+}
+
+morphlines : [
+  {
+    id: nav-json-input
+    importCommands : ["org.kitesdk.**"]
+    commands: [
+      {readJson {}}
+      {extractJsonPaths {
+        flatten : false
+        paths : {
+          additionalInfo : /additionalInfo
+          allowed : /allowed
+          collectionName : /collectionName
+          databaseName : /databaseName
+          db : /db
+          DELEGATION_TOKEN_ID : /DELEGATION_TOKEN_ID
+          dst : /dst
+          entityId : /entityId
+          family : /family
+          impersonator : /impersonator
+          ip : /ip
+          name : /name
+          objectType : /objectType
+          objType : /objType
+          objUsageType : /objUsageType
+          operationParams : /operationParams
+          operationText : /operationText
+          op : /op
+          opText : /opText
+          path : /path
+          perms : /perms
+          privilege : /privilege
+          qualifier : /qualifier
+          QUERY_ID : /QUERY_ID
+          resourcePath : /resourcePath
+          service : /service
+          SESSION_ID : /SESSION_ID
+          solrVersion : /solrVersion
+          src : /src
+          status : /status
+          subOperation : /subOperation
+          tableName : /tableName
+          table : /table
+          time : /time
+          type : /type
+          url : /url
+          user : /user
+          }
+        }
+      }
+      # {logError {format: "Output within Morphline: {}", args: ["@{}"]}}
+    ]
+  },
+  {
+    id: nav-load-solr
+    importCommands : ["org.kitesdk.**", "org.apache.solr.**"]
+    commands: [
+      {addValues {
+        timeDate : "@{time}"
+      }}
+      {convertTimestamp {
+        field : timeDate
+        inputFormats : ["unixTimeInMillis"]
+        inputTimezone : UTC
+        outputFormat : "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
+      }}
+      {setValues {
+        id : "@{time}_@{service}_@{ip}"
+      }}
+      {sanitizeUnknownSolrFields {
+        solrLocator : ${SOLR_LOCATOR}
+      }}
+      # {logError {format: "Output within Morphline: {}", args: ["@{}"]}}
+      {loadSolr {
+        solrLocator:${SOLR_LOCATOR}
+      }}
+    ]
+  }
+]
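
For reference, the nav-load-solr morphline above derives each Solr document id from the event's time, service and ip fields and converts the epoch-millisecond time into Solr's ISO-8601 format. A minimal Python sketch of that same transformation (not part of the commit; the event dict shape is assumed from the JSON paths extracted above):

    from datetime import datetime

    def to_solr_doc(event):
        # event: one NavigatorAuditEvents record already parsed from JSON (assumed shape)
        doc = dict(event)
        # setValues { id : "@{time}_@{service}_@{ip}" }
        doc['id'] = '%(time)s_%(service)s_%(ip)s' % event
        # convertTimestamp: unixTimeInMillis (UTC) -> yyyy-MM-dd'T'HH:mm:ss.SSS'Z'
        millis = int(event['time'])
        dt = datetime.utcfromtimestamp(millis // 1000)
        doc['timeDate'] = dt.strftime('%Y-%m-%dT%H:%M:%S') + '.%03dZ' % (millis % 1000)
        return doc

    # to_solr_doc({'time': '1526000000123', 'service': 'impala', 'ip': '10.0.0.1'})
    # gives id '1526000000123_impala_10.0.0.1' and timeDate '2018-05-11T00:53:20.123Z'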

+ 51 - 45
desktop/libs/indexer/src/indexer/api3.py

@@ -355,7 +355,7 @@ def importer_submit(request):
     else:
       client = SolrClient(request.user)
       job_handle = _small_indexing(request.user, request.fs, client, source, destination, index_name)
-  elif source['inputFormat'] in ('stream', 'sfdc') or destination['ouputFormat'] == 'stream':
+  elif source['inputFormat'] in ('stream', 'connector') or destination['ouputFormat'] == 'stream':
     job_handle = _envelope_job(request, source, destination, start_time=start_time, lib_path=destination['indexerJobLibPath'])
   elif source['inputFormat'] == 'altus':
     # BDR copy or DistCP + DDL + Sentry DDL copy
@@ -531,27 +531,28 @@ def _envelope_job(request, file_format, destination, start_time=None, lib_path=N
     }
   elif file_format['inputFormat'] == 'stream' and file_format['streamSelection'] == 'flume':
     pass
-  elif file_format['inputFormat'] in ('stream', 'sfdc'):
-    if file_format['inputFormat'] == 'sfdc':
-      properties = {
-        'streamSelection': file_format['streamSelection'],
-        'streamUsername': file_format['streamUsername'],
-        'streamPassword': file_format['streamPassword'],
-        'streamToken': file_format['streamToken'],
-        'streamEndpointUrl': file_format['streamEndpointUrl'],
-        'streamObject': file_format['streamObject'],
-      }
-    elif file_format['streamSelection'] == 'kafka':
+  elif file_format['inputFormat'] == 'stream':
+    if file_format['streamSelection'] == 'kafka':
       manager = ManagerApi()
       properties = {
         "brokers": manager.get_kafka_brokers(),
         "topics": file_format['kafkaSelectedTopics'],
         "kafkaFieldType": file_format['kafkaFieldType'],
         "kafkaFieldDelimiter": file_format['kafkaFieldDelimiter'],
-        "kafkaFieldNames": file_format['kafkaFieldNames'],
-        "kafkaFieldTypes": file_format['kafkaFieldTypes']
       }
 
+      if file_format.get('kafkaSelectedTopics') == 'NavigatorAuditEvents':
+        schema_fields = MorphlineIndexer.get_kept_field_list(file_format['sampleCols'])
+        properties.update({
+          "kafkaFieldNames": ','.join([_field['name'] for _field in schema_fields]),
+          "kafkaFieldTypes": ','.join([_field['type'] for _field in schema_fields])
+        })
+      else:
+        properties.update({
+          "kafkaFieldNames": file_format['kafkaFieldNames'],
+          "kafkaFieldTypes": file_format['kafkaFieldTypes']
+        })
+
       if True:
         properties['window'] = ''
       else: # For "KafkaSQL"
@@ -560,43 +561,48 @@ def _envelope_job(request, file_format, destination, start_time=None, lib_path=N
                 enabled = true
                 milliseconds = 60000
             }'''
+  elif file_format['inputFormat'] == 'connector':
+    # sfdc
+    properties = {
+      'streamSelection': file_format['streamSelection'],
+      'streamUsername': file_format['streamUsername'],
+      'streamPassword': file_format['streamPassword'],
+      'streamToken': file_format['streamToken'],
+      'streamEndpointUrl': file_format['streamEndpointUrl'],
+      'streamObject': file_format['streamObject'],
+    }
 
-    if destination['outputFormat'] == 'table':
-      if destination['isTargetExisting']:
-        # Todo: check if format matches
-        pass
-      else:
-        sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(file_format, destination).get_str()
-        print sql
+  if destination['outputFormat'] == 'table':
+    if destination['isTargetExisting']: # Todo: check if format matches
+      pass
+    else:
+      destination['importData'] = False # Avoid LOAD DATA
       if destination['tableFormat'] == 'kudu':
-        manager = ManagerApi()
-        properties["output_table"] = "impala::%s" % collection_name
-        properties["kudu_master"] = manager.get_kudu_master()
-      else:
-        properties['output_table'] = collection_name
-    elif destination['outputFormat'] == 'file':
-      properties['path'] = file_format["path"]
-      if file_format['inputFormat'] == 'stream':
-        properties['format'] = 'csv'
-      else:
-        properties['format'] = file_format['tableFormat'] # or csv
-    elif destination['outputFormat'] == 'index':
-      properties['collectionName'] = collection_name
-      properties['connection'] = SOLR_URL.get()
-# No needed anymore
-#       if destination['isTargetExisting']:
-#         # Todo: check if format matches
-#         pass
-#       else:
-#         client = SolrClient(request.user)
-#         kwargs = {}
-#         _create_solr_collection(request.user, request.fs, client, destination, collection_name, kwargs)
-
-  if destination['outputFormat'] == 'stream':
+        properties['kafkaFieldNames'] = properties['kafkaFieldNames'].lower() # Kudu names should be all lowercase    
+      # Create table
+      SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(file_format, destination).execute(request)
+
+    if destination['tableFormat'] == 'kudu':
+      manager = ManagerApi()
+      properties["output_table"] = "impala::%s" % collection_name
+      properties["kudu_master"] = manager.get_kudu_master()
+    else:
+      properties['output_table'] = collection_name
+  elif destination['outputFormat'] == 'stream':
     manager = ManagerApi()
     properties['brokers'] = manager.get_kafka_brokers()
     properties['topics'] = file_format['kafkaSelectedTopics']
     properties['kafkaFieldDelimiter'] = file_format['kafkaFieldDelimiter']
+  elif destination['outputFormat'] == 'file':
+    properties['path'] = file_format["path"]
+    if file_format['inputFormat'] == 'stream':
+      properties['format'] = 'csv'
+    else:
+      properties['format'] = file_format['tableFormat'] # or csv
+  elif destination['outputFormat'] == 'index':
+    properties['collectionName'] = collection_name
+    properties['connection'] = SOLR_URL.get()
+
 
   properties["app_name"] = 'Data Ingest'
   properties["inputFormat"] = file_format['inputFormat']

+ 47 - 28
desktop/libs/indexer/src/indexer/indexers/envelope.py

@@ -107,33 +107,55 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
   def generate_config(self, properties):
     if properties['inputFormat'] == 'stream':
       if properties['streamSelection'] == 'kafka':
-        input = """type = kafka
-                brokers = "%(brokers)s"
-                topics = [%(topics)s]
-                encoding = string
-                translator {
-                    type = %(kafkaFieldType)s
-                    delimiter = "%(kafkaFieldDelimiter)s"
-                    field.names = [%(kafkaFieldNames)s]
-                    field.types = [%(kafkaFieldTypes)s]
-                }
-                %(window)s
-        """ % properties
-      elif properties['streamSelection'] == 'sfdc':
-        input = """type = sfdc
-            mode = fetch-all
-            sobject = %(streamObject)s
-            sfdc: {
-              partner: {
-                username = "%(streamUsername)s"
-                password = "%(streamPassword)s"
-                token = "%(streamToken)s"
-                auth-endpoint = "%(streamEndpointUrl)s"
+        if properties['topics'] == 'NavigatorAuditEvents':
+          input = """
+              type = kafka
+              brokers = "%(brokers)s"
+              topics = [%(topics)s]
+              //group.id = nav-envelope
+              encoding = bytearray
+              parameter.auto.offset.reset = earliest
+              
+              translator {
+                type = morphline
+                encoding.key = UTF8
+                encoding.message = UTF8
+                morphline.file = "navigator_topic.morphline.conf"
+                morphline.id = "nav-json-input"
+                field.names = [%(kafkaFieldNames)s]
+                field.types = [%(kafkaFieldTypes)s]
               }
-            }
-  """ % properties
+              %(window)s
+          """ % properties
+        else:
+          input = """type = kafka
+                  brokers = "%(brokers)s"
+                  topics = [%(topics)s]
+                  encoding = string
+                  translator {
+                      type = %(kafkaFieldType)s
+                      delimiter = "%(kafkaFieldDelimiter)s"
+                      field.names = [%(kafkaFieldNames)s]
+                      field.types = [%(kafkaFieldTypes)s]
+                  }
+                  %(window)s
+          """ % properties
       else:
         raise PopupException(_('Stream format of %(inputFormat)s not recognized: %(streamSelection)s') % properties)
+    elif properties['inputFormat'] == 'connector':
+      # sfdc
+      input = """type = sfdc
+          mode = fetch-all
+          sobject = %(streamObject)s
+          sfdc: {
+            partner: {
+              username = "%(streamUsername)s"
+              password = "%(streamPassword)s"
+              token = "%(streamToken)s"
+              auth-endpoint = "%(streamEndpointUrl)s"
+            }
+          }
+""" % properties
     elif properties['inputFormat'] == 'file':
       input = """type = filesystem
         path = %(input_path)s
@@ -227,10 +249,7 @@ steps {
     outputdata {
         dependencies = [inputdata]
 
-        deriver {
-          type = sql
-          query.literal = \"\"\"SELECT * from inputdata\"\"\"
-        }
+
 
         %(output)s
     }
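
The Navigator-specific branch in generate_config swaps the plain delimited translator for a morphline translator pointing at navigator_topic.morphline.conf. A small Python sketch of the substitution it performs (template abbreviated, values illustrative; the real template and properties are built by EnvelopeIndexer and api3.py above):

    template = """type = kafka
        brokers = "%(brokers)s"
        topics = [%(topics)s]
        encoding = bytearray
        parameter.auto.offset.reset = earliest
        translator {
          type = morphline
          morphline.file = "navigator_topic.morphline.conf"
          morphline.id = "nav-json-input"
          field.names = [%(kafkaFieldNames)s]
          field.types = [%(kafkaFieldTypes)s]
        }"""

    print(template % {
        'brokers': 'broker-1:9092',
        'topics': 'NavigatorAuditEvents',
        'kafkaFieldNames': 'time,service,ip,user',
        'kafkaFieldTypes': 'string,string,string,string',
    })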

+ 12 - 0
desktop/libs/indexer/src/indexer/indexers/flume.py

@@ -65,6 +65,18 @@ class FlumeIndexer(object):
       ''' % {
        'directory': source['channelSourcePath']
     }
+    elif source['channelSourceType'] == 'kafka':
+      agent_source = '''
+  tier1.sources.source1.type = org.apache.flume.source.kafka.KafkaSource
+  tier1.sources.source1.channels = channel1
+  tier1.sources.source1.batchSize = 5000
+  tier1.sources.source1.batchDurationMillis = 2000
+  tier1.sources.source1.kafka.bootstrap.servers = localhost:9092
+  tier1.sources.source1.kafka.topics = test1, test2
+  tier1.sources.source1.kafka.consumer.group.id = custom.g.id
+      '''
     else:
       raise PopupException(_('Input format not recognized: %(channelSourceType)s') % source)
 

+ 4 - 2
desktop/libs/indexer/src/indexer/indexers/morphline.py

@@ -130,6 +130,7 @@ class MorphlineIndexer(object):
     return file_format.get_fields() if file_format else {'columns': []}
 
   # Breadth first ordering of fields
+  @classmethod
   def get_field_list(self, field_data, is_converting_types=False):
     fields = []
 
@@ -148,8 +149,9 @@ class MorphlineIndexer(object):
 
     return fields
 
-  def get_kept_field_list(self, field_data):
-    return [field for field in self.get_field_list(field_data) if field['keep']]
+  @classmethod
+  def get_kept_field_list(cls, field_data):
+    return [field for field in cls.get_field_list(field_data) if field['keep']]
 
   def get_unique_field(self, format_):
     unique_fields = [column['name'] for column in format_['columns'] if column['unique']]

+ 1 - 1
desktop/libs/notebook/src/notebook/connectors/base.py

@@ -317,7 +317,7 @@ def get_api(request, snippet):
   # Multi cluster
   if has_multi_cluster():
     cluster = json.loads(request.POST.get('cluster', '""')) # Via Catalog autocomplete API or Notebook create sessions
-    if cluster == 'undefined':
+    if cluster == '""' or cluster == 'undefined':
       cluster = None
     if not cluster and snippet.get('compute'): # Via notebook.ko.js
       cluster = snippet['compute']

+ 4 - 1
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -249,7 +249,10 @@ class HS2Api(Api):
       if statement.get('statement_id') == 0:
         if query.database and not statement['statement'].lower().startswith('set'):
           db.use(query.database)
-      handle = db.client.query(query, with_multiple_session=True)
+      if True:
+        handle = db.execute_and_wait(query=query, timeout_sec=5)
+      else:
+        handle = db.client.query(query, with_multiple_session=True)
     except QueryServerException, ex:
       raise QueryError(ex.message, handle=statement)