
HUE-8330 [importer] Add skeleton support for importing from Kafka to Solr

Romain Rigaux 7 years ago
parent
commit 98cdc84

+ 1 - 1
apps/filebrowser/src/filebrowser/templates/display.mako

@@ -78,7 +78,7 @@ ${ fb_components.menubar() }
           <!-- /ko -->
 
           <!-- ko ifnot: $root.isViewing -->
-            <li><a class="pointer" data-bind="click: $root.viewFile"><i class="fa fa-eye"></i> ${_('View file')}</a></li>
+            <li><a class="pointer" data-bind="click: $root.viewFile"><i class="fa fa-reply"></i> ${_('View file')}</a></li>
           <!-- /ko -->
 
           <!-- ko if: $root.isViewing -->

+ 24 - 21
desktop/libs/indexer/src/indexer/api3.py

@@ -224,7 +224,6 @@ def guess_field_types(request):
       ]
     }
   elif file_format['inputFormat'] == 'stream':
-    # Note: mocked here, should come from SFDC or Kafka API or sampling job
     if file_format['streamSelection'] == 'kafka':
       if file_format.get('kafkaSelectedTopics') == 'NavigatorAuditEvents':
         kafkaFieldNames = [
@@ -244,6 +243,7 @@ def guess_field_types(request):
         kafkaFieldNames.append('timeDate')
         kafkaFieldTypes.append('date')
       else:
+        # Note: mocked here, should come from SFDC or Kafka API or sampling job
         kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
         kafkaFieldTypes = file_format.get('kafkaFieldTypes', '').split(',')
 
@@ -295,27 +295,30 @@ def guess_field_types(request):
               for col in columns
           ]
       }
-  elif file_format['streamSelection'] == 'sfdc':
-    sf = Salesforce(
-        username=file_format['streamUsername'],
-        password=file_format['streamPassword'],
-        security_token=file_format['streamToken']
-    )
-    table_metadata = [{
-        'name': column['name'],
-        'type': column['type']
-      } for column in sf.restful('sobjects/%(streamObject)s/describe/' % file_format)['fields']
-    ]
-    query = 'SELECT %s FROM %s LIMIT 4' % (', '.join([col['name'] for col in table_metadata]), file_format['streamObject'])
-    print query
-
-    format_ = {
-      "sample": [row.values()[1:] for row in sf.query_all(query)['records']],
-      "columns": [
-          Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
-          for col in table_metadata
+  elif file_format['inputFormat'] == 'connector':
+    if file_format['connectorSelection'] == 'sfdc':
+      sf = Salesforce(
+          username=file_format['streamUsername'],
+          password=file_format['streamPassword'],
+          security_token=file_format['streamToken']
+      )
+      table_metadata = [{
+          'name': column['name'],
+          'type': column['type']
+        } for column in sf.restful('sobjects/%(streamObject)s/describe/' % file_format)['fields']
       ]
-    }
+      query = 'SELECT %s FROM %s LIMIT 4' % (', '.join([col['name'] for col in table_metadata]), file_format['streamObject'])
+      print query
+  
+      format_ = {
+        "sample": [row.values()[1:] for row in sf.query_all(query)['records']],
+        "columns": [
+            Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
+            for col in table_metadata
+        ]
+      }
+  else:
+    raise PopupException(_('Input format not recognized: %(inputFormat)s') % file_format)
 
   return JsonResponse(format_)
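
For reference, a minimal sketch of a fileFormat payload that would exercise the new 'connector' branch above. Only the key names are taken from the code in this hunk (inputFormat, connectorSelection, streamUsername, streamPassword, streamToken, streamObject); the values are placeholders.

# Hypothetical payload for the 'connector' branch of guess_field_types();
# key names mirror the ones read above, values are placeholders only.
file_format = {
    'inputFormat': 'connector',
    'connectorSelection': 'sfdc',
    'streamUsername': 'user@company.com',
    'streamPassword': '********',
    'streamToken': 'salesforce-security-token',  # SFDC API security token
    'streamObject': 'Opportunity',               # any sObject exposed by the describe API
}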
 

+ 87 - 12
desktop/libs/indexer/src/indexer/indexers/envelope.py

@@ -115,7 +115,7 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
               //group.id = nav-envelope
               encoding = bytearray
               parameter.auto.offset.reset = earliest
-              
+
               translator {
                 type = morphline
                 encoding.key = UTF8
@@ -165,8 +165,47 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
       raise PopupException(_('Input format not recognized: %(inputFormat)s') % properties)
 
 
+    extra_step = ''
+    properties['output_deriver'] = """
+        deriver {
+          type = sql
+          query.literal = \"\"\"SELECT * from inputdata\"\"\"
+        }"""
+
+    if properties['inputFormat'] == 'stream' and properties['topics'] == 'NavigatorAuditEvents': # Kudu does not support upper case names
+      properties['output_deriver'] = """
+          deriver {
+            type = sql
+            query.literal = \"\"\"
+                SELECT concat_ws('-', time,  service, user) as id,
+                -- timeDate todo
+                additionalInfo as additionalinfo, allowed,
+                collectionName as collectionname,
+                databaseName as databasename, db,
+                DELEGATION_TOKEN_ID as delegation_token_id, dst,
+                entityId as entityid, time, family, impersonator, ip, name,
+                objectType as objecttype,
+                objType as objtype,
+                objUsageType as objusagetype, op,
+                operationParams as operationparams,
+                operationText as operationtext,
+                opText as optext, path, perms, privilege, qualifier,
+                QUERY_ID as query_id,
+                resourcePath as resourcepath, service,
+                SESSION_ID as session_id,
+                solrVersion as solrversion, src, status,
+                subOperation as suboperation,
+                tableName as tablename,
+                `table` as `table`, type, url, user
+                FROM inputdata
+            \"\"\"
+          }"""
+
+
     if properties['ouputFormat'] == 'file':
       output = """
+        %(output_deriver)s
+
         planner = {
           type = overwrite
         }
@@ -177,13 +216,10 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
           header = true
         }""" % properties
     elif properties['ouputFormat'] == 'table':
-      if properties['inputFormat'] == 'stream' and properties['streamSelection'] == 'kafka':
+      if properties['inputFormat'] == 'stream' and properties['streamSelection'] == 'kafka': # TODO: look at table output type instead and merge
         output = """
-          deriver {
-              type = sql
-              query.literal = \"""
-                  SELECT * FROM inputdata\"""
-          }
+          %(output_deriver)s
+
           planner {
               type = upsert
           }
@@ -194,6 +230,8 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
           }""" % properties
       else:
         output = """
+         %(output_deriver)s
+
           planner {
               type = append
           }
@@ -202,11 +240,45 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
               table.name = "%(output_table)s"
           }""" % properties
     elif properties['ouputFormat'] == 'index':
-      if properties['inputFormat'] == 'stream':
-        if properties['topics'] == 'NavigatorAuditEvents':
-          output = ''
+      if True: # Workaround until envelope solr output is official
+        output = """
+            // Load events to a Solr index
+            // TODO: Move this to a SolrOutput step, when this is available
+            deriver {
+              type = morphline
+              step.name = kafkaInput
+              morphline.file = ${vars.morphline.file}
+              morphline.id = ${vars.morphline.solr.indexer}
+              field.names = ${vars.json.field.names}
+              field.types = ${vars.json.field.types}
+            }
+          """ % properties
+        extra_step = """
+          solrOutput {
+            dependencies = [outputdata]
+
+            deriver {
+              type = sql
+              query.literal = \"\"\"
+                SELECT *
+                FROM outputdata LIMIT 0
+                \"\"\"
+            }
+
+            planner = {
+              type = append
+            }
+
+            output = {
+              type = log
+              path = ${vars.hdfs.basedir}
+              format = csv
+            }
+          }""" % properties
       else:
         output = """
+          %(output_deriver)s
+
           planner {
              type = upsert
           }
@@ -217,6 +289,8 @@ SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
           }""" % properties
     elif properties['ouputFormat'] == 'stream':
       output = """
+        %(output_deriver)s
+
         planner {
             type = append
         }
@@ -249,15 +323,16 @@ steps {
     outputdata {
         dependencies = [inputdata]
 
-
-
         %(output)s
     }
+
+    %(extra_step)s
 }
 
 """ % {
     'input': input,
     'output': output,
+    'extra_step': extra_step,
     'app_name': properties['app_name'],
     'batch': 'batch.milliseconds = 5000' if properties['inputFormat'] == 'stream' else ''
   }
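
The generator relies on two levels of Python %-substitution: the shared output_deriver snippet is injected into the per-format output block first, and the rendered output and extra_step blocks are then injected into the overall steps template. Below is a minimal, self-contained sketch of that mechanism; the dict values and the table name are placeholders, not taken from the diff.

# Two-level template substitution, as used by the config builder above.
properties = {
    'output_deriver': 'deriver { type = sql }',  # placeholder deriver snippet
    'output_table': 'audit',                     # placeholder Kudu table name
}

# First pass: the deriver snippet and table name are substituted into the output block.
output = """
    %(output_deriver)s

    planner {
        type = upsert
    }

    output {
        type = kudu
        table.name = "%(output_table)s"
    }""" % properties

# Second pass: the rendered blocks are substituted into the overall steps template.
config = """
steps {
    outputdata {
        dependencies = [inputdata]

        %(output)s
    }

    %(extra_step)s
}
""" % {'output': output, 'extra_step': ''}

print(config)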

+ 1 - 1
desktop/libs/indexer/src/indexer/templates/importer.mako

@@ -394,7 +394,7 @@ ${ assist.assistPanel() }
               </label>
             </div>
 
-            <!-- ko if: createWizard.source.connectorList() == 'sfdc' -->
+            <!-- ko if: createWizard.source.connectorSelection() == 'sfdc' -->
             <div class="control-group">
               <label class="control-label"><div>${ _('Username') }</div>
                 <input type="text" class="input-xxlarge" data-bind="value: createWizard.source.streamUsername" placeholder="user@company.com">