
[importer] Adding get_sample_data from a stream

Romain 5 years ago
parent
commit
bc5bf0c7c9

+ 18 - 18
desktop/libs/indexer/src/indexer/api3.py

@@ -274,24 +274,24 @@ def guess_field_types(request):
     }
   elif file_format['inputFormat'] == 'stream':
     if file_format['streamSelection'] == 'kafka':
-      if file_format.get('kafkaSelectedTopics') == 'user_behavior':
-        kafkaFieldNames = [
-          'user_id',
-          'item_id',
-          'category_id',
-          'behavior',
-          'ts'
-        ]
-        kafkaFieldTypes = ['BIGINT'] * len(kafkaFieldNames)
-
-        kafkaFieldNames.append('proctime')
-        kafkaFieldTypes.append('TIMESTAMP')
-        kafkaFieldNames.append('WATERMARK')
-        kafkaFieldTypes.append('WATERMARK')
-      else:
-        # Note: mocked here, should come from SFDC or Kafka API or sampling job
-        kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
-        kafkaFieldTypes = file_format.get('kafkaFieldTypes', '').split(',')
+      # if file_format.get('kafkaSelectedTopics') == 'user_behavior':
+      #   kafkaFieldNames = [
+      #     'user_id',
+      #     'item_id',
+      #     'category_id',
+      #     'behavior',
+      #     'ts'
+      #   ]
+      #   kafkaFieldTypes = ['BIGINT'] * len(kafkaFieldNames)
+
+      #   kafkaFieldNames.append('proctime')
+      #   kafkaFieldTypes.append('TIMESTAMP')
+      #   kafkaFieldNames.append('WATERMARK')
+      #   kafkaFieldTypes.append('WATERMARK')
+      # else:
+
+      kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
+      kafkaFieldTypes = file_format.get('kafkaFieldTypes', '').split(',')
 
       data = """%(kafkaFieldNames)s
 %(data)s""" % {

+ 2 - 0
desktop/libs/kafka/src/kafka/kafka_api.py

@@ -107,6 +107,8 @@ def get_topics(user):
       'database': 'topics'
     }
 
+    print(_get_notebook_api(user, connector_id=56).get_sample_data(snippet={}))
+
     return [
       topic['name']
       for topic in _get_notebook_api(user, connector_id=56).autocomplete(**data)['tables_meta']
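
The added print is a debug probe: the notebook API registry hands back a connector-specific Api object (connector id 56 is hard-wired here), whose new get_sample_data() is the ksql method below. A rough stand-in for that dispatch pattern, with hypothetical class and registry names; Hue's real _get_notebook_api() resolves the Api class from connector configuration:

    # Hypothetical stand-ins illustrating the connector dispatch.
    class KSqlApi:
      def get_sample_data(self, snippet):
        # Mocked rows, mirroring the patched method in ksql.py below.
        return {'status': 0, 'result': {'data': [['rowtime: ..., key: <null>, value: {...}']]}}

    _CONNECTOR_APIS = {56: KSqlApi}  # assumed mapping: connector id -> Api class

    def _get_notebook_api(user, connector_id):
      return _CONNECTOR_APIS[connector_id]()

    print(_get_notebook_api(user='demo', connector_id=56).get_sample_data(snippet={}))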

+ 44 - 0
desktop/libs/notebook/src/notebook/connectors/ksql.py

@@ -128,6 +128,50 @@ class KSqlApi(Api):
 
     return response
 
+  @query_error_handler
+  def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
+    notebook = {}
+    snippet = {'statement': 'print user_behavior FROM BEGINNING limit 10'}
+    a = self.execute(notebook, snippet)
+
+    # 'result': {'has_more': False, 'data':
+    # [
+    #   ['Key format: ¯\\_(ツ)_/¯ - no data processed'],
+    #   ['Value format: JSON or KAFKA_STRING'],
+    #   [
+    #     'rowtime: 2020/10/22 05:25:10.639 Z, '
+    #     'key: <null>, '
+    #     'value: {"user_id": "952483", "item_id": "310884", "category_id": "4580532", "behavior": "pv", "ts": "2017-11-27 00:00:00"}'
+    #   ]
+    # ]
+    # 'meta': [{'name': 'Row', 'type': 'STRING', 'comment': ''}], 'type': 'table'}
+
+    print(a)
+    return a
+    # db = self._get_db()
+
+    # data, description = db.query(
+    #     snippet['statement'],
+    #     channel_name=channel_name
+    # )
+
+    # if table and operation != 'hello' and operation != 'model':
+    #   columns = assist.get_columns(database, table)
+    #   response['full_headers'] = [{
+    #       'name': col.get('name'),
+    #       'type': self._get_column_type_name(col),
+    #       'comment': ''
+    #     } for col in columns
+    #   ]
+    # elif metadata:
+    #   response['full_headers'] = [{
+    #     'name': col[0] if type(col) is dict or type(col) is tuple else col.name if hasattr(col, 'name') else col,
+    #     'type': 'STRING_TYPE',
+    #     'comment': ''
+    #   } for col in metadata
+    # ]
+
+    # return response
 
   def fetch_result(self, notebook, snippet, rows, start_over):
     """Only called at the end of a live query."""