
[importer] Add stream table output format

Romain 5 years ago
parent
commit
a005494424

+ 66 - 56
desktop/libs/indexer/src/indexer/api3.py

@@ -18,11 +18,11 @@
 from future import standard_library
 standard_library.install_aliases()
 
-from builtins import oct, zip
+from builtins import zip
 from past.builtins import basestring
 import json
 import logging
-import urllib.request, urllib.error
+import urllib.error
 import sys
 
 from django.urls import reverse
@@ -37,31 +37,30 @@ try:
 except ImportError:
   LOG.warn('simple_salesforce module not found')
 
-from desktop.lib import django_mako
 from desktop.lib.django_util import JsonResponse
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.i18n import smart_unicode
 from desktop.lib.python_util import check_encoding
 from desktop.models import Document2
-from kafka.kafka_api import get_topics
-from metadata.manager_client import ManagerApi
+from kafka.kafka_api import get_topics, get_topic_data
 from notebook.connectors.base import get_api, Notebook
 from notebook.decorators import api_error_handler
-from notebook.models import make_notebook, MockedDjangoRequest, escape_rows
+from notebook.models import MockedDjangoRequest, escape_rows
 
 from indexer.controller import CollectionManagerController
 from indexer.file_format import HiveFormat
 from indexer.fields import Field
-from indexer.indexers.envelope import EnvelopeIndexer, _envelope_job
+from indexer.indexers.envelope import _envelope_job
 from indexer.indexers.base import get_api
 from indexer.indexers.flink_sql import FlinkIndexer
 from indexer.indexers.morphline import MorphlineIndexer, _create_solr_collection
 from indexer.indexers.rdbms import run_sqoop, _get_api
-from indexer.indexers.sql import SQLIndexer, _create_database, _create_table
+from indexer.indexers.sql import _create_database, _create_table
 from indexer.models import _save_pipeline
 from indexer.solr_client import SolrClient, MAX_UPLOAD_SIZE
 from indexer.indexers.flume import FlumeIndexer
 
+
 if sys.version_info[0] > 2:
   from io import StringIO as string_io
   from urllib.parse import urlparse, unquote as urllib_unquote
@@ -157,21 +156,33 @@ def guess_format(request):
     else:
       raise PopupException('Hive table format %s is not supported.' % table_metadata.details['properties']['format'])
   elif file_format['inputFormat'] == 'query':
-    format_ = {"quoteChar": "\"", "recordSeparator": "\\n", "type": "csv", "hasHeader": False, "fieldSeparator": "\u0001"}
+    format_ = {
+      "quoteChar": "\"",
+      "recordSeparator": "\\n",
+      "type": "csv",
+      "hasHeader": False,
+      "fieldSeparator": "\u0001"
+    }
   elif file_format['inputFormat'] == 'rdbms':
     format_ = {"type": "csv"}
   elif file_format['inputFormat'] == 'stream':
     if file_format['streamSelection'] == 'kafka':
+      format_ = {
+        "type": "json",
+        # "fieldSeparator": ",",
+        # "hasHeader": True,
+        # "quoteChar": "\"",
+        # "recordSeparator": "\\n",
+        'topics': get_topics(request.user)
+      }
+    elif file_format['streamSelection'] == 'flume':
       format_ = {
         "type": "csv",
         "fieldSeparator": ",",
         "hasHeader": True,
         "quoteChar": "\"",
-        "recordSeparator": "\\n",
-        'topics': get_topics(request.user)
+        "recordSeparator": "\\n"
       }
-    elif file_format['streamSelection'] == 'flume':
-      format_ = {"type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n"}
   elif file_format['inputFormat'] == 'connector':
     if file_format['connectorSelection'] == 'sfdc':
       sf = Salesforce(
@@ -274,52 +285,51 @@ def guess_field_types(request):
     }
   elif file_format['inputFormat'] == 'stream':
     if file_format['streamSelection'] == 'kafka':
-      # if file_format.get('kafkaSelectedTopics') == 'user_behavior':
-      #   kafkaFieldNames = [
-      #     'user_id',
-      #     'item_id',
-      #     'category_id',
-      #     'behavior',
-      #     'ts'
-      #   ]
-      #   kafkaFieldTypes = ['BIGINT'] * len(kafkaFieldNames)
-
-      #   kafkaFieldNames.append('proctime')
-      #   kafkaFieldTypes.append('TIMESTAMP')
-      #   kafkaFieldNames.append('WATERMARK')
-      #   kafkaFieldTypes.append('WATERMARK')
-      # else:
-
-      kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
-      kafkaFieldTypes = file_format.get('kafkaFieldTypes', '').split(',')
-
-      data = """%(kafkaFieldNames)s
-%(data)s""" % {
-        'kafkaFieldNames': ','.join(kafkaFieldNames),
-        'data': '\n'.join([','.join(['...'] * len(kafkaFieldTypes))] * 5)
-      }
-      stream = string_io()
-      stream.write(data)
+      data = get_topic_data(
+        request.user,
+        file_format.get('kafkaSelectedTopics')
+      )
 
-      _convert_format(file_format["format"], inverse=True)
+      kafkaFieldNames = [col['name'] for col in data['full_headers']]
+      kafkaFieldTypes = [col['type'] for col in data['full_headers']]
+      topics_data = data['rows']
 
-      indexer = MorphlineIndexer(request.user, request.fs)
-      format_ = indexer.guess_field_types({
-        "file": {
-            "stream": stream,
-            "name": file_format['path']
-        },
-        "format": file_format['format']
-      })
-      type_mapping = dict(
-        list(
-          zip(kafkaFieldNames, kafkaFieldTypes)
-        )
-      )
+      format_ = {
+        "sample": topics_data,
+        "columns": [
+          Field(col, 'string', unique=False).to_dict()
+          for col in kafkaFieldNames
+        ]
+      }
 
-      for col in format_['columns']:
-        col['keyType'] = type_mapping[col['name']]
-        col['type'] = type_mapping[col['name']]
+#       data = """%(kafkaFieldNames)s
+# %(data)s""" % {
+#         'kafkaFieldNames': ','.join(kafkaFieldNames),
+#         'data': '\n'.join([','.join(cols) for cols in topics_data])
+#       }
+#       stream = string_io()
+#       stream.write(data)
+
+#       _convert_format(file_format["format"], inverse=True)
+
+#       indexer = MorphlineIndexer(request.user, request.fs)
+
+#       format_ = indexer.guess_field_types({
+#         "file": {
+#             "stream": stream,
+#             "name": file_format['path']
+#         },
+#         "format": file_format['format']
+#       })
+#       type_mapping = dict(
+#         list(
+#           zip(kafkaFieldNames, kafkaFieldTypes)
+#         )
+#       )
+
+#       for col in format_['columns']:
+#         col['keyType'] = type_mapping[col['name']]
+#         col['type'] = type_mapping[col['name']]
     elif file_format['streamSelection'] == 'flume':
       if 'hue-httpd/access_log' in file_format['channelSourcePath']:
         columns = [

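For context, a minimal sketch of the new Kafka path in guess_field_types(): fold a get_topic_data()-style payload (the 'full_headers'/'rows' shape consumed above) into the returned format_ dict. The payload literal is illustrative, and Field(...).to_dict() is inlined as a plain dict so the sketch runs standalone.

# Illustrative payload mirroring the 'full_headers'/'rows' shape used above.
sample_payload = {
  'full_headers': [
    {'name': 'user_id', 'type': 'string', 'comment': ''},
    {'name': 'behavior', 'type': 'string', 'comment': ''},
  ],
  'rows': [
    ['952483', 'pv'],
    ['794777', 'buy'],
  ],
}

format_ = {
  'sample': sample_payload['rows'],
  'columns': [
    # Stand-in for Field(col, 'string', unique=False).to_dict().
    {'name': col['name'], 'type': 'string', 'unique': False}
    for col in sample_payload['full_headers']
  ],
}

assert [c['name'] for c in format_['columns']] == ['user_id', 'behavior']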
+ 13 - 1
desktop/libs/indexer/src/indexer/file_format.py

@@ -48,7 +48,10 @@ IMPORT_PEEK_NLINES = 20
 
 
 def get_format_types():
-  formats = [CSVFormat]
+  formats = [
+    CSVFormat,
+    JsonFormat
+  ]
 
   if ENABLE_SCALABLE_INDEXER.get():
     formats.extend([
@@ -599,6 +602,15 @@ class CSVFormat(FileFormat):
     return fields
 
 
+class JsonFormat(CSVFormat):
+  _name = "json"
+  _description = _("Json")
+  _args = [
+    CheckboxArgument("hasHeader", "Has Header")
+  ]
+  _extensions = ["json"]
+
+
 class GzipFileReader(object):
   TYPE = 'gzip'
 

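A hedged sketch of the registration pattern this hunk relies on: format classes expose class-level metadata, get_format_types() returns the list of enabled classes, and JsonFormat reuses the CSV reader machinery by subclassing. The stub classes below stand in for the real CSVFormat and CheckboxArgument.

class CheckboxArgument(object):
  # Stand-in for the real CheckboxArgument.
  def __init__(self, name, description):
    self.name, self.description, self.type = name, description, 'checkbox'

class CSVFormat(object):
  # Stand-in for indexer.file_format.CSVFormat.
  _name, _description, _extensions, _args = 'csv', 'CSV', ['csv'], []

class JsonFormat(CSVFormat):
  # Same reader machinery as CSV; only the metadata differs.
  _name, _description, _extensions = 'json', 'Json', ['json']
  _args = [CheckboxArgument('hasHeader', 'Has Header')]

def get_format_types():
  return [CSVFormat, JsonFormat]

assert [f._name for f in get_format_types()] == ['csv', 'json']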
+ 7 - 7
desktop/libs/indexer/src/indexer/indexers/flink_sql.py

@@ -60,14 +60,14 @@ class FlinkIndexer():
     category_id BIGINT,
     behavior STRING,
     ts TIMESTAMP(3),
-    proctime AS PROCTIME(),   -- generates processing-time attribute using computed column
-    WATERMARK FOR ts AS ts - INTERVAL '5' SECOND  -- defines watermark on ts column, marks ts as event-time attribute
+    proctime AS PROCTIME(),
+    WATERMARK FOR ts AS ts - INTERVAL '5' SECOND
 ) WITH (
-    'connector' = 'kafka',  -- using kafka connector
-    'topic' = 'user_behavior',  -- kafka topic
-    'scan.startup.mode' = 'earliest-offset',  -- reading from the beginning
-    'properties.bootstrap.servers' = 'kafka:9094',  -- kafka broker address
-    'format' = 'json'  -- the data format is json
+    'connector' = 'kafka',
+    'topic' = 'user_behavior',
+    'scan.startup.mode' = 'earliest-offset',
+    'properties.bootstrap.servers' = 'kafka:9094',
+    'format' = 'json'
 );''' % {
           'database': database,
           'table_name': table_name

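The inline comments stripped from the DDL above carried the semantics of the computed columns. As a rough, hedged illustration in plain Python (not Flink): the WATERMARK clause lets event time trail the latest seen timestamp by five seconds before rows count as late.

from datetime import datetime, timedelta

# Out-of-order event times; the watermark trails the max timestamp seen
# by 5 seconds, mirroring "ts - INTERVAL '5' SECOND".
events = [datetime(2017, 11, 27, 0, 0, s) for s in (3, 1, 9)]
watermark = max(events) - timedelta(seconds=5)

late = [e for e in events if e <= watermark]
assert late == [datetime(2017, 11, 27, 0, 0, 3), datetime(2017, 11, 27, 0, 0, 1)]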
+ 59 - 31
desktop/libs/indexer/src/indexer/templates/importer.mako

@@ -443,13 +443,16 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
       <!-- ko ifnot: createWizard.isGuessingFormat -->
       <h4>${_('Format')}</h4>
       <div class="card-body">
-        <label data-bind="visible: (createWizard.prefill.source_type().length == 0 || createWizard.prefill.target_type() == 'index') && (createWizard.source.inputFormat() == 'file' || createWizard.source.inputFormat() == 'stream')">
+        <label data-bind="visible: (createWizard.prefill.source_type().length == 0 || createWizard.prefill.target_type() == 'index') &&
+            (createWizard.source.inputFormat() == 'file' || createWizard.source.inputFormat() == 'stream')">
           <div>${_('File Type')}</div>
-          <select data-bind="selectize: $root.createWizard.fileTypes, value: $root.createWizard.fileTypeName, optionsText: 'description', optionsValue: 'name'"></select>
+          <select data-bind="selectize: $root.createWizard.fileTypes, value: $root.createWizard.fileTypeName,
+              optionsText: 'description', optionsValue: 'name'"></select>
         </label>
         <span class="inline-labels" data-bind="with: createWizard.source.format, visible: createWizard.source.show">
           <span data-bind="foreach: getArguments()">
-            <!-- ko template: {name: 'arg-' + $data.type, data: {description: $data.description, value: $parent[$data.name]}}--><!-- /ko -->
+            <!-- ko template: { name: 'arg-' + $data.type, data: {description: $data.description, value: $parent[$data.name]} }-->
+            <!-- /ko -->
           </span>
         </span>
       </div>
@@ -528,7 +531,7 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
             <input type="text" class="form-control name input-xxlarge" id="collectionName" data-bind="value: name, filechooser: name, filechooserOptions: { linkMarkup: true, skipInitialPathIfEmpty: true, openOnFocus: true, selectFolder: true, displayOnlyFolders: true, uploadFile: false}" placeholder="${ _('Name') }" title="${ _('Directory must not exist in the path') }">
             <!-- /ko -->
 
-            <!-- ko if: outputFormat() == 'index' -->
+            <!-- ko if: ['index', 'stream-table'].indexOf(outputFormat()) != -1 -->
             <label for="collectionName" class="control-label "><div>${ _('Name') }</div></label>
             <input type="text" class="form-control input-xlarge" id="collectionName" data-bind="value: name, valueUpdate: 'afterkeydown'" placeholder="${ _('Name') }">
             <!-- /ko -->
@@ -562,7 +565,6 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
             <!-- /ko -->
 
             <!-- ko if: outputFormat() == 'flume' -->
-
             <h4>${ _('Sink') }</h4>
             <div class="row-fluid">
               <div>
@@ -606,7 +608,7 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
       </div>
 
 
-        <!-- ko if: outputFormat() == 'table' && $root.createWizard.source.inputFormat() != 'rdbms' -->
+        <!-- ko if: ['table'].indexOf(outputFormat()) != -1 && $root.createWizard.source.inputFormat() != 'rdbms' -->
         <div class="card step">
           <h4>${_('Properties')}</h4>
           <div class="card-body">
@@ -692,7 +694,6 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
             </div>
 
             <label class="control-label"><div>${ _('Partitions') }</div>
-
               <!-- ko if: tableFormat() != 'kudu' && $root.createWizard.source.inputFormat() != 'rdbms' -->
               <div class="inline-table">
                 <div class="form-inline" data-bind="foreach: partitionColumns">
@@ -934,7 +935,7 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
         </div>
         <!-- /ko -->
 
-        <!-- ko if: ['table', 'index', 'hbase'].indexOf(outputFormat()) != -1 -->
+        <!-- ko if: ['table', 'index', 'hbase', 'stream-table'].indexOf(outputFormat()) != -1 -->
           <div class="card step">
             <h4>
               <!-- ko if: fieldEditorEnabled -->
@@ -979,17 +980,23 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
                   mode: sourceType
                 }}"></div>
               <!-- /ko -->
+
               <!-- ko ifnot: useFieldEditor -->
               <!-- ko if: $root.createWizard.source.inputFormat() === 'manual' -->
+
                 <form class="form-inline inline-table" data-bind="foreach: columns">
-                  <!-- ko if: $parent.outputFormat() == 'table' -->
-                    <a class="pointer pull-right margin-top-20" data-bind="click: function() { $parent.columns.remove($data); }"><i class="fa fa-minus"></i></a>
+                  <!-- ko if: ['table'].indexOf($parent.outputFormat()) != -1 -->
+                    <a class="pointer pull-right margin-top-20" data-bind="click: function() { $parent.columns.remove($data); }">
+                      <i class="fa fa-minus"></i>
+                    </a>
                     <div data-bind="template: { name: 'table-field-template', data: $data }" class="margin-top-10 field inline-block"></div>
                     <div class="clearfix"></div>
                   <!-- /ko -->
 
                   <!-- ko if: $parent.outputFormat() == 'index' -->
-                    <a class="pointer pull-right margin-top-20" data-bind="click: function() { $parent.columns.remove($data); }"><i class="fa fa-minus"></i></a>
+                    <a class="pointer pull-right margin-top-20" data-bind="click: function() { $parent.columns.remove($data); }">
+                      <i class="fa fa-minus"></i>
+                    </a>
                     <div data-bind="template: { name: 'index-field-template', data: $data }, css: { 'disabled': !keep() }" class="margin-top-10 field inline-block index-field"></div>
                     <div class="clearfix"></div>
                   <!-- /ko -->
@@ -998,13 +1005,14 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
                 <div class="clearfix"></div>
 
                 <!-- ko if: outputFormat() == 'table' || outputFormat() == 'index' -->
-                  <a data-bind="click: function() { columns.push($root.loadDefaultField({})); }" class="pointer" title="${_('Add Field')}"><i class="fa fa-plus"></i> ${_('Add Field')}</a>
+                  <a data-bind="click: function() { columns.push($root.loadDefaultField({})); }" class="pointer" title="${_('Add Field')}">
+                  <i class="fa fa-plus"></i> ${_('Add Field')}</a>
                 <!-- /ko -->
               <!-- /ko -->
 
-              <!-- ko ifnot: $root.createWizard.source.inputFormat() === 'manual' -->
+              <!-- ko if: $root.createWizard.source.inputFormat() !== 'manual' -->
               <form class="form-inline inline-table" data-bind="foreachVisible: { data: columns, minHeight: 54, container: MAIN_SCROLLABLE }">
-                <!-- ko if: $parent.outputFormat() == 'table' && $root.createWizard.source.inputFormat() != 'rdbms' -->
+                <!-- ko if: ['table', 'stream-table'].indexOf($parent.outputFormat()) != -1 && $root.createWizard.source.inputFormat() != 'rdbms' -->
                   <div data-bind="template: { name: 'table-field-template', data: $data }" class="margin-top-10 field"></div>
                 <!-- /ko -->
 
@@ -1321,11 +1329,13 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
 
     <label class="control-group" data-bind="visible: createWizard.source.kafkaSelectedTopics">
       <label class="control-label"><div>${ _('Schema') }</div>
-        <label class="radio margin-right-10">
-          <input type="radio" name="kafkaSchemaManual" value="manual" data-bind="checked: createWizard.source.kafkaSchemaManual" /> ${_('Manual')}
-        </label>
         <label class="radio">
-          <input type="radio" name="kafkaSchemaManual" value="detect" data-bind="checked: createWizard.source.kafkaSchemaManual" /> ${_('Guess')}
+          <input type="radio" name="kafkaSchemaManual" value="detect" data-bind="checked: createWizard.source.kafkaSchemaManual" />
+          ${_('Guess')}
+        </label>
+        <label class="radio margin-right-10">
+          <input type="radio" name="kafkaSchemaManual" value="manual" data-bind="checked: createWizard.source.kafkaSchemaManual" />
+          ${_('Manual')}
         </label>
       </label>
 
@@ -1342,11 +1352,13 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
       </label>
       <br/>
       <label class="control-label"><div>${ _('Field names') }</div>
-        <input type="text" class="input-xxlarge" data-bind="value: createWizard.source.kafkaFieldNames" placeholder="${ _('The list of fields to consume, e.g. orders,returns') }">
+        <input type="text" class="input-xxlarge" data-bind="value: createWizard.source.kafkaFieldNames"
+          placeholder="${ _('The list of fields to consume, e.g. orders,returns') }">
       </label>
       <br/>
       <label class="control-label"><div>${ _('Field types') }</div>
-        <input type="text" class="input-xxlarge" data-bind="value: createWizard.source.kafkaFieldTypes" placeholder="${ _('The list of field typs, e.g. string,int') }">
+        <input type="text" class="input-xxlarge"
+          data-bind="value: createWizard.source.kafkaFieldTypes" placeholder="${ _('The list of field types, e.g. string,int') }">
       </label>
       <br/>
       <label class="control-label" data-bind="visible: createWizard.source.hasStreamSelected"><div></div>
@@ -2013,7 +2025,7 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
           self.kafkaFieldTypes($.totalStorage('pai' + '_kafka_topics_' + newValue + '_kafkaFieldTypes'));
         }
       });
-      self.kafkaSchemaManual = ko.observable('manual');
+      self.kafkaSchemaManual = ko.observable('detect');
       self.kafkaFieldType = ko.observable('delimited');
       self.kafkaFieldDelimiter = ko.observable(',');
       self.kafkaFieldNames = ko.observable('');
@@ -2232,12 +2244,13 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
           {'name': '${ _("Search index") }', 'value': 'index'},
           % endif
           {'name': '${ _("Database") }', 'value': 'database'},
+          % if ENABLE_KAFKA.get():
+          {'name': '${ _("Stream Table") }', 'value': 'stream-table'},
+          {'name': '${ _("Stream Topic") }', 'value': 'stream'},
+          % endif
           % if ENABLE_SQOOP.get() or ENABLE_KAFKA.get():
           {'name': '${ _("Folder") }', 'value': 'file'},
           % endif
-          % if ENABLE_KAFKA.get():
-          {'name': '${ _("Stream") }', 'value': 'stream'},
-          % endif
           % if ENABLE_ALTUS.get():
           {'name': '${ _("Altus SDX") }', 'value': 'altus'},
           % endif
@@ -2247,22 +2260,36 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
       ]);
       self.outputFormats = ko.pureComputed(function() {
         return $.grep(self.outputFormatsList(), function(format) {
-          if (format.value === 'database' && wizard.source.inputFormat() !== 'manual' && (wizard.source.inputFormat() !== 'rdbms' || !wizard.source.rdbmsAllTablesSelected())) {
+          if (
+              format.value === 'database' &&
+              wizard.source.inputFormat() !== 'manual' &&
+              (wizard.source.inputFormat() !== 'rdbms' || !wizard.source.rdbmsAllTablesSelected())) {
             return false;
           }
-          if (format.value === 'file' && ['manual', 'rdbms', 'stream'].indexOf(wizard.source.inputFormat()) === -1) {
+          if (format.value === 'file' && (
+              wizard.source.inputFormat() === 'stream' ||
+              ['manual', 'rdbms', 'stream'].indexOf(wizard.source.inputFormat()) === -1)) {
             return false;
           }
-          if (format.value === 'index' && ['file', 'query', 'stream', 'manual'].indexOf(wizard.source.inputFormat()) === -1) {
+          if (format.value === 'index' && (
+              wizard.source.inputFormat() === 'stream' ||
+              ['file', 'query', 'stream', 'manual'].indexOf(wizard.source.inputFormat()) === -1)) {
             return false;
           }
-          if (format.value === 'table' && (wizard.source.inputFormat() === 'table' || (wizard.source.inputFormat() === 'rdbms' && wizard.source.rdbmsAllTablesSelected()))) {
+          if (format.value === 'table' && (
+              wizard.source.inputFormat() === 'stream' ||
+              wizard.source.inputFormat() === 'table' || (
+                wizard.source.inputFormat() === 'rdbms' && wizard.source.rdbmsAllTablesSelected()))) {
             return false;
           }
           if (format.value === 'altus' && ['table'].indexOf(wizard.source.inputFormat()) === -1) {
             return false;
           }
-          if (format.value === 'stream' && ['file', 'stream'].indexOf(wizard.source.inputFormat()) === -1) {
+          if (format.value === 'stream' &&
+              (wizard.source.inputFormat() === 'stream' || ['file', 'stream'].indexOf(wizard.source.inputFormat()) === -1)) {
+            return false;
+          }
+          if (format.value === 'stream-table' && ['stream'].indexOf(wizard.source.inputFormat()) === -1) {
             return false;
           }
           if (format.value === 'hbase' && (wizard.source.inputFormat() !== 'rdbms' || wizard.source.rdbmsAllTablesSelected())) {
@@ -2277,7 +2304,8 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
         }
       });
       wizard.prefill.target_type.subscribe(function(newValue) {
-        setTimeout(function() { // target_type gets updated by the router (onePageViewModelModel.js) and delaying allow the notification to go through
+        // target_type gets updated by the router (onePageViewModelModel.js); delaying allows the notification to go through
+        setTimeout(function() {
           self.outputFormat(newValue || 'table');
         },0);
         if (newValue === 'database') {
@@ -2794,7 +2822,7 @@ ${ commonheader(_("Importer"), "indexer", user, request, "60px") | n,unicode }
         $(".jHueNotify").remove();
 
         self.indexingStarted(true);
-%if not is_embeddable:
+% if not is_embeddable:
         viewModel.isLoading(true);
         self.isIndexing(true);
 

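The reshuffled outputFormats filter is easier to audit as a plain predicate. A partial, hedged restatement in Python (Knockout in the template above), covering only the rules this commit touches:

def output_format_visible(fmt, input_format, all_tables_selected=False):
  # A 'stream' input now maps only to the new 'stream-table' target.
  if fmt in ('file', 'index', 'table', 'stream') and input_format == 'stream':
    return False
  if fmt == 'stream-table' and input_format != 'stream':
    return False
  if fmt == 'table' and (input_format == 'table' or
                         (input_format == 'rdbms' and all_tables_selected)):
    return False
  return True

assert output_format_visible('stream-table', 'stream')
assert not output_format_visible('table', 'stream')
assert not output_format_visible('stream-table', 'file')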
+ 6 - 6
desktop/libs/indexer/src/indexer/views.py

@@ -34,6 +34,12 @@ from indexer.indexers.morphline_operations import OPERATORS
 LOG = logging.getLogger(__name__)
 
 
+HIVE_PRIMITIVE_TYPES = (
+  "string", "tinyint", "smallint", "int", "bigint", "boolean", "float", "double", "decimal", "timestamp", "date", "char", "varchar"
+)
+HIVE_TYPES = HIVE_PRIMITIVE_TYPES + ("array", "map", "struct")
+
+
 def collections(request, is_redirect=False):
   if not request.user.has_hue_permission(action="access", app='indexer'):
     raise PopupException(_('Missing permission.'), error_code=403)
@@ -83,12 +89,6 @@ def indexer(request):
   })
 
 
-HIVE_PRIMITIVE_TYPES = \
-    ("string", "tinyint", "smallint", "int", "bigint", "boolean",
-      "float", "double", "decimal", "timestamp", "date", "char", "varchar")
-HIVE_TYPES = HIVE_PRIMITIVE_TYPES + ("array", "map", "struct")
-
-
 def importer(request):
   prefill = {
     'source_type': '',

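Hoisting HIVE_PRIMITIVE_TYPES/HIVE_TYPES above the views makes them read as module-level configuration. A small hedged sketch of how such a tuple is typically used to vet a column type; is_hive_type is a hypothetical helper, not part of the module:

HIVE_PRIMITIVE_TYPES = (
  "string", "tinyint", "smallint", "int", "bigint", "boolean",
  "float", "double", "decimal", "timestamp", "date", "char", "varchar"
)
HIVE_TYPES = HIVE_PRIMITIVE_TYPES + ("array", "map", "struct")

def is_hive_type(type_name):
  # Complex types carry parameters (e.g. array<string>), so match on prefix.
  t = type_name.lower()
  return t in HIVE_PRIMITIVE_TYPES or t.startswith(("array", "map", "struct"))

assert is_hive_type('BIGINT') and is_hive_type('array<string>')
assert not is_hive_type('uuid')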
+ 8 - 2
desktop/libs/kafka/src/kafka/kafka_api.py

@@ -107,8 +107,6 @@ def get_topics(user):
       'database': 'topics'
     }
 
-    print(_get_notebook_api(user, connector_id=56).get_sample_data(snippet={}))
-
     return [
       topic['name']
       for topic in _get_notebook_api(user, connector_id=56).autocomplete(**data)['tables_meta']
@@ -116,6 +114,14 @@ def get_topics(user):
     ]
 
 
+def get_topic_data(user, name):
+  from metadata.models.bigquery_client import _get_notebook_api
+
+  # NOTE: `name` is not forwarded yet; get_sample_data() (see ksql.py below) prints a hardcoded topic.
+  data = _get_notebook_api(user, connector_id=56).get_sample_data(snippet={})
+  return data
+
+
 def get_topic(name):
   if has_kafka_api():
     pass

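A hedged sketch of the contract guess_field_types() (api3.py above) relies on: get_topic_data() must return a dict carrying 'full_headers' and 'rows'. A stub stands in for the notebook-API call so the sketch runs standalone; note the commit does not yet forward the topic name.

def get_topic_data(user, name):
  # Stub for _get_notebook_api(user, connector_id=56).get_sample_data(snippet={});
  # `name` is accepted but, as in the commit, not forwarded.
  return {
    'full_headers': [{'name': 'user_id', 'type': 'string', 'comment': ''}],
    'rows': [['952483'], ['794777']],
  }

data = get_topic_data(None, 'user_behavior')
assert set(data) >= {'full_headers', 'rows'}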
+ 37 - 36
desktop/libs/notebook/src/notebook/connectors/ksql.py

@@ -18,6 +18,7 @@
 from __future__ import absolute_import
 
 import logging
+import json
 
 from django.utils.translation import ugettext as _
 
@@ -131,47 +132,47 @@ class KSqlApi(Api):
   @query_error_handler
   def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
     notebook = {}
-    snippet = {'statement': 'print user_behavior FROM BEGINNING limit 10'}
-    a = self.execute(notebook, snippet)
 
-    # 'result': {'has_more': False, 'data':
-    [
-      ['Key format: ¯\\_(ツ)_/¯ - no data processe'],
-      ['Value format: JSON or KAFKA_STRIN'],
-      [
-        'rowtime: 2020/10/22 05:25:10.639 Z, '
-        'key: <null>, '
-        'value: {"user_id": "952483", "item_id":"310884", "category_id": "4580532", "behavior": "pv", "ts": "2017-11-27 00:00:00"'
-      ]
-    ]
-    # 'meta': [{'name': 'Row', 'type': 'STRING', 'comment': ''}], 'type': 'table'}
+    snippet = {
+      'statement': 'PRINT user_behavior FROM BEGINNING LIMIT 10'  # Read from the beginning in case no new data is arriving
+    }
+    sample = self.execute(notebook, snippet)['result']['data']
 
-    print(a)
-    return a
-    # db = self._get_db()
-
-    # data, description = db.query(
-    #     snippet['statement'],
-    #     channel_name=channel_name
-    # )
-
-    # if table and operation != 'hello' and operation != 'model':
-    #   columns = assist.get_columns(database, table)
-    #   response['full_headers'] = [{
-    #       'name': col.get('name'),
-    #       'type': self._get_column_type_name(col),
-    #       'comment': ''
-    #     } for col in columns
+    # 'result': {'has_more': False, 'data':
+    # [
+    #   ['Key format: ¯\\_(ツ)_/¯ - no data processed'],
+    #     (Key format: JSON or SESSION(KAFKA_STRING) or HOPPING(KAFKA_STRING) or TUMBLING(KAFKA_STRING) or KAFKA_STRING)
+    #   ['Value format: JSON or KAFKA_STRING'],
+    #   [
+    #     'rowtime: 2020/10/22 05:25:10.639 Z, '
+    #     'key: <null>, '
+    #     'value: {"user_id": "952483", "item_id":"310884", "category_id": "4580532", "behavior": "pv", "ts": "2017-11-27 00:00:00"}'
     #   ]
-    # elif metadata:
-    #   response['full_headers'] = [{
-    #     'name': col[0] if type(col) is dict or type(col) is tuple else col.name if hasattr(col, 'name') else col,
-    #     'type': 'STRING_TYPE',
-    #     'comment': ''
-    #   } for col in metadata
     # ]
+    # 'meta': [{'name': 'Row', 'type': 'STRING', 'comment': ''}], 'type': 'table'}
 
-    # return response
+    response = {
+      'status': 0,
+      'result': {}
+    }
+    # Skip the two "Key format"/"Value format" header lines, then parse the
+    # JSON payload out of each "rowtime: ..., key: ..., value: {...}" line.
+    response['rows'] = [
+      list(json.loads(row[0].rsplit(', value: ', 1)[1]).values())
+      for row in sample[2:12]
+    ]
+
+    # Field names come from the first sampled message's JSON keys.
+    columns = json.loads(sample[2][0].rsplit(', value: ', 1)[1]).keys()
+
+    response['full_headers'] = [{
+        'name': col,
+        'type': 'string',
+        'comment': ''
+      } for col in columns
+    ]
+
+    return response
 
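The parsing in get_sample_data() leans on the shape of ksqlDB's PRINT output. A self-contained sketch of the same rsplit/json.loads step, using the sample line from the comment block above (Python 3.7+ dict ordering keeps values aligned with keys):

import json

line = ('rowtime: 2020/10/22 05:25:10.639 Z, key: <null>, '
        'value: {"user_id": "952483", "item_id": "310884", '
        '"category_id": "4580532", "behavior": "pv", "ts": "2017-11-27 00:00:00"}')

payload = json.loads(line.rsplit(', value: ', 1)[1])  # text after ', value: '
row = list(payload.values())
full_headers = [{'name': k, 'type': 'string', 'comment': ''} for k in payload]

assert row[:2] == ['952483', '310884']
assert full_headers[0]['name'] == 'user_id'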
   def fetch_result(self, notebook, snippet, rows, start_over):
     """Only called at the end of a live query."""