
[notebook] Fix series of styling issues

Romain 5 years ago
parent
commit
c1c6adf4b8

+ 4 - 4
desktop/libs/indexer/src/indexer/views.py

@@ -82,10 +82,10 @@ def indexer(request):
  return render('indexer.mako', request, {
      'is_embeddable': request.GET.get('is_embeddable', False),
      'indexes_json': json.dumps(indexes),
-      'fields_json' : json.dumps([field.name for field in FIELD_TYPES]),
-      'operators_json' : json.dumps([operator.to_dict() for operator in OPERATORS]),
-      'file_types_json' : json.dumps([format_.format_info() for format_ in get_file_indexable_format_types()]),
-      'default_field_type' : json.dumps(Field().to_dict())
+      'fields_json': json.dumps([field.name for field in FIELD_TYPES]),
+      'operators_json': json.dumps([operator.to_dict() for operator in OPERATORS]),
+      'file_types_json': json.dumps([format_.format_info() for format_ in get_file_indexable_format_types()]),
+      'default_field_type': json.dumps(Field().to_dict())
   })
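The change above is purely cosmetic: PEP 8 discourages whitespace before a colon, and flake8 flags it as E203, so the template-context keys drop the space between key and colon. A minimal standalone illustration of the rule (not code from the Hue tree):

# Before: flagged by flake8 as E203 (whitespace before ':')
context = {'fields_json' : '[]'}

# After: PEP 8 compliant
context = {'fields_json': '[]'}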
 

+ 2 - 1
desktop/libs/notebook/src/notebook/connectors/ksql.py

@@ -140,7 +140,8 @@ class KSqlApi(Api):
 
 
    # 'result': {'has_more': False, 'data':
    # [
-    #   ['Key format: ¯\\_(ツ)_/¯ - no data processed'], #     Key format: JSON or SESSION(KAFKA_STRING) or HOPPING(KAFKA_STRING) or TUMBLING(KAFKA_STRING) or KAFKA_STRING
+    #    Key format --> JSON or SESSION(KAFKA_STRING) or HOPPING(KAFKA_STRING) or TUMBLING(KAFKA_STRING) or KAFKA_STRING
+    #   ['Key format: ¯\\_(ツ)_/¯ - no data processed'],
    #   ['Value format: JSON or KAFKA_STRING']
    #   [
     #     'rowtime: 2020/10/22 05:25:10.639 Z, '
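The reordered comment documents the rough shape of the payload the ksql connector hands back. A sketch of that structure as a Python literal, inferred only from the comment itself (values are illustrative, not a captured ksqlDB response):

result = {
    'has_more': False,
    'data': [
        ['Key format: JSON or SESSION(KAFKA_STRING) or HOPPING(KAFKA_STRING) or TUMBLING(KAFKA_STRING) or KAFKA_STRING'],
        ['Value format: JSON or KAFKA_STRING'],
        ['rowtime: 2020/10/22 05:25:10.639 Z, ...'],
    ],
}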

+ 152 - 129
desktop/libs/notebook/src/notebook/models.py

@@ -71,7 +71,8 @@ def escape_rows(rows, nulls_only=False, encoding=None):
        elif field is None:
          escaped_field = 'NULL'
        else:
-          escaped_field = smart_unicode(field, errors='replace', encoding=encoding) # Prevent error when getting back non utf8 like charset=iso-8859-1
+          # Prevent error when getting back non utf8 like charset=iso-8859-1
+          escaped_field = smart_unicode(field, errors='replace', encoding=encoding)
          if not nulls_only:
            escaped_field = escape(escaped_field).replace(' ', '&nbsp;')
         escaped_row.append(escaped_field)
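The re-wrapped line handles result cells that come back in a non-UTF-8 charset such as ISO-8859-1: smart_unicode (Hue's own helper) decodes them with errors='replace' instead of raising. A rough standalone approximation of that behaviour with plain Python:

raw = 'café'.encode('iso-8859-1')              # b'caf\xe9' -- not valid UTF-8
# raw.decode('utf-8') would raise UnicodeDecodeError here
text = raw.decode('utf-8', errors='replace')   # 'caf\ufffd': the bad byte becomes U+FFFD instead of crashing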
@@ -89,7 +90,8 @@ def make_notebook(
    namespace=None, compute=None, is_presentation_mode=False):
  '''
  skip_historify: do not add the task to the query history. e.g. SQL Dashboard
-  is_task / isManaged: true when being a managed by Hue operation (include_managed=True in document), e.g. exporting query result, dropping some tables
+  is_task / isManaged: true when being a managed by Hue operation (include_managed=True in document),
+  e.g. exporting query result, dropping some tables
   '''
  from notebook.connectors.hiveserver2 import HS2Api
 
@@ -161,7 +163,7 @@ def make_notebook(
         'database': database,
         'namespace': namespace if namespace else {},
         'compute': compute if compute else {},
-         'result': {'handle':{}},
+         'result': {'handle': {}},
         'variables': []
      }
     ] if not is_notebook else []
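The docstring and dict-spacing tweaks above do not change behaviour. For orientation, a sketch of how a managed, non-historified statement might be built with this helper; argument values are illustrative and only keyword names that appear in this diff or its docstring are used:

from notebook.models import make_notebook

notebook = make_notebook(
    name='Drop temp table',
    editor_type='hive',
    statement='DROP TABLE IF EXISTS tmp_export',
    status='ready',
    skip_historify=True,  # keep it out of the query history, e.g. SQL Dashboard
    is_task=True,         # managed-by-Hue operation (include_managed=True in the document)
)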
@@ -226,7 +228,7 @@ def make_notebook2(name='Browse', description='', is_saved=False, snippets=None)
         'properties': _snippet['properties'],
         'name': name,
         'database': _snippet.get('database'),
-         'result': {'handle':{}},
+         'result': {'handle': {}},
         'variables': []
      } for _snippet in _snippets
    ]
@@ -352,123 +354,123 @@ def import_saved_mapreduce_job(wf):
 
 
 
 
 def import_saved_shell_job(wf):
-    snippet_properties = {}
-    node = wf.start.get_child('to')
-
-    snippet_properties['command_path'] = node.command
-
-    snippet_properties['arguments'] = []
-    snippet_properties['env_var'] = []
-    try:
-      params = json.loads(node.params)
-      if params:
-        for param in params:
-          if param['type'] == 'argument':
-            snippet_properties['arguments'].append(param['value'])
-          else:
-            snippet_properties['env_var'].append(param['value'])
-    except ValueError as e:
-      LOG.warn('Failed to parse parameters for shell job design "%s".' % wf.name)
-
-    snippet_properties['hadoopProperties'] = []
-    try:
-      properties = json.loads(node.job_properties)
-      if properties:
-        for prop in properties:
-          snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
-    except ValueError as e:
-      LOG.warn('Failed to parse job properties for shell job design "%s".' % wf.name)
-
-    snippet_properties['files'] = []
-    try:
-      files = json.loads(node.files)
-      for filepath in files:
-        snippet_properties['files'].append({'type': 'file', 'path': filepath})
-    except ValueError as e:
-      LOG.warn('Failed to parse files for shell job design "%s".' % wf.name)
-
-    snippet_properties['archives'] = []
-    try:
-      archives = json.loads(node.archives)
-      for archive in archives:
-        snippet_properties['archives'].append(archive['name'])
-    except ValueError as e:
-      LOG.warn('Failed to parse archives for shell job design "%s".' % wf.name)
-
-    snippet_properties['capture_output'] = node.capture_output
-
-    notebook = make_notebook(
-        name=wf.name,
-        description=wf.description,
-        editor_type='shell',
-        statement='',
-        status='ready',
-        snippet_properties=snippet_properties,
-        is_saved=True
-    )
+  snippet_properties = {}
+  node = wf.start.get_child('to')
+
+  snippet_properties['command_path'] = node.command
+
+  snippet_properties['arguments'] = []
+  snippet_properties['env_var'] = []
+  try:
+    params = json.loads(node.params)
+    if params:
+      for param in params:
+        if param['type'] == 'argument':
+          snippet_properties['arguments'].append(param['value'])
+        else:
+          snippet_properties['env_var'].append(param['value'])
+  except ValueError as e:
+    LOG.warn('Failed to parse parameters for shell job design "%s".' % wf.name)
+
+  snippet_properties['hadoopProperties'] = []
+  try:
+    properties = json.loads(node.job_properties)
+    if properties:
+      for prop in properties:
+        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
+  except ValueError as e:
+    LOG.warn('Failed to parse job properties for shell job design "%s".' % wf.name)
+
+  snippet_properties['files'] = []
+  try:
+    files = json.loads(node.files)
+    for filepath in files:
+      snippet_properties['files'].append({'type': 'file', 'path': filepath})
+  except ValueError as e:
+    LOG.warn('Failed to parse files for shell job design "%s".' % wf.name)
+
+  snippet_properties['archives'] = []
+  try:
+    archives = json.loads(node.archives)
+    for archive in archives:
+      snippet_properties['archives'].append(archive['name'])
+  except ValueError as e:
+    LOG.warn('Failed to parse archives for shell job design "%s".' % wf.name)
+
+  snippet_properties['capture_output'] = node.capture_output
+
+  notebook = make_notebook(
+      name=wf.name,
+      description=wf.description,
+      editor_type='shell',
+      statement='',
+      status='ready',
+      snippet_properties=snippet_properties,
+      is_saved=True
+  )
 
 
-    # Remove functions, settings from snippet properties
-    data = notebook.get_data()
-    data['snippets'][0]['properties'].pop('functions')
-    data['snippets'][0]['properties'].pop('settings')
+  # Remove functions, settings from snippet properties
+  data = notebook.get_data()
+  data['snippets'][0]['properties'].pop('functions')
+  data['snippets'][0]['properties'].pop('settings')
 
 
-    notebook.data = json.dumps(data)
-    return notebook
+  notebook.data = json.dumps(data)
+  return notebook
 
 
 
 
 def import_saved_java_job(wf):
-    snippet_properties = {}
-    node = wf.start.get_child('to')
-
-    snippet_properties['app_jar'] = node.jar_path
-    snippet_properties['class'] = node.main_class
-    snippet_properties['args'] = node.args if node.args else ''
-    snippet_properties['java_opts'] = node.java_opts if node.java_opts else ''
-
-    snippet_properties['hadoopProperties'] = []
-    try:
-      properties = json.loads(node.job_properties)
-      if properties:
-        for prop in properties:
-          snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
-    except ValueError as e:
-      LOG.warn('Failed to parse job properties for Java job design "%s".' % wf.name)
-
-    snippet_properties['files'] = []
-    try:
-      files = json.loads(node.files)
-      for filepath in files:
-        snippet_properties['files'].append({'type': 'file', 'path': filepath})
-    except ValueError as e:
-      LOG.warn('Failed to parse files for Java job design "%s".' % wf.name)
-
-    snippet_properties['archives'] = []
-    try:
-      archives = json.loads(node.archives)
-      for archive in archives:
-        snippet_properties['archives'].append(archive['name'])
-    except ValueError as e:
-      LOG.warn('Failed to parse archives for Java job design "%s".' % wf.name)
-
-    snippet_properties['capture_output'] = node.capture_output
-
-    notebook = make_notebook(
-        name=wf.name,
-        description=wf.description,
-        editor_type='java',
-        statement='',
-        status='ready',
-        snippet_properties=snippet_properties,
-        is_saved=True
-    )
+  snippet_properties = {}
+  node = wf.start.get_child('to')
 
 
-    # Remove functions, settings from snippet properties
-    data = notebook.get_data()
-    data['snippets'][0]['properties'].pop('functions')
-    data['snippets'][0]['properties'].pop('settings')
+  snippet_properties['app_jar'] = node.jar_path
+  snippet_properties['class'] = node.main_class
+  snippet_properties['args'] = node.args if node.args else ''
+  snippet_properties['java_opts'] = node.java_opts if node.java_opts else ''
+
+  snippet_properties['hadoopProperties'] = []
+  try:
+    properties = json.loads(node.job_properties)
+    if properties:
+      for prop in properties:
+        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
+  except ValueError as e:
+    LOG.warn('Failed to parse job properties for Java job design "%s".' % wf.name)
+
+  snippet_properties['files'] = []
+  try:
+    files = json.loads(node.files)
+    for filepath in files:
+      snippet_properties['files'].append({'type': 'file', 'path': filepath})
+  except ValueError as e:
+    LOG.warn('Failed to parse files for Java job design "%s".' % wf.name)
 
 
-    notebook.data = json.dumps(data)
-    return notebook
+  snippet_properties['archives'] = []
+  try:
+    archives = json.loads(node.archives)
+    for archive in archives:
+      snippet_properties['archives'].append(archive['name'])
+  except ValueError as e:
+    LOG.warn('Failed to parse archives for Java job design "%s".' % wf.name)
+
+  snippet_properties['capture_output'] = node.capture_output
+
+  notebook = make_notebook(
+      name=wf.name,
+      description=wf.description,
+      editor_type='java',
+      statement='',
+      status='ready',
+      snippet_properties=snippet_properties,
+      is_saved=True
+  )
+
+  # Remove functions, settings from snippet properties
+  data = notebook.get_data()
+  data['snippets'][0]['properties'].pop('functions')
+  data['snippets'][0]['properties'].pop('settings')
+
+  notebook.data = json.dumps(data)
+  return notebook
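Both import_saved_shell_job and import_saved_java_job are only re-indented here, from four spaces to Hue's two-space convention; the parsing logic is untouched. As a reminder of what that logic expects, a hypothetical node.params payload and how the shell importer would split it (shape inferred from the loop above, not a real Oozie design):

params_json = '[{"type": "argument", "value": "--verbose"}, {"type": "env-var", "value": "TMPDIR=/tmp"}]'
# After json.loads(params_json), the loop routes "--verbose" into
# snippet_properties['arguments'] and "TMPDIR=/tmp" into snippet_properties['env_var'],
# because only entries whose type is exactly 'argument' count as arguments.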
 
 
 
 
 def _convert_type(btype, bdata):
@@ -548,18 +550,30 @@ class Analytics(object):
    stats.append(('Last modified', '1 day'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_day).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_day).count()))
-    stats.append(('Executed queries', Document2.objects.filter(last_modified__gte=one_day, is_history=True, type__startswith='query-').count()))
+    stats.append(('Executed queries', Document2.objects.filter(
+        last_modified__gte=one_day, is_history=True, type__startswith='query-').count()
+      )
+    )
 
 
    stats.append(('\nLast modified', '1 week'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_week).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_week).count()))
-    stats.append(('Executed queries', Document2.objects.filter(last_modified__gte=one_week, is_history=True, type__startswith='query-').count()))
-    stats.append(('Saved queries', Document2.objects.filter(last_modified__gte=one_week, is_history=False, type__startswith='query-').count()))
+    stats.append(('Executed queries', Document2.objects.filter(
+        last_modified__gte=one_week, is_history=True, type__startswith='query-').count()
+      )
+    )
+    stats.append(('Saved queries', Document2.objects.filter(
+        last_modified__gte=one_week, is_history=False, type__startswith='query-').count()
+      )
+    )
 
 
    stats.append(('\nAll', ''))
    stats.append(('Active users 30 days', User.objects.filter(last_login__gte=one_month).count()))
    stats.append(('Sessions 30 days', Session.objects.filter(expire_date__gte=one_month).count()))
-    stats.append(('Executed queries 30 days', Document2.objects.filter(last_modified__gte=one_month, is_history=True, type__startswith='query-').count()))
+    stats.append(('Executed queries 30 days', Document2.objects.filter(
+        last_modified__gte=one_month, is_history=True, type__startswith='query-').count()
+      )
+    )
    stats.append(('Active users 90 days', User.objects.filter(last_login__gte=three_months).count()))

    return stats
@@ -574,7 +588,7 @@ class Analytics(object):
 
 
    stats.append({
      'name': 'user',
-      'value': '%s - %s' % (user_id, user.username),'description': _('User info')
+      'value': '%s - %s' % (user_id, user.username), 'description': _('User info')
    })
    query_executions = queries.filter(is_history=True, type__startswith='query-')
    stats.append({
@@ -592,7 +606,10 @@ class Analytics(object):
      'value': query_executions.filter(last_modified__gte=one_month).count(),
      'description': _('Query executions 30 days total')
    })
-    last_month_daily = queries.filter(last_modified__gte=one_month).annotate(day=Trunc('last_modified', 'day')).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
+    last_month_daily = queries.filter(
+        last_modified__gte=one_month).annotate(
+          day=Trunc('last_modified', 'day')
+        ).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
    stats.append({
      'name': 'query_executions_30_days_histogram',
      'value': last_month_daily,
@@ -623,7 +640,10 @@ class Analytics(object):
      'value': executions.exclude(owner=query.owner).count(),
      'description': _('Executions by others')
    })
-    last_month_daily = executions.filter(last_modified__gte=one_month).annotate(day=Trunc('last_modified', 'day')).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
+    last_month_daily = executions.filter(
+        last_modified__gte=one_month).annotate(
+          day=Trunc('last_modified', 'day')
+        ).values('day').annotate(c=Count('day')).values('day', 'c').order_by('day')
    stats.append({
      'name': 'executions_30_days_histogram',
       'value': last_month_daily,
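The re-wrapped queryset chains use a standard Django pattern for a per-day histogram: truncate the timestamp to its day, group on it, and count rows per group. A generic sketch of the same chain against a made-up model (QueryLog and one_month are placeholders; Document2 is the real model used above):

from django.db.models import Count
from django.db.models.functions import Trunc

last_month_daily = (
    QueryLog.objects
    .filter(last_modified__gte=one_month)
    .annotate(day=Trunc('last_modified', 'day'))  # collapse each timestamp to its day
    .values('day')                                # group by that day
    .annotate(c=Count('day'))                     # count rows per group
    .values('day', 'c')
    .order_by('day')
)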
@@ -635,7 +655,7 @@ class Analytics(object):
 
 
 
 
 class MockRequest():
-  def __init__(self, user, ):
+  def __init__(self, user):
    self.user = user
    self.POST = {}
 
 
@@ -653,18 +673,21 @@ def install_custom_examples():
    dialects = [
      interpreter['dialect']
      for interpreter in get_ordered_interpreters(user)
-      if interpreter['dialect'] in ('hive', 'impala')  # Only for hive/impala currently, would also need to port to Notebook install examples.
+      # Only for hive/impala currently, would also need to port to Notebook install examples.
+      if interpreter['dialect'] in ('hive', 'impala')
    ]

    queries = EXAMPLES.QUERIES.get()
    tables = EXAMPLES.TABLES.get()  # No-op. Only for the saved query samples, not the tables currently.
 
 
-    LOG.info('Installing custom examples queries: %(queries)s, tables: %(tables)s for dialects %(dialects)s belonging to user %(user)s' % {
-      'queries': queries,
-      'tables': tables,
-      'dialects': dialects,
-      'user': user
-    })
+    LOG.info('Installing custom examples queries: %(queries)s, tables: %(tables)s for dialects %(dialects)s '
+      'belonging to user %(user)s' % {
+        'queries': queries,
+        'tables': tables,
+        'dialects': dialects,
+        'user': user
+      }
+    )
 
 
     result = []
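The reflowed LOG.info call relies on implicit concatenation of adjacent string literals, which happens at compile time, so the single % mapping still fills the placeholders in both pieces. A minimal self-contained illustration (names and values are made up):

msg = ('Installing custom examples queries: %(queries)s '
       'belonging to user %(user)s' % {'queries': ['sample_query'], 'user': 'demo'})
# -> "Installing custom examples queries: ['sample_query'] belonging to user demo"
print(msg)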