Browse source

HUE-8665 [editor] Add fix for missing statistics.

jdesjean 7 years ago
parent commit 6d83ff8
25 changed files with 180 additions and 27 deletions
  1. +22 -0   apps/impala/src/impala/api.py
  2. +2 -1    apps/impala/src/impala/urls.py
  3. +0 -0    desktop/core/src/desktop/static/desktop/css/hue-embedded.css
  4. +0 -0    desktop/core/src/desktop/static/desktop/css/hue.css
  5. +23 -0   desktop/core/src/desktop/static/desktop/js/apiHelper.js
  6. +4 -0    desktop/core/src/desktop/static/desktop/less/hue4.less
  7. +25 -10  desktop/core/src/desktop/templates/ko_components/ko_execution_analysis.mako
  8. +3 -0    desktop/libs/libanalyze/reasons/agg_performance.json
  9. +3 -0    desktop/libs/libanalyze/reasons/bytes_read_skew.json
  10. +3 -0   desktop/libs/libanalyze/reasons/join_performance.json
  11. +3 -0   desktop/libs/libanalyze/reasons/metadata_missing.json
  12. +3 -0   desktop/libs/libanalyze/reasons/remote_scan_ranges.json
  13. +3 -0   desktop/libs/libanalyze/reasons/rows_read_skew.json
  14. +3 -0   desktop/libs/libanalyze/reasons/scan_performance.json
  15. +3 -0   desktop/libs/libanalyze/reasons/scanner_parallelism.json
  16. +3 -0   desktop/libs/libanalyze/reasons/selective_scan.json
  17. +3 -0   desktop/libs/libanalyze/reasons/skew.json
  18. +3 -0   desktop/libs/libanalyze/reasons/slow_table_sink.json
  19. +3 -0   desktop/libs/libanalyze/reasons/sort_performance.json
  20. +3 -0   desktop/libs/libanalyze/reasons/spilling.json
  21. +7 -0   desktop/libs/libanalyze/reasons/stats_missing.json
  22. +3 -0   desktop/libs/libanalyze/reasons/too_many_columns.json
  23. +8 -0   desktop/libs/libanalyze/src/libanalyze/analyze.py
  24. +11 -0  desktop/libs/libanalyze/src/libanalyze/models.py
  25. +36 -16 desktop/libs/libanalyze/src/libanalyze/rules.py

+ 22 - 0
apps/impala/src/impala/api.py

@@ -41,6 +41,8 @@ from impala.server import get_api as get_impalad_api, _get_impala_server_url
 from libanalyze import analyze as analyzer
 from libanalyze import rules
 
+from notebook.models import make_notebook
+
 LOG = logging.getLogger(__name__)
 ANALYZER = rules.TopDownAnalysis() # We need to parse some files so save as global
 
@@ -154,4 +156,24 @@ def alanize(request):
         heatmap[key] = metrics
     response['data'] = { 'query': { 'healthChecks' : result[0]['result'], 'summary': summary, 'heatmap': heatmap, 'heatmapMetrics': sorted(list(heatmap.iterkeys())) } }
     response['status'] = 0
+  return JsonResponse(response)
+
+@require_POST
+@error_handler
+def alanize_fix(request):
+  response = {'status': -1}
+  fix = json.loads(request.POST.get('fix'))
+  start_time = json.loads(request.POST.get('start_time', '-1'))
+  if fix['id'] == 0:
+    notebook = make_notebook(
+      name=_('compute stats %(data)s') % fix,
+      editor_type='impala',
+      statement='compute stats %(data)s' % fix,
+      status='ready',
+      last_executed=start_time,
+      is_task=True
+    )
+    response['details'] = { 'task': notebook.execute(request, batch=True) }
+    response['status'] = 0
+
   return JsonResponse(response)
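
The endpoint only knows one fix so far: id 0 triggers a compute stats statement for the table carried in the fix's data field, submitted as a background task via make_notebook. A minimal sketch of the request/response contract, exercised directly with requests (the table name and Hue address are assumptions; login, session cookies and CSRF handling are omitted):

import json
import requests

payload = {
    'fix': json.dumps({'id': 0, 'data': 'default.web_logs'}),  # 'data' is the table to compute stats on
    'start_time': json.dumps(-1),                              # -1 when there is no last-executed timestamp
}
r = requests.post('http://localhost:8888/impala/api/query/alanize/fix', data=payload)
print(r.json())  # {'status': 0, 'details': {'task': {...}}} on success, {'status': -1} otherwise

Any fix with an unknown id falls through and returns status -1, so the client can treat non-zero statuses uniformly as failures.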

+ 2 - 1
apps/impala/src/impala/urls.py

@@ -25,7 +25,8 @@ urlpatterns = [
   url(r'^api/refresh/(?P<database>\w+)/(?P<table>\w+)$', impala_api.refresh_table, name='refresh_table'),
   url(r'^api/query/(?P<query_history_id>\d+)/exec_summary$', impala_api.get_exec_summary, name='get_exec_summary'),
   url(r'^api/query/(?P<query_history_id>\d+)/runtime_profile', impala_api.get_runtime_profile, name='get_runtime_profile'),
-  url(r'^api/query/alanize', impala_api.alanize, name='alanize'),
+  url(r'^api/query/alanize$', impala_api.alanize, name='alanize'),
+  url(r'^api/query/alanize/fix$', impala_api.alanize_fix, name='alanize_fix'),
 ]
 
 urlpatterns += beeswax_urls
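
The terminal $ added to the alanize pattern matters because Django matches these regexes with search semantics: without it, the alanize route (listed first) would also swallow requests for the new alanize/fix URL. A quick illustration with plain re:

import re

path = 'api/query/alanize/fix'
print(bool(re.search(r'^api/query/alanize', path)))   # True: the un-anchored pattern also matches the fix URL
print(bool(re.search(r'^api/query/alanize$', path)))  # False: only the exact path matches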

File diff suppressed because it is too large
+ 0 - 0
desktop/core/src/desktop/static/desktop/css/hue-embedded.css


File diff suppressed because it is too large
+ 0 - 0
desktop/core/src/desktop/static/desktop/css/hue.css


+ 23 - 0
desktop/core/src/desktop/static/desktop/js/apiHelper.js

@@ -2083,6 +2083,29 @@ var ApiHelper = (function () {
     return promise;
   };
 
+  ApiHelper.prototype.fixQueryExecutionAnalysis = function (options)  {
+    var self = this;
+    var url = '/impala/api/query/alanize/fix';
+    var deferred = $.Deferred();
+
+    var request = self.simplePost(url, {
+      fix: JSON.stringify(options.fix),
+      start_time: options.start_time
+    }, {
+      silenceErrors: options.silenceErrors,
+      successCallback: function (response) {
+        if (response.status === 0) {
+          deferred.resolve(response.details);
+        } else {
+          deferred.reject();
+        }
+      },
+      errorCallback: deferred.reject
+    });
+
+    return new CancellablePromise(deferred, request);
+  };
+
   /**
    * @param {Object} options
    * @param {boolean} [options.silenceErrors]

+ 4 - 0
desktop/core/src/desktop/static/desktop/less/hue4.less

@@ -1026,3 +1026,7 @@ ul.risk-list {
   -webkit-animation-name: autofill;
   -webkit-animation-fill-mode: both;
 }
+
+.striked {
+  text-decoration: line-through;
+}

+ 25 - 10
desktop/core/src/desktop/templates/ko_components/ko_execution_analysis.mako

@@ -62,7 +62,7 @@ from desktop.views import _ko
              <div><span data-bind="text: contribution_factor_str"></span> - <strong><span data-bind="duration: wall_clock_time"></span></strong></div>
               <ol data-bind="foreach: reason">
                 <li>
-                  <span data-bind="text: message"></span><strong> - <span data-bind="numberFormat: { value: impact, unit: unit }"></span></strong>
+                  <span data-bind="text: message, css: { striked: fix.fixed }"></span><strong> - <span data-bind="numberFormat: { value: impact, unit: unit }"></span></strong><span data-bind="visible: fix.fixable && !fix.fixed"> - <a href="javascript:void(0);" data-bind="click: $parents[2].handleFix.bind($parents[2], $data.fix)">${_('Fix')}</a></span>
                 </li>
               </ol>
             </li>
@@ -81,6 +81,16 @@ from desktop.views import _ko
 
         self.loading = ko.observable(false);
         self.analysis = ko.observable();
+        self.analysis.subscribe(function (analysis) {
+          $('[href*=executionAnalysis] span:eq(1)').text(self.analysisCount());
+          setTimeout(function () { // Wait for analysis to render
+            if (analysis.heatmap) {
+              self.updateHeatMap(analysis.heatmap[analysis.heatmapMetrics[0]], analysis.heatmapMetrics[0]);
+            } else {
+              d3.select(".heatmap").remove();
+            }
+          }, 0);
+        });
         self.healthChecks = ko.pureComputed(function () {
           var analysis = self.analysis()
           if (!analysis) {
@@ -97,7 +107,6 @@ from desktop.views import _ko
           }
           return '';
         });
-
         self.lastAnalysisPromise = undefined;
 
         var clearAnalysisSub = huePubSub.subscribe('editor.clear.execution.analysis', function() {
@@ -108,8 +117,6 @@ from desktop.views import _ko
             self.lastAnalysisPromise.cancel();
           }
           self.analysis(undefined);
-          $('[href*=executionAnalysis] span:eq(1)').text(self.analysisCount());
-          d3.select(".heatmap").remove();
         });
 
         var executionAnalysisSub = huePubSub.subscribe('editor.update.execution.analysis', function (details) {
@@ -131,7 +138,7 @@ from desktop.views import _ko
       };
       ExecutionAnalysis.prototype.heatmapMetricChanged = function (model, el) {
         var self = this;
-        self.updateHeatMap(self.analysis()['heatmap'][el.target.value], el.target.value);
+        self.updateHeatMap(self.analysis().heatmap[el.target.value], el.target.value);
       };
       ExecutionAnalysis.prototype.loadAnalysis = function (compute, queryId) {
         var self = this;
@@ -141,11 +148,8 @@ from desktop.views import _ko
           compute: compute,
           queryId: queryId
         }).done(function (response) {
-          self.analysis(response.query);
-          $('[href*=executionAnalysis] span:eq(1)').text(self.analysisCount());
-          setTimeout(function () { // Wait for analysis to render
-            self.updateHeatMap(response.query['heatmap'][response.query.heatmapMetrics[0]], response.query.heatmapMetrics[0]);
-          }, 0);
+          var analysis = response.query;
+          self.analysis(analysis);
         }).always(function () {
           self.loading(false);
         });
@@ -209,6 +213,17 @@ from desktop.views import _ko
             .on("mouseout", tip.hide);
       }
 
+      ExecutionAnalysis.prototype.handleFix = function (fix) {
+        var self = this;
+        //TODO: Loading
+        ApiHelper.getInstance().fixQueryExecutionAnalysis({ fix: fix, start_time: ko.mapping.toJSON((new Date()).getTime()) })
+        .done(function(resp) {
+          huePubSub.publish('notebook.task.submitted', resp.task.history_uuid);
+          fix.fixed = true;
+          self.analysis.valueHasMutated();
+        });
+      };
+
       ExecutionAnalysis.prototype.dispose = function () {
         var self = this;
         while (self.disposals.length) {

+ 3 - 0
desktop/libs/libanalyze/reasons/agg_performance.json

@@ -8,5 +8,8 @@
     "expr": "vars['LocalTime'] - float(vars['InputRows']) / 0.01",
     "message": "Excess time (over expected time) spent in the aggregate; might be caused by complex group by",
     "label": "Slow Aggregate"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/bytes_read_skew.json

@@ -15,5 +15,8 @@
     "message": "Addition IO time cost by the skew (assuming 5 disks)",
     "label": "Bytes Read Skew",
     "prio": 2
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/join_performance.json

@@ -9,5 +9,8 @@
     "expr": "vars['ProbeTime'] - float(vars['ProbeRows']) / 0.005",
     "message": "Excess time (over expected time) spent in the hash join",
     "label": "Slow Hash Join"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/metadata_missing.json

@@ -11,5 +11,8 @@
     "expr": "vars['MetadataLoadTime']",
     "message": "The metadata was not cached and it slowed down planning time noticeably.",
     "label": "Metadata Missing"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/remote_scan_ranges.json

@@ -11,5 +11,8 @@
     "message": "Addition IO time cost by the remote read (assuming 30MB/sec remote)",
     "label": "Remote reads",
     "prio": 2
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/rows_read_skew.json

@@ -14,5 +14,8 @@
     "message": "The skew (max-avg) in rows processed",
     "label": "Rows Read Skew",
     "prio": 2
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/scan_performance.json

@@ -12,5 +12,8 @@
     "expr": "(vars['ScannerThreadsUserTime'] + vars['ScannerThreadsSysTime'] - vars['RowsRead'] * 100) / max(1,vars['AverageScannerThreadConcurrency'])",
     "message": "Predicates might be expensive (expectes speed 10m rows per sec per core)",
     "label": "Slow HDFS Scan"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/scanner_parallelism.json

@@ -12,5 +12,8 @@
     "message": "Speed can be improved by that much if there's 8 scanner threads",
     "label": "Lack of scanner thread parallelism",
     "prio": 2
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/selective_scan.json

@@ -11,5 +11,8 @@
     "expr": "float(vars['RowsRead'] - vars['RowsReturned']) / vars['LocalTime']",
     "message": "Filters are selective. Consider a more aggresive partitioning scheme",
     "label": "Insufficient Partitioning"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/skew.json

@@ -11,5 +11,8 @@
     "message": "The skew (max-avg) contributed this amount of time to this SQL operator",
     "expr": "(vars['max'] - vars['avg'])",
     "label": "TotalTime Skew"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/slow_table_sink.json

@@ -12,5 +12,8 @@
     "expr": "float(vars['LocalTime']) - 0.01 / float(vars['BytesWritten'])",
     "message": "Write speed should be at least 1MB/sec. It's slower than expected",
     "label": "Slow write speed"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/sort_performance.json

@@ -9,5 +9,8 @@
     "expr": "vars['LocalTime'] - float(vars['InputRows']) / 0.01",
     "message": "Excess time (over expected time) spent in the sort; might be caused by too many sorting column",
     "label": "Slow Sorting"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 3 - 0
desktop/libs/libanalyze/reasons/spilling.json

@@ -7,5 +7,8 @@
     "expr": "1",
     "message": "This operation has spilled to disk. Check if the ressource configuration of Impala can be changed to allow for a higher memory limit.",
     "label": " Spilled Partitions"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 7 - 0
desktop/libs/libanalyze/reasons/stats_missing.json

@@ -4,10 +4,17 @@
   "metric_names": [
     "MissingStats"
   ],
+  "info_names": ["Table"],
   "rule": {
     "condition": "vars['MissingStats'] == 1",
     "expr": "1",
     "message": "The statistics are missing or corrupt which prevent scan and join optimizations.",
     "label": "Statistics Missing"
+  },
+  "fix": {
+    "id": 0,
+    "fixable": true,
+    "message": "Trigger compute stats for table.",
+    "data": "vars['Table']"
   }
 }
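
stats_missing.json is the only rule marked fixable. Its data field holds an expression, not a value: rules.py evaluates vars['Table'] against the node's info strings and substitutes the result before the fix dict is attached to the Reason and shipped to the frontend. A condensed sketch of that substitution (the table name is an assumed example):

# 'fix' block as loaded from the JSON above
rule_fix = {'id': 0, 'fixable': True, 'message': 'Trigger compute stats for table.', 'data': "vars['Table']"}
evaluated_data = 'default.web_logs'  # stand-in for exprs.Expr.evaluate("vars['Table']", local_vars)

fix = {}
fix.update(rule_fix)
fix['data'] = evaluated_data  # the expression is replaced by its evaluated value
# Clicking "Fix" in the UI posts this dict back, and the backend runs
# 'compute stats %(data)s' % fix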

+ 3 - 0
desktop/libs/libanalyze/reasons/too_many_columns.json

@@ -12,5 +12,8 @@
     "expr": "float(vars['NumColumns'] - 15) / float(vars['NumColumns']) * vars['LocalTime']",
     "message": "Number of materialized columns is high (>15). Consider materializing less columns to improve the performance.",
     "label": "Many Materialized Columns"
+  },
+  "fix": {
+    "fixable": false
   }
 }

+ 8 - 0
desktop/libs/libanalyze/src/libanalyze/analyze.py

@@ -162,6 +162,14 @@ class Node(object):
                   'unit': ctr_map[cc].unit, 'parent': parent, 'node': node})
     return counters
 
+  def find_info_by_name(self, pattern):
+    node = self
+    ctr_map = node.val.info_strings
+    counters = []
+    if ctr_map.get(pattern):
+      counters.append({'name': pattern, 'value': ctr_map.get(pattern), 'node': node})
+    return counters
+
   # Only for fragments
   def is_averaged(self):
     return re.search(r"Averaged", self.val.name) is not None
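
find_info_by_name is the info-string counterpart of find_metric_by_name: it reads the node's free-form info_strings map instead of its counters, and an exact key yields at most one entry per node. A stand-in sketch of the lookup it performs (assumed info_strings content, node elided):

info_strings = {'Table': 'default.web_logs'}  # assumed example of a node's info strings

pattern = 'Table'
counters = []
if info_strings.get(pattern):
    counters.append({'name': pattern, 'value': info_strings.get(pattern), 'node': None})
print(counters)  # [{'name': 'Table', 'value': 'default.web_logs', 'node': None}]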

+ 11 - 0
desktop/libs/libanalyze/src/libanalyze/models.py

@@ -36,6 +36,7 @@ class Reason(object):
     self.message = None
     self.impact = None
     self.unit = None
+    self.fix = None
     self.__dict__.update(kwargs)
 
   def to_json(self):
@@ -80,6 +81,16 @@ def query_element_by_metric(profile, node_name, metric_name):
   metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
   return map(lambda x: L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
 
+def query_element_by_info(profile, node_name, metric_name):
+  """Given the query_id, searches for the corresponding query profile and
+  selects the node instances given by node_name, selects the metric given by
+  metric_name and groups by fragment and fragment instance."""
+
+  result = profile.find_all_by_name(node_name)
+  nodes = filter(lambda x: not x.fragment or x.fragment.is_averaged() == False, result)
+  metric = reduce(lambda x, y: x + y.find_info_by_name(metric_name), nodes, [])
+  return map(lambda x: L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
+
 def query_avg_fragment_metric_by_node_nid(profile, node_nid, metric_name):
   """
   Given the surragate node id (i.e. unique id of the plan node in the database),

+ 36 - 16
desktop/libs/libanalyze/src/libanalyze/rules.py

@@ -17,6 +17,7 @@
 import copy
 import glob
 import json
+import logging
 import os
 import re
 import types
@@ -31,6 +32,8 @@ from libanalyze import models
 from libanalyze import exprs
 from libanalyze import utils
 
+LOG = logging.getLogger(__name__)
+
 def to_double(metric_value):
     return struct.unpack('d', struct.pack('q', metric_value))[0]
 
@@ -115,6 +118,7 @@ class SQLOperatorReason:
         :return:
         """
         impact = -1
+        expr_data = ''
         if len(self.exprs):
             assert len(self.metric_names) == 1
 
@@ -158,11 +162,19 @@ class SQLOperatorReason:
                     if (impact is None or impact < expr_val):
                         impact = expr_val
 
+            if self.kwargs.get('info_names'):
+              db_result = [models.query_element_by_info(profile, plan_node_id, m) for m in self.kwargs['info_names']]
+              all_metrics = zip(*db_result)
+              for row in all_metrics:
+                metric_values = map(lambda x: x.value, row)
+                local_vars['vars'].update(dict(zip(self.kwargs['info_names'], metric_values)))
+                expr_data = exprs.Expr.evaluate(self.kwargs['fix']['data'], local_vars)
+
         msg = self.rule["label"] + ": " + self.rule["message"]
         return {
             "impact": impact,
             "message": msg,
-            "unit": self.kwargs.get('unit_id', -1)
+            "data": expr_data
         }
 
     def check_exprs(self, group):
@@ -229,12 +241,12 @@ class SummaryReason(SQLOperatorReason):
         msg = self.rule["label"] + ": " + self.rule["message"]
         return {
             "impact": impact,
-            "message": msg,
-            "unit": self.kwargs.get('unit_id', -1)
+            "message": msg
         }
 
 class JoinOrderStrategyCheck(SQLOperatorReason):
-    def __init__(self): pass
+    def __init__(self):
      self.kwargs = {'fix': { 'fixable': False }, 'unit_id': 5}
 
     def evaluate(self, profile, plan_node_id):
         """
@@ -278,12 +290,12 @@ class JoinOrderStrategyCheck(SQLOperatorReason):
         impact = (networkcost - min(bcost, scost) - 1) / hosts / 0.01
         return {
             "impact": impact,
-            "message": "Wrong join strategy - RHS %d; LHS %d" % (rhsRows, lhsRows),
-            "unit": 5
+            "message": "Wrong join strategy - RHS %d; LHS %d" % (rhsRows, lhsRows)
         }
 
 class ExplodingJoinCheck(SQLOperatorReason):
-    def __init__(self): pass
+    def __init__(self):
      self.kwargs = {'fix': { 'fixable': False }, 'unit_id': 5}
 
     def evaluate(self, profile, plan_node_id):
         """
@@ -307,12 +319,12 @@ class ExplodingJoinCheck(SQLOperatorReason):
             impact = probeTime * (rowsReturned - probeRows) / rowsReturned
         return {
             "impact": impact,
-            "message": "Exploding join: %d input rows are exploded to %d output rows" % (probeRows, rowsReturned),
-            "unit": 5
+            "message": "Exploding join: %d input rows are exploded to %d output rows" % (probeRows, rowsReturned)
         }
 
 class NNRpcCheck(SQLOperatorReason):
-    def __init__(self): pass
+    def __init__(self):
      self.kwargs = {'fix': { 'fixable': False }, 'unit_id': 5}
 
     def evaluate(self, profile, plan_node_id):
         """
@@ -331,8 +343,7 @@ class NNRpcCheck(SQLOperatorReason):
         impact = max(0, (totalStorageTime - hdfsRawReadTime) / avgReadThreads)
         return {
             "impact": impact,
-            "message": "This is the time waiting for HDFS NN RPC.",
-            "unit": 5
+            "message": "This is the time waiting for HDFS NN RPC."
         }
 
 class TopDownAnalysis:
@@ -345,6 +356,7 @@ class TopDownAnalysis:
         self.sqlOperatorReasons = {}
         for r in glob.glob("{0}/*.json".format(self.base_dir)):
             with open(r, "r") as fid:
+                LOG.debug('Loading file %s' % r)
                 json_object = json.load(fid)
                 type = json_object["type"]
                 node_names = json_object["node_name"]
@@ -472,7 +484,11 @@ class TopDownAnalysis:
             if isinstance(impact, float) and (impact).is_integer():
               evaluation["impact"] = int(impact)
             if (evaluation["impact"] > 0):
-                reason = models.Reason(message=evaluation['message'], impact=evaluation['impact'], unit=evaluation['unit'])
+                fix = {}
+                fix.update(cause.kwargs['fix'])
+                if evaluation.get('data'):
+                  fix['data'] = evaluation['data']
+                reason = models.Reason(message=evaluation['message'], impact=evaluation['impact'], unit=cause.kwargs.get('unit_id', ''), fix=fix)
                 reasons.append(reason)
         return sorted(reasons, key=lambda x: x.impact, reverse=True)
 
@@ -490,7 +506,11 @@ class TopDownAnalysis:
             if isinstance(impact, float) and (impact).is_integer():
               evaluation["impact"] = int(impact)
             if (evaluation["impact"] > 0):
-                reason = models.Reason(message=evaluation['message'], impact=evaluation['impact'], unit=evaluation['unit'])
+                fix = {}
+                fix.update(cause.kwargs['fix'])
+                if evaluation.get('data'):
+                  fix['data'] = evaluation['data']
+                reason = models.Reason(message=evaluation['message'], impact=evaluation['impact'], unit=cause.kwargs.get('unit_id', ''), fix=fix)
                 reasons.append(reason)
         return sorted(reasons, key=lambda x: x.impact, reverse=True)
 
@@ -562,8 +582,8 @@ class TopDownAnalysis:
 
           missing_stats = {}
           for key in ['Tables Missing Stats', 'Tables With Corrupt Table Stats']:
-            if summary.val.info_strings[key]:
-              tables = summary.val.info_strings['Tables Missing Stats'].split(',')
+            if summary.val.info_strings.get(key):
+              tables = summary.val.info_strings.get(key).split(',')
               for table in tables:
                 missing_stats[table] = 1
 
