
HUE-8665 [editor] Add basic execution analysis for Impala

jdesjean 7 years ago
commit b3d86e3f7a
45 changed files with 2635 additions and 31 deletions
  1. + 41 - 0    apps/impala/src/impala/api.py
  2. + 22 - 0    apps/impala/src/impala/server.py
  3. + 1 - 0     apps/impala/src/impala/urls.py
  4. + 3 - 0     apps/jobbrowser/src/jobbrowser/apis/query_api.py
  5. + 1 - 0     desktop/Makefile
  6. + 0 - 0     desktop/core/src/desktop/static/desktop/ext/js/d3-tip.min.js
  7. + 1 - 0     desktop/core/src/desktop/static/desktop/ext/js/sprintf.min.js
  8. + 2 - 1     desktop/core/src/desktop/static/desktop/js/apiHelper.js
  9. + 110 - 0   desktop/core/src/desktop/static/desktop/js/ko.hue-bindings.js
  10. + 107 - 15 desktop/core/src/desktop/templates/ko_components/ko_execution_analysis.mako
  11. + 34 - 0   desktop/libs/libanalyze/Makefile
  12. + 1 - 0    desktop/libs/libanalyze/babel.cfg
  13. + 1 - 0    desktop/libs/libanalyze/gen-py/Metrics/__init__.py
  14. + 11 - 0   desktop/libs/libanalyze/gen-py/Metrics/constants.py
  15. + 82 - 0   desktop/libs/libanalyze/gen-py/Metrics/ttypes.py
  16. + 1 - 0    desktop/libs/libanalyze/gen-py/RuntimeProfile/__init__.py
  17. + 11 - 0   desktop/libs/libanalyze/gen-py/RuntimeProfile/constants.py
  18. + 646 - 0  desktop/libs/libanalyze/gen-py/RuntimeProfile/ttypes.py
  19. + 1 - 0    desktop/libs/libanalyze/hueversion.py
  20. + 12 - 0   desktop/libs/libanalyze/reasons/agg_performance.json
  21. + 19 - 0   desktop/libs/libanalyze/reasons/bytes_read_skew.json
  22. + 13 - 0   desktop/libs/libanalyze/reasons/join_performance.json
  23. + 15 - 0   desktop/libs/libanalyze/reasons/remote_scan_ranges.json
  24. + 18 - 0   desktop/libs/libanalyze/reasons/rows_read_skew.json
  25. + 15 - 0   desktop/libs/libanalyze/reasons/scan_performance.json
  26. + 16 - 0   desktop/libs/libanalyze/reasons/scanner_parallelism.json
  27. + 15 - 0   desktop/libs/libanalyze/reasons/selective_scan.json
  28. + 15 - 0   desktop/libs/libanalyze/reasons/skew.json
  29. + 16 - 0   desktop/libs/libanalyze/reasons/slow_table_sink.json
  30. + 12 - 0   desktop/libs/libanalyze/reasons/sort_performance.json
  31. + 11 - 0   desktop/libs/libanalyze/reasons/spilling.json
  32. + 16 - 0   desktop/libs/libanalyze/reasons/too_many_columns.json
  33. + 29 - 0   desktop/libs/libanalyze/setup.py
  34. + 15 - 0   desktop/libs/libanalyze/src/libanalyze/__init__.py
  35. + 397 - 0  desktop/libs/libanalyze/src/libanalyze/analyze.py
  36. + 37 - 0   desktop/libs/libanalyze/src/libanalyze/analyze_test.py
  37. + 47 - 0   desktop/libs/libanalyze/src/libanalyze/dot.py
  38. + 45 - 0   desktop/libs/libanalyze/src/libanalyze/exprs.py
  39. + 46 - 0   desktop/libs/libanalyze/src/libanalyze/gjson.py
  40. + 134 - 0  desktop/libs/libanalyze/src/libanalyze/models.py
  41. + 530 - 0  desktop/libs/libanalyze/src/libanalyze/rules.py
  42. + 70 - 0   desktop/libs/libanalyze/src/libanalyze/utils.py
  43. + 0 - 0    desktop/libs/libanalyze/testdata/profile.thrift
  44. + 15 - 14  desktop/libs/notebook/src/notebook/static/notebook/js/notebook.ko.js
  45. + 1 - 1    desktop/libs/notebook/src/notebook/templates/editor_components.mako

+ 41 - 0
apps/impala/src/impala/api.py

@@ -17,13 +17,16 @@
 
 ## Main views are inherited from Beeswax.
 
+import base64
 import logging
 import json
+import struct
 
 from django.utils.translation import ugettext as _
 from django.views.decorators.http import require_POST
 
 from desktop.lib.django_util import JsonResponse
+from desktop.models import Document2
 
 from beeswax.api import error_handler
 from beeswax.server.dbms import get_cluster_config
@@ -32,9 +35,14 @@ from beeswax.server import dbms as beeswax_dbms
 from beeswax.views import authorized_get_query_history
 
 from impala import dbms
+from impala.dbms import _get_server_name
+from impala.server import get_api as get_impalad_api, _get_impala_server_url
 
+from libanalyze import analyze as analyzer
+from libanalyze import rules
 
 LOG = logging.getLogger(__name__)
+ANALYZER = rules.TopDownAnalysis() # We need to parse some files so save as global
 
 @require_POST
 @error_handler
@@ -112,3 +120,36 @@ def get_runtime_profile(request, query_history_id):
     response['profile'] = profile
 
   return JsonResponse(response)
+
+@require_POST
+@error_handler
+def alanize(request):
+  response = {'status': -1}
+  cluster = json.loads(request.POST.get('cluster', '{}'))
+  query_id = json.loads(request.POST.get('query_id'))
+
+  application = _get_server_name(cluster)
+  query_server = dbms.get_query_server_config()
+  session = Session.objects.get_session(request.user, query_server['server_name'])
+  server_url = _get_impala_server_url(session)
+
+  if query_id:
+    LOG.debug("Attempting to get Impala query profile at server_url %s for query ID: %s" % (server_url, query_id))
+    doc = Document2.objects.get(id=query_id)
+    snippets = doc.data_dict.get('snippets', [])
+    secret = snippets[0]['result']['handle']['secret']
+    api = get_impalad_api(user=request.user, url=server_url)
+    impala_query_id = "%x:%x" % struct.unpack(b"QQ", base64.decodestring(secret))
+    api.kill(impala_query_id) # There are many statistics that are not present when the query is open. Close it first.
+    query_profile = api.get_query_profile_encoded(impala_query_id)
+    profile = analyzer.analyze(analyzer.parse_data(query_profile))
+    result = ANALYZER.run(profile)
+
+    heatmap = {}
+    summary = analyzer.summary(profile)
+    heatmapMetrics = ['AverageThreadTokens', 'BloomFilterBytes', 'PeakMemoryUsage', 'PerHostPeakMemUsage', 'PrepareTime', 'RowsProduced', 'TotalCpuTime', 'TotalNetworkReceiveTime', 'TotalNetworkSendTime', 'TotalStorageWaitTime', 'TotalTime']
+    for key in heatmapMetrics:
+      heatmap[key] = analyzer.heatmap_by_host(profile, key)
+    response['data'] = { 'query': { 'healthChecks' : result[0]['result'], 'summary': summary, 'heatmap': heatmap, 'heatmapMetrics': heatmapMetrics } }
+    response['status'] = 0
+  return JsonResponse(response)
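
Note: the view converts the snippet handle secret stored in Document2 into the hi:lo hexadecimal format Impala expects for query IDs. The snippet below is not part of the commit; it only isolates that conversion (to_impala_query_id is a hypothetical helper name), matching the inline base64/struct handling above.

# Python 2, to match the code above.
import base64
import struct

def to_impala_query_id(secret):
  # The secret is a base64-encoded pair of unsigned 64-bit integers,
  # rendered as two hex words separated by a colon.
  return "%x:%x" % struct.unpack(b"QQ", base64.decodestring(secret))

# Round-trip with a made-up handle, purely for illustration.
fake_secret = base64.encodestring(struct.pack(b"QQ", 0xdeadbeef, 0xcafe))
print to_impala_query_id(fake_secret)  # -> deadbeef:cafe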

+ 22 - 0
apps/impala/src/impala/server.py

@@ -281,3 +281,25 @@ class ImpalaDaemonApi(object):
         return resp
     except ValueError, e:
       raise ImpalaDaemonApiException('ImpalaDaemonApi query_finstances did not return valid JSON: %s' % e)
+
+  def get_query_summary(self, query_id):
+    params = {
+      'query_id': query_id,
+      'json': 'true'
+    }
+
+    resp = self._root.get('query_summary', params=params)
+    try:
+      if isinstance(resp, basestring):
+        return json.loads(resp)
+      else:
+        return resp
+    except ValueError, e:
+      raise ImpalaDaemonApiException('ImpalaDaemonApi query_summary did not return valid JSON: %s' % e)
+
+  def get_query_profile_encoded(self, query_id):
+    params = {
+      'query_id': query_id
+    }
+
+    return self._root.get('query_profile_encoded', params=params)
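
Note: get_query_profile_encoded returns the profile in Impala's encoded form rather than as plain text; the alanize view above hands it to libanalyze's parse_data. The sketch below is not part of the commit and assumes the usual encoding (base64 over a zlib-compressed, compact-protocol TRuntimeProfileTree, with the response body being the bare encoded string); the generated RuntimeProfile types it imports are added under desktop/libs/libanalyze/gen-py later in this commit.

import base64
import zlib

from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport

from RuntimeProfile.ttypes import TRuntimeProfileTree  # requires gen-py on sys.path

def decode_profile(encoded_profile):
  # base64 -> zlib -> compact-protocol thrift, under the assumption above.
  raw = zlib.decompress(base64.b64decode(encoded_profile))
  protocol = TCompactProtocol.TCompactProtocol(TTransport.TMemoryBuffer(raw))
  tree = TRuntimeProfileTree()
  tree.read(protocol)
  tree.validate()
  return tree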

+ 1 - 0
apps/impala/src/impala/urls.py

@@ -25,6 +25,7 @@ urlpatterns = [
   url(r'^api/refresh/(?P<database>\w+)/(?P<table>\w+)$', impala_api.refresh_table, name='refresh_table'),
   url(r'^api/query/(?P<query_history_id>\d+)/exec_summary$', impala_api.get_exec_summary, name='get_exec_summary'),
   url(r'^api/query/(?P<query_history_id>\d+)/runtime_profile', impala_api.get_runtime_profile, name='get_runtime_profile'),
+  url(r'^api/query/alanize', impala_api.alanize, name='alanize'),
 ]
 
 urlpatterns += beeswax_urls
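
Note: the new route is POST-only and reads JSON-encoded form fields, and it is the URL the updated ApiHelper.fetchQueryExecutionAnalysis now points at. A hypothetical external call, not part of the commit, assuming a local Hue on port 8888 with login and CSRF already handled by the session:

import json
import requests

session = requests.Session()  # assumed to be authenticated, with CSRF set up
response = session.post(
  'http://localhost:8888/impala/api/query/alanize',
  data={
    'query_id': json.dumps('1234'),  # Document2 id of the editor query
    'cluster': json.dumps({}),
  },
)
print response.json()['data']['query']['healthChecks']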

+ 3 - 0
apps/jobbrowser/src/jobbrowser/apis/query_api.py

@@ -161,6 +161,9 @@ class QueryApi(Api):
     else:
       return self._query(appid)
 
+  def profile_encoded(self, appid):
+    return self.api.get_query_profile_encoded(query_id=appid)
+
   def _memory(self, appid, app_type, app_property, app_filters):
     return self.api.get_query_memory(query_id=appid);
 

+ 1 - 0
desktop/Makefile

@@ -44,6 +44,7 @@ APPS := core \
 	libs/azure \
 	libs/hadoop \
 	libs/indexer \
+	libs/libanalyze \
 	libs/liboauth \
 	libs/liboozie \
 	libs/libopenid \

File diff suppressed because it is too large
+ 0 - 0
desktop/core/src/desktop/static/desktop/ext/js/d3-tip.min.js


File diff suppressed because it is too large
+ 1 - 0
desktop/core/src/desktop/static/desktop/ext/js/sprintf.min.js


+ 2 - 1
desktop/core/src/desktop/static/desktop/js/apiHelper.js

@@ -2041,7 +2041,8 @@ var ApiHelper = (function () {
    */
   ApiHelper.prototype.fetchQueryExecutionAnalysis = function (options)  {
     var self = this;
-    var url = '/metadata/api/workload_analytics/get_impala_query/';
+    //var url = '/metadata/api/workload_analytics/get_impala_query/';
+    var url = '/impala/api/query/alanize';
     var deferred = $.Deferred();
 
     var tries = 0;

+ 110 - 0
desktop/core/src/desktop/static/desktop/js/ko.hue-bindings.js

@@ -7500,4 +7500,114 @@
     }
   };
 
+  ko.bindingHandlers.numberFormat = (function() {
+    var that;
+    return that = {
+      init: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      update: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      format: function (element, valueAccessor) {
+        var value = valueAccessor();
+        var unwrapped = ko.unwrap(value);
+        var fn;
+        if (unwrapped.unit == 3) {
+          fn = ko.bindingHandlers.bytesize.humanSize
+        } else { // 5
+          fn = ko.bindingHandlers.duration.humanTime
+        }
+        var formatted = fn(unwrapped.value);
+        $(element).text(formatted);
+      }
+    }
+  })();
+
+  ko.bindingHandlers.duration = (function() {
+    var that;
+    return that = {
+      init: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      update: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      format: function (element, valueAccessor) {
+        var value = valueAccessor();
+        var formatted = that.humanTime(ko.unwrap(value));
+        $(element).text(formatted);
+      },
+      humanTime: function (value) {
+        value = value * 1;
+        if (value < Math.pow(10, 3)) {
+          return value + " ns";
+        } else if (value < Math.pow(10, 6)) {
+          value = (value * 1.0) / Math.pow(10, 6);
+          return sprintf("%.4f ms", value);
+        } else if (value < Math.pow(10, 9)) {
+          value = (value * 1.0) / Math.pow(10, 9);
+          return sprintf("%.4f s", value);
+        } else {
+          // get the ms value
+          var SECOND = 1000;
+          var MINUTE = SECOND * 60;
+          var HOUR = MINUTE * 60;
+          var value = value * 1 / Math.pow(10, 6);
+          var buffer = "";
+
+          if (value > (HOUR)) {
+            buffer += sprintf("%i h ", value / HOUR);
+            value = value % HOUR;
+          }
+
+          if (value > MINUTE) {
+            buffer += sprintf("%i m ", value / MINUTE);
+            value = value % MINUTE;
+          }
+
+          if (value > SECOND) {
+            buffer += sprintf("%.3f s", value * 1.0 / SECOND);
+          }
+          return buffer;
+        }
+      }
+    };
+  })();
+
+  ko.bindingHandlers.bytesize = (function() {
+    var that;
+    return that = {
+      units: ["B", "KB", "MB", "GB", "TB", "PB"],
+      init: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      update: function (element, valueAccessor) {
+        that.format(element, valueAccessor);
+      },
+      format: function (element, valueAccessor) {
+        var value = valueAccessor();
+        var formatted = that.humanSize(ko.unwrap(value));
+        $(element).text(formatted);
+      },
+      getBaseLog: function(x, y) {
+        return Math.log(x) / Math.log(y);
+      },
+      humanSize: function(bytes) {
+        if (!bytes) {
+          return '';
+        }
+
+        // Special case small numbers (including 0), because they're exact.
+        if (bytes < 1024) {
+          return sprintf("%d B", bytes);
+        }
+
+        var index = Math.floor(that.getBaseLog(bytes, 1024));
+        index = Math.min(that.units.length - 1, index);
+        return sprintf("%.1f %s", bytes / Math.pow(1024, index), that.units[index])
+      }
+    };
+  })();
+
 })();

+ 107 - 15
desktop/core/src/desktop/templates/ko_components/ko_execution_analysis.mako

@@ -25,14 +25,8 @@ from desktop.views import _ko
 %>
 
 <%def name="executionAnalysis()">
-  <script type="text/html" id="health-check-details-content">
-    <div data-bind="text: description"></div>
-  </script>
-
-  <script type="text/html" id="health-check-details-title">
-    <span data-bind="text: name"></span>
-  </script>
-
+  <script src="${ static('desktop/ext/js/d3-tip.min.js') }"></script>
+  <script src="${ static('desktop/ext/js/sprintf.min.js') }"></script>
   <script type="text/html" id="hue-execution-analysis-template">
     <div class="hue-execution-analysis">
       <!-- ko hueSpinner: { spin: loading, inline: true } --><!-- /ko -->
@@ -46,11 +40,34 @@ from desktop.views import _ko
         <div class="no-analysis">${ _('Analysis was not possible for the executed query.') }</div>
         <!-- /ko -->
         <!-- ko with: analysis -->
-        <ul class="risk-list" data-bind="foreach: healthChecks">
-          <li data-bind="templatePopover : { placement: 'right', contentTemplate: 'health-check-details-content', titleTemplate: 'health-check-details-title', minWidth: '320px', trigger: 'hover' }">
-            <div class="risk-list-title risk-list-normal"><span data-bind="text: name"></span></div>
-          </li>
-        </ul>
+        <div>
+          <h4>${_('Heatmap')}</h4>
+          <div>
+              <select data-bind="options: heatmapMetrics, event: { change: $parent.heatmapMetricChanged.bind($parent) }"></select>
+              <svg class="heatmap"/>
+          </div>
+        </div>
+        <div>
+          <h4>${_('Summary')}</h4>
+          <ul class="risk-list" data-bind="foreach: summary" style="margin-bottom: 10px">
+            <li>
+              <span data-bind="text: key"></span>: <strong><span data-bind="numberFormat: { value: value, unit: unit }"></strong></span>
+            </li>
+          </ul>
+        </div>
+        <div>
+          <h4>${_('Top down analysis')}</h4>
+          <ul class="risk-list" data-bind="foreach: healthChecks">
+            <li>
+              <div><span data-bind="text: contribution_factor_str"></span> - <strong><span data-bind="duration: wall_clock_time"></strong></div>
+              <ul class="risk-list" data-bind="foreach: reason">
+                <li>
+                  <span data-bind="text: message"></span><strong> - <span data-bind="duration: impact"></span></strong>
+                </li>
+              </ul>
+            </li>
+          </ul>
+        </div>
         <!-- /ko -->
       <!-- /ko -->
     </div>
@@ -65,6 +82,14 @@ from desktop.views import _ko
         self.loading = ko.observable(false);
         self.analysis = ko.observable();
         self.analysisPossible = ko.observable(true);
+        self.analysisCount = ko.pureComputed(function () {
+          if (!self.analysis()) {
+            return '';
+          }
+          return '(' + self.analysis().healthChecks.reduce(function (count, check) {
+            return count + check.reason.length;
+          }, 0) + ')';
+        });
 
         self.lastAnalysisPromise = undefined;
 
@@ -76,6 +101,9 @@ from desktop.views import _ko
             self.lastAnalysisPromise.cancel();
           }
           self.analysis(undefined);
+          $('[href*=executionAnalysis] span:eq(1)').text(self.analysisCount());
+          $(".d3-tip");
+          d3.select(".heatmap").remove();
         });
 
         var executionAnalysisSub = huePubSub.subscribe('editor.update.execution.analysis', function (details) {
@@ -95,7 +123,10 @@ from desktop.views import _ko
           executionAnalysisSub.remove();
         });
       };
-
+      ExecutionAnalysis.prototype.heatmapMetricChanged = function (model, el) {
+        var self = this;
+        self.updateHeatMap(self.analysis()['heatmap'][el.target.value], el.target.value);
+      };
       ExecutionAnalysis.prototype.loadAnalysis = function (compute, queryId) {
         var self = this;
         self.loading(true);
@@ -104,12 +135,73 @@ from desktop.views import _ko
           compute: compute,
           queryId: queryId
         }).done(function (response) {
-          self.analysis(response.query)
+          self.analysis(response.query);
+          $('[href*=executionAnalysis] span:eq(1)').text(self.analysisCount());
+          setTimeout(function () { // Wait for analysis to render
+            self.updateHeatMap(response.query['heatmap'][response.query.heatmapMetrics[0]], response.query.heatmapMetrics[0]);
+          }, 0);
         }).always(function () {
           self.loading(false);
         });
       };
 
+      ExecutionAnalysis.prototype.updateHeatMap = function(data, counterName) {
+        // Heatmap block and gap sizes
+        var blockWidth = 40;
+        var blockGap = 5;
+
+        // Create tooltip
+        var d3 = window.d3v3;
+        $(".d3-tip").remove();
+        var tip = d3.d3tip()
+          .attr('class', 'd3-tip')
+          .offset([-10, 0])
+          .html(function(d) {
+            var host = d[0];
+            if (host.indexOf(":") >= 0) {
+                host = host.substring(0, host.indexOf(":"));
+            }
+            var value = d[2];
+            var formattedValue = String(value).replace(/\B(?=(\d{3})+(?!\d))/g, ",");
+            return "<strong style='color:cyan'>" + host + "</strong><br><strong>" + counterName + ":</strong> <span style='color:red'>" + formattedValue + "</span>";
+          });
+        d3.select(".heatmap").call(tip);
+
+        // Color gradient
+        var colors = ['#f6faaa', '#9E0142'];
+        var colorScale = d3.scale.linear()
+            .domain([0, 1])
+            .interpolate(d3.interpolateHsl)
+            .range(colors);
+
+        // Define map dimensions
+        var svgWidth = $(".heatmap").width();
+        var cols = Math.trunc(svgWidth / (blockWidth + blockGap));
+        $(".heatmap").height((Math.trunc((data.data.length - 1) / cols) + 1) * (blockWidth + blockGap));
+
+        // Attribute functions
+        var x = function(d, i) { return (i % cols) * (blockWidth + blockGap) + 1; };
+        var y = function(d, i) { return Math.trunc(i / cols) * (blockWidth + blockGap) + 1; };
+        var c = function(d, i) { return colorScale(d[3]); };
+
+        d3.select(".heatmap").selectAll(".box")
+            .data([])
+          .exit()
+            .remove();
+        d3.select(".heatmap").selectAll(".box")
+            .data(data.data)
+          .enter()
+            .append("rect")
+            .attr("class", "box")
+            .attr("x", x)
+            .attr("y", y)
+            .attr("height", blockWidth)
+            .attr("width", blockWidth)
+            .attr("fill", c)
+            .on("mouseover", tip.show)
+            .on("mouseout", tip.hide);
+      }
+
       ExecutionAnalysis.prototype.dispose = function () {
         var self = this;
         while (self.disposals.length) {

+ 34 - 0
desktop/libs/libanalyze/Makefile

@@ -0,0 +1,34 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+ifeq ($(ROOT),)
+  $(error "Error: Expect the environment variable $$ROOT to point to the Desktop installation")
+endif
+
+include $(ROOT)/Makefile.sdk
+
+default::
+	@echo '  env-install    : Install into virtual-env'
+
+#
+# env-install
+#   Install app into the virtual environment.
+#
+.PHONY: env-install
+env-install: compile ext-env-install
+	@echo '--- Installing $(APP_NAME) into virtual-env'
+	@$(ENV_PYTHON) setup.py develop -N -q

+ 1 - 0
desktop/libs/libanalyze/babel.cfg

@@ -0,0 +1 @@
+[python: src/libanalyze/**.py]

+ 1 - 0
desktop/libs/libanalyze/gen-py/Metrics/__init__.py

@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants']

+ 11 - 0
desktop/libs/libanalyze/gen-py/Metrics/constants.py

@@ -0,0 +1,11 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+from ttypes import *
+

+ 82 - 0
desktop/libs/libanalyze/gen-py/Metrics/ttypes.py

@@ -0,0 +1,82 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class TUnit:
+  UNIT = 0
+  UNIT_PER_SECOND = 1
+  CPU_TICKS = 2
+  BYTES = 3
+  BYTES_PER_SECOND = 4
+  TIME_NS = 5
+  DOUBLE_VALUE = 6
+  NONE = 7
+  TIME_MS = 8
+  TIME_S = 9
+
+  _VALUES_TO_NAMES = {
+    0: "UNIT",
+    1: "UNIT_PER_SECOND",
+    2: "CPU_TICKS",
+    3: "BYTES",
+    4: "BYTES_PER_SECOND",
+    5: "TIME_NS",
+    6: "DOUBLE_VALUE",
+    7: "NONE",
+    8: "TIME_MS",
+    9: "TIME_S",
+  }
+
+  _NAMES_TO_VALUES = {
+    "UNIT": 0,
+    "UNIT_PER_SECOND": 1,
+    "CPU_TICKS": 2,
+    "BYTES": 3,
+    "BYTES_PER_SECOND": 4,
+    "TIME_NS": 5,
+    "DOUBLE_VALUE": 6,
+    "NONE": 7,
+    "TIME_MS": 8,
+    "TIME_S": 9,
+  }
+
+class TMetricKind:
+  GAUGE = 0
+  COUNTER = 1
+  PROPERTY = 2
+  STATS = 3
+  SET = 4
+  HISTOGRAM = 5
+
+  _VALUES_TO_NAMES = {
+    0: "GAUGE",
+    1: "COUNTER",
+    2: "PROPERTY",
+    3: "STATS",
+    4: "SET",
+    5: "HISTOGRAM",
+  }
+
+  _NAMES_TO_VALUES = {
+    "GAUGE": 0,
+    "COUNTER": 1,
+    "PROPERTY": 2,
+    "STATS": 3,
+    "SET": 4,
+    "HISTOGRAM": 5,
+  }
+
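
Note: TUnit is what the new numberFormat knockout binding switches on, with unit 3 (BYTES) going to the bytesize formatter and unit 5 (TIME_NS) to the duration formatter. A small illustration of the generated enum, not part of the commit, assuming gen-py is on sys.path as analyze.py arranges:

from Metrics.ttypes import TUnit

def unit_name(unit_code):
  # e.g. 3 -> 'BYTES', 5 -> 'TIME_NS'; unknown codes fall back to 'NONE'.
  return TUnit._VALUES_TO_NAMES.get(unit_code, 'NONE')

assert unit_name(TUnit.BYTES) == 'BYTES'
assert unit_name(TUnit.TIME_NS) == 'TIME_NS'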

+ 1 - 0
desktop/libs/libanalyze/gen-py/RuntimeProfile/__init__.py

@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants']

+ 11 - 0
desktop/libs/libanalyze/gen-py/RuntimeProfile/constants.py

@@ -0,0 +1,11 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+from ttypes import *
+

+ 646 - 0
desktop/libs/libanalyze/gen-py/RuntimeProfile/ttypes.py

@@ -0,0 +1,646 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+import Metrics.ttypes
+
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+
+class TCounter:
+  """
+  Attributes:
+   - name
+   - unit
+   - value
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'name', None, None, ), # 1
+    (2, TType.I32, 'unit', None, None, ), # 2
+    (3, TType.I64, 'value', None, None, ), # 3
+  )
+
+  def __init__(self, name=None, unit=None, value=None,):
+    self.name = name
+    self.unit = unit
+    self.value = value
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.unit = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.value = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TCounter')
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 1)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.unit is not None:
+      oprot.writeFieldBegin('unit', TType.I32, 2)
+      oprot.writeI32(self.unit)
+      oprot.writeFieldEnd()
+    if self.value is not None:
+      oprot.writeFieldBegin('value', TType.I64, 3)
+      oprot.writeI64(self.value)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.name is None:
+      raise TProtocol.TProtocolException(message='Required field name is unset!')
+    if self.unit is None:
+      raise TProtocol.TProtocolException(message='Required field unit is unset!')
+    if self.value is None:
+      raise TProtocol.TProtocolException(message='Required field value is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class TEventSequence:
+  """
+  Attributes:
+   - name
+   - timestamps
+   - labels
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'name', None, None, ), # 1
+    (2, TType.LIST, 'timestamps', (TType.I64,None), None, ), # 2
+    (3, TType.LIST, 'labels', (TType.STRING,None), None, ), # 3
+  )
+
+  def __init__(self, name=None, timestamps=None, labels=None,):
+    self.name = name
+    self.timestamps = timestamps
+    self.labels = labels
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.LIST:
+          self.timestamps = []
+          (_etype3, _size0) = iprot.readListBegin()
+          for _i4 in xrange(_size0):
+            _elem5 = iprot.readI64();
+            self.timestamps.append(_elem5)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.labels = []
+          (_etype9, _size6) = iprot.readListBegin()
+          for _i10 in xrange(_size6):
+            _elem11 = iprot.readString();
+            self.labels.append(_elem11)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TEventSequence')
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 1)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.timestamps is not None:
+      oprot.writeFieldBegin('timestamps', TType.LIST, 2)
+      oprot.writeListBegin(TType.I64, len(self.timestamps))
+      for iter12 in self.timestamps:
+        oprot.writeI64(iter12)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.labels is not None:
+      oprot.writeFieldBegin('labels', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.labels))
+      for iter13 in self.labels:
+        oprot.writeString(iter13)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.name is None:
+      raise TProtocol.TProtocolException(message='Required field name is unset!')
+    if self.timestamps is None:
+      raise TProtocol.TProtocolException(message='Required field timestamps is unset!')
+    if self.labels is None:
+      raise TProtocol.TProtocolException(message='Required field labels is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class TTimeSeriesCounter:
+  """
+  Attributes:
+   - name
+   - unit
+   - period_ms
+   - values
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'name', None, None, ), # 1
+    (2, TType.I32, 'unit', None, None, ), # 2
+    (3, TType.I32, 'period_ms', None, None, ), # 3
+    (4, TType.LIST, 'values', (TType.I64,None), None, ), # 4
+  )
+
+  def __init__(self, name=None, unit=None, period_ms=None, values=None,):
+    self.name = name
+    self.unit = unit
+    self.period_ms = period_ms
+    self.values = values
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.unit = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.period_ms = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.LIST:
+          self.values = []
+          (_etype17, _size14) = iprot.readListBegin()
+          for _i18 in xrange(_size14):
+            _elem19 = iprot.readI64();
+            self.values.append(_elem19)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TTimeSeriesCounter')
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 1)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.unit is not None:
+      oprot.writeFieldBegin('unit', TType.I32, 2)
+      oprot.writeI32(self.unit)
+      oprot.writeFieldEnd()
+    if self.period_ms is not None:
+      oprot.writeFieldBegin('period_ms', TType.I32, 3)
+      oprot.writeI32(self.period_ms)
+      oprot.writeFieldEnd()
+    if self.values is not None:
+      oprot.writeFieldBegin('values', TType.LIST, 4)
+      oprot.writeListBegin(TType.I64, len(self.values))
+      for iter20 in self.values:
+        oprot.writeI64(iter20)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.name is None:
+      raise TProtocol.TProtocolException(message='Required field name is unset!')
+    if self.unit is None:
+      raise TProtocol.TProtocolException(message='Required field unit is unset!')
+    if self.period_ms is None:
+      raise TProtocol.TProtocolException(message='Required field period_ms is unset!')
+    if self.values is None:
+      raise TProtocol.TProtocolException(message='Required field values is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class TRuntimeProfileNode:
+  """
+  Attributes:
+   - name
+   - num_children
+   - counters
+   - metadata
+   - indent
+   - info_strings
+   - info_strings_display_order
+   - child_counters_map
+   - event_sequences
+   - time_series_counters
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'name', None, None, ), # 1
+    (2, TType.I32, 'num_children', None, None, ), # 2
+    (3, TType.LIST, 'counters', (TType.STRUCT,(TCounter, TCounter.thrift_spec)), None, ), # 3
+    (4, TType.I64, 'metadata', None, None, ), # 4
+    (5, TType.BOOL, 'indent', None, None, ), # 5
+    (6, TType.MAP, 'info_strings', (TType.STRING,None,TType.STRING,None), None, ), # 6
+    (7, TType.LIST, 'info_strings_display_order', (TType.STRING,None), None, ), # 7
+    (8, TType.MAP, 'child_counters_map', (TType.STRING,None,TType.SET,(TType.STRING,None)), None, ), # 8
+    (9, TType.LIST, 'event_sequences', (TType.STRUCT,(TEventSequence, TEventSequence.thrift_spec)), None, ), # 9
+    (10, TType.LIST, 'time_series_counters', (TType.STRUCT,(TTimeSeriesCounter, TTimeSeriesCounter.thrift_spec)), None, ), # 10
+  )
+
+  def __init__(self, name=None, num_children=None, counters=None, metadata=None, indent=None, info_strings=None, info_strings_display_order=None, child_counters_map=None, event_sequences=None, time_series_counters=None,):
+    self.name = name
+    self.num_children = num_children
+    self.counters = counters
+    self.metadata = metadata
+    self.indent = indent
+    self.info_strings = info_strings
+    self.info_strings_display_order = info_strings_display_order
+    self.child_counters_map = child_counters_map
+    self.event_sequences = event_sequences
+    self.time_series_counters = time_series_counters
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.num_children = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.counters = []
+          (_etype24, _size21) = iprot.readListBegin()
+          for _i25 in xrange(_size21):
+            _elem26 = TCounter()
+            _elem26.read(iprot)
+            self.counters.append(_elem26)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I64:
+          self.metadata = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.BOOL:
+          self.indent = iprot.readBool();
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.MAP:
+          self.info_strings = {}
+          (_ktype28, _vtype29, _size27 ) = iprot.readMapBegin()
+          for _i31 in xrange(_size27):
+            _key32 = iprot.readString();
+            _val33 = iprot.readString();
+            self.info_strings[_key32] = _val33
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.LIST:
+          self.info_strings_display_order = []
+          (_etype37, _size34) = iprot.readListBegin()
+          for _i38 in xrange(_size34):
+            _elem39 = iprot.readString();
+            self.info_strings_display_order.append(_elem39)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.MAP:
+          self.child_counters_map = {}
+          (_ktype41, _vtype42, _size40 ) = iprot.readMapBegin()
+          for _i44 in xrange(_size40):
+            _key45 = iprot.readString();
+            _val46 = set()
+            (_etype50, _size47) = iprot.readSetBegin()
+            for _i51 in xrange(_size47):
+              _elem52 = iprot.readString();
+              _val46.add(_elem52)
+            iprot.readSetEnd()
+            self.child_counters_map[_key45] = _val46
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 9:
+        if ftype == TType.LIST:
+          self.event_sequences = []
+          (_etype56, _size53) = iprot.readListBegin()
+          for _i57 in xrange(_size53):
+            _elem58 = TEventSequence()
+            _elem58.read(iprot)
+            self.event_sequences.append(_elem58)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 10:
+        if ftype == TType.LIST:
+          self.time_series_counters = []
+          (_etype62, _size59) = iprot.readListBegin()
+          for _i63 in xrange(_size59):
+            _elem64 = TTimeSeriesCounter()
+            _elem64.read(iprot)
+            self.time_series_counters.append(_elem64)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TRuntimeProfileNode')
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 1)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.num_children is not None:
+      oprot.writeFieldBegin('num_children', TType.I32, 2)
+      oprot.writeI32(self.num_children)
+      oprot.writeFieldEnd()
+    if self.counters is not None:
+      oprot.writeFieldBegin('counters', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRUCT, len(self.counters))
+      for iter65 in self.counters:
+        iter65.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.metadata is not None:
+      oprot.writeFieldBegin('metadata', TType.I64, 4)
+      oprot.writeI64(self.metadata)
+      oprot.writeFieldEnd()
+    if self.indent is not None:
+      oprot.writeFieldBegin('indent', TType.BOOL, 5)
+      oprot.writeBool(self.indent)
+      oprot.writeFieldEnd()
+    if self.info_strings is not None:
+      oprot.writeFieldBegin('info_strings', TType.MAP, 6)
+      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.info_strings))
+      for kiter66,viter67 in self.info_strings.items():
+        oprot.writeString(kiter66)
+        oprot.writeString(viter67)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    if self.info_strings_display_order is not None:
+      oprot.writeFieldBegin('info_strings_display_order', TType.LIST, 7)
+      oprot.writeListBegin(TType.STRING, len(self.info_strings_display_order))
+      for iter68 in self.info_strings_display_order:
+        oprot.writeString(iter68)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.child_counters_map is not None:
+      oprot.writeFieldBegin('child_counters_map', TType.MAP, 8)
+      oprot.writeMapBegin(TType.STRING, TType.SET, len(self.child_counters_map))
+      for kiter69,viter70 in self.child_counters_map.items():
+        oprot.writeString(kiter69)
+        oprot.writeSetBegin(TType.STRING, len(viter70))
+        for iter71 in viter70:
+          oprot.writeString(iter71)
+        oprot.writeSetEnd()
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    if self.event_sequences is not None:
+      oprot.writeFieldBegin('event_sequences', TType.LIST, 9)
+      oprot.writeListBegin(TType.STRUCT, len(self.event_sequences))
+      for iter72 in self.event_sequences:
+        iter72.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.time_series_counters is not None:
+      oprot.writeFieldBegin('time_series_counters', TType.LIST, 10)
+      oprot.writeListBegin(TType.STRUCT, len(self.time_series_counters))
+      for iter73 in self.time_series_counters:
+        iter73.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.name is None:
+      raise TProtocol.TProtocolException(message='Required field name is unset!')
+    if self.num_children is None:
+      raise TProtocol.TProtocolException(message='Required field num_children is unset!')
+    if self.counters is None:
+      raise TProtocol.TProtocolException(message='Required field counters is unset!')
+    if self.metadata is None:
+      raise TProtocol.TProtocolException(message='Required field metadata is unset!')
+    if self.indent is None:
+      raise TProtocol.TProtocolException(message='Required field indent is unset!')
+    if self.info_strings is None:
+      raise TProtocol.TProtocolException(message='Required field info_strings is unset!')
+    if self.info_strings_display_order is None:
+      raise TProtocol.TProtocolException(message='Required field info_strings_display_order is unset!')
+    if self.child_counters_map is None:
+      raise TProtocol.TProtocolException(message='Required field child_counters_map is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class TRuntimeProfileTree:
+  """
+  Attributes:
+   - nodes
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'nodes', (TType.STRUCT,(TRuntimeProfileNode, TRuntimeProfileNode.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, nodes=None,):
+    self.nodes = nodes
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.nodes = []
+          (_etype77, _size74) = iprot.readListBegin()
+          for _i78 in xrange(_size74):
+            _elem79 = TRuntimeProfileNode()
+            _elem79.read(iprot)
+            self.nodes.append(_elem79)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('TRuntimeProfileTree')
+    if self.nodes is not None:
+      oprot.writeFieldBegin('nodes', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.nodes))
+      for iter80 in self.nodes:
+        iter80.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.nodes is None:
+      raise TProtocol.TProtocolException(message='Required field nodes is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
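
Note: TRuntimeProfileTree carries the profile as a flat, pre-order list of TRuntimeProfileNode entries, each recording num_children, so the hierarchy can be rebuilt by consuming that list recursively; libanalyze's parser presumably does something equivalent when it builds its Node wrapper below. A hedged sketch, not the commit's actual code:

def build_tree(nodes, index=0):
  # Rebuild (node, children) pairs from the pre-order flattened node list.
  root = nodes[index]
  index += 1
  children = []
  for _ in xrange(root.num_children):
    child, index = build_tree(nodes, index)
    children.append(child)
  return (root, children), index

# Usage, given a decoded TRuntimeProfileTree: subtree, _ = build_tree(tree.nodes)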

+ 1 - 0
desktop/libs/libanalyze/hueversion.py

@@ -0,0 +1 @@
+../../../VERSION

+ 12 - 0
desktop/libs/libanalyze/reasons/agg_performance.json

@@ -0,0 +1,12 @@
+{
+  "type": "SQLOperator",
+  "node_name": "AGGREGATION_NODE",
+  "metric_names": [
+    "LocalTime"
+  ],
+  "rule": {
+    "expr": "vars['LocalTime'] - float(vars['InputRows']) / 0.01",
+    "message": "Excess time (over expected time) spent in the aggregate; might be caused by complex group by",
+    "label": "Slow Aggregate"
+  }
+}
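
Note: each reasons/*.json file pairs counter names with a rule whose condition and expr are Python expressions over a vars dict of counter values; rules.TopDownAnalysis (rules.py is not included in this excerpt) evaluates them to produce the healthChecks returned by the alanize view. A simplified, hypothetical harness for a single reason file, with invented counter values:

import json

def evaluate_reason(path, counters):
  with open(path) as f:
    reason = json.load(f)
  rule = reason['rule']
  scope = {'vars': counters}  # the expressions index into vars[...]
  if 'condition' in rule and not eval(rule['condition'], scope):
    return None
  impact = eval(rule['expr'], scope)
  if impact <= 0:
    return None
  return {'label': rule['label'], 'message': rule['message'], 'impact': impact}

# LocalTime in ns, InputRows as a row count; both values are made up.
print evaluate_reason('agg_performance.json',
                      {'LocalTime': 5 * 10**9, 'InputRows': 10**6})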

+ 19 - 0
desktop/libs/libanalyze/reasons/bytes_read_skew.json

@@ -0,0 +1,19 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "BytesRead"
+  ],
+  "exprs": [
+    "max",
+    "avg"
+  ],
+  "unit_id": 0,
+  "rule": {
+    "condition": "vars['IOBound']==True",
+    "expr": "(vars['max'] - vars['avg']) / 100000000 / 5",
+    "message": "Addition IO time cost by the skew (assuming 5 disks)",
+    "label": "Bytes Read Skew",
+    "prio": 2
+  }
+}

+ 13 - 0
desktop/libs/libanalyze/reasons/join_performance.json

@@ -0,0 +1,13 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HASH_JOIN_NODE",
+  "metric_names": [
+    "ProbeRows",
+    "ProbeTime"
+  ],
+  "rule": {
+    "expr": "vars['ProbeTime'] - float(vars['ProbeRows']) / 0.005",
+    "message": "Excess time (over expected time) spent in the hash join",
+    "label": "Slow Hash Join"
+  }
+}

+ 15 - 0
desktop/libs/libanalyze/reasons/remote_scan_ranges.json

@@ -0,0 +1,15 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "BytesReadRemoteUnexpected"
+  ],
+  "unit_id": 0,
+  "rule": {
+    "condition": "vars['IOBound']==True",
+    "expr": "vars['BytesReadRemoteUnexpected'] * (1/30 - 1/100) * 1/1024/1024",
+    "message": "Addition IO time cost by the remote read (assuming 30MB/sec remote)",
+    "label": "Remote reads",
+    "prio": 2
+  }
+}

+ 18 - 0
desktop/libs/libanalyze/reasons/rows_read_skew.json

@@ -0,0 +1,18 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "RowsRead"
+  ],
+  "exprs": [
+    "max",
+    "avg"
+  ],
+  "unit_id": 0,
+  "rule": {
+    "expr": "(vars['max'] - vars['avg']) / 40.0 * 1000",
+    "message": "The skew (max-avg) in rows processed",
+    "label": "Rows Read Skew",
+    "prio": 2
+  }
+}

+ 15 - 0
desktop/libs/libanalyze/reasons/scan_performance.json

@@ -0,0 +1,15 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "RowsRead",
+    "ScannerThreadsUserTime",
+    "ScannerThreadsSysTime",
+    "AverageScannerThreadConcurrency"
+  ],
+  "rule": {
+    "expr": "(vars['ScannerThreadsUserTime'] + vars['ScannerThreadsSysTime'] - vars['RowsRead'] * 100) / max(1,vars['AverageScannerThreadConcurrency'])",
+    "message": "Predicates might be expensive (expectes speed 10m rows per sec per core)",
+    "label": "Slow HDFS Scan"
+  }
+}

+ 16 - 0
desktop/libs/libanalyze/reasons/scanner_parallelism.json

@@ -0,0 +1,16 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "AverageScannerThreadConcurrency",
+    "LocalTime"
+  ],
+  "unit_id": 0,
+  "rule": {
+    "condition": "vars['IOBound']==False",
+    "expr": "(8 - vars['AverageScannerThreadConcurrency']) / 8 * vars['LocalTime']",
+    "message": "Speed can be improved by that much if there's 8 scanner threads",
+    "label": "Lack of scanner thread parallelism",
+    "prio": 2
+  }
+}

+ 15 - 0
desktop/libs/libanalyze/reasons/selective_scan.json

@@ -0,0 +1,15 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "metric_names": [
+    "RowsRead",
+    "RowsReturned",
+    "LocalTime"
+  ],
+  "rule": {
+    "condition": "vars['RowsRead']>8000000",
+    "expr": "float(vars['RowsRead'] - vars['RowsReturned']) / vars['LocalTime']",
+    "message": "Filters are selective. Consider a more aggresive partitioning scheme",
+    "label": "Insufficient Partitioning"
+  }
+}

+ 15 - 0
desktop/libs/libanalyze/reasons/skew.json

@@ -0,0 +1,15 @@
+{
+  "type": "SQLOperator",
+  "node_name": "ANY",
+  "metric_names": "LocalTime",
+  "exprs": [
+    "max",
+    "avg"
+  ],
+  "unit_id": 5,
+  "rule": {
+    "message": "The skew (max-avg) contributed this amount of time to this SQL operator",
+    "expr": "(vars['max'] - vars['avg'])",
+    "label": "TotalTime Skew"
+  }
+}
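
Note: rules listing "exprs": ["max", "avg"] appear to aggregate the counter across fragment instances before the expression runs, so this skew rule charges the gap between the slowest host and the average to the operator. An illustrative calculation with invented per-host numbers (the real aggregation lives in rules.py, which is not shown here):

# Made-up per-host LocalTime values, in ns, for one operator.
per_host = {'host-1': 9 * 10**9, 'host-2': 3 * 10**9, 'host-3': 3 * 10**9}

agg = {
  'max': max(per_host.values()),
  'avg': sum(per_host.values()) / float(len(per_host)),
}
# expr "(vars['max'] - vars['avg'])": the straggler added about 4s of
# wall-clock time to this operator (9s max vs 5s average).
print agg['max'] - agg['avg']  # 4000000000.0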

+ 16 - 0
desktop/libs/libanalyze/reasons/slow_table_sink.json

@@ -0,0 +1,16 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HdfsTableSink",
+  "collapse": true,
+  "metric_names": [
+    "BytesWritten",
+    "LocalTime"
+  ],
+  "unit_id": 5,
+  "rule": {
+    "condition": "vars['BytesWritten'] > 0",
+    "expr": "float(vars['LocalTime']) - 0.01 / float(vars['BytesWritten'])",
+    "message": "Write speed should be at least 1MB/sec. It's slower than expected",
+    "label": "Slow write speed"
+  }
+}

+ 12 - 0
desktop/libs/libanalyze/reasons/sort_performance.json

@@ -0,0 +1,12 @@
+{
+  "type": "SQLOperator",
+  "node_name": "SORT_NODE",
+  "metric_names": [
+    "LocalTime"
+  ],
+  "rule": {
+    "expr": "vars['LocalTime'] - float(vars['InputRows']) / 0.01",
+    "message": "Excess time (over expected time) spent in the sort; might be caused by too many sorting column",
+    "label": "Slow Sorting"
+  }
+}

+ 11 - 0
desktop/libs/libanalyze/reasons/spilling.json

@@ -0,0 +1,11 @@
+{
+  "type": "SQLOperator",
+  "node_name": ["HashJoinNode", "AGGREGATION_NODE"],
+  "metric_names": "SpilledPartitions",
+  "rule": {
+    "condition": "vars['SpilledPartitions'] > 0",
+    "expr": "1",
+    "message": "This operation has spilled to disk. Check if the ressource configuration of Impala can be changed to allow for a higher memory limit.",
+    "label": " Spilled Partitions"
+  }
+}

+ 16 - 0
desktop/libs/libanalyze/reasons/too_many_columns.json

@@ -0,0 +1,16 @@
+{
+  "type": "SQLOperator",
+  "node_name": "HDFS_SCAN_NODE",
+  "collapse": true,
+  "metric_names": [
+    "NumColumns",
+    "LocalTime"
+  ],
+  "unit_id": 5,
+  "rule": {
+    "condition": "vars['NumColumns'] > 15",
+    "expr": "float(vars['NumColumns'] - 15) / float(vars['NumColumns']) * vars['LocalTime']",
+    "message": "Number of materialized columns is high (>15). Consider materializing less columns to improve the performance.",
+    "label": "Many Materialized Columns"
+  }
+}

+ 29 - 0
desktop/libs/libanalyze/setup.py

@@ -0,0 +1,29 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup, find_packages
+from hueversion import VERSION
+
+setup(
+  name='libanalyze',
+  version=VERSION,
+  url='http://github.com/cloudera/hue',
+  description='Analyze Execution Profile for Impala',
+  packages=find_packages('src'),
+  package_dir={'': 'src'},
+  install_requires=['setuptools', 'desktop'],
+  entry_points={'desktop.sdk.lib': 'libanalyze=libanalyze'}
+)

+ 15 - 0
desktop/libs/libanalyze/src/libanalyze/__init__.py

@@ -0,0 +1,15 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 397 - 0
desktop/libs/libanalyze/src/libanalyze/analyze.py

@@ -0,0 +1,397 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import json
+import os
+import re
+import sys
+sys.path.append(os.path.join(os.path.dirname(__file__), "../..", 'gen-py'))
+
+from RuntimeProfile.ttypes import *
+from thrift.protocol import TCompactProtocol
+from thrift.transport import TTransport
+
+from libanalyze import dot
+from libanalyze import gjson as jj
+from libanalyze import models
+
+
+class Node(object):
+  """Simple Node"""
+
+  def __init__(self, val):
+    super(Node, self).__init__()
+    self.val = val
+    self.children = []
+    self.fragment = None
+    self.fragment_instance = None
+    self.pos = 0
+
+  def add_child(self, c):
+    self.children.append(c)
+
+  def find_by_name(self, pattern):
+    """Returns the first node whose name matches 'name'."""
+    if re.search(pattern, self.val.name) is not None:
+      return self
+
+    for x in self.children:
+      tmp = x.find_by_name(pattern)
+      if tmp:
+        return tmp
+
+  def find_all_by_name(self, pattern):
+    result = []
+    if re.search(pattern, self.val.name):
+      result.append(self)
+    for x in self.children:
+      result += x.find_all_by_name(pattern)
+    return result
+
+  def find_all_non_fragment_nodes(self):
+    result = []
+    if not self.is_fragment() and not self.is_fragment_instance():
+      result.append(self)
+    for x in self.children:
+      result += x.find_all_non_fragment_nodes()
+    return result
+
+  def is_fragment(self):
+    return re.search(r'(.*?Fragment) (F\d+)', self.val.name) is not None
+
+  def is_fragment_instance(self):
+    return re.search(r'Instance\s(.*?)\s\(host=(.*?)\)', self.val.name) is not None
+
+  def is_regular(self):
+    id = self.id()
+    matches = id and re.search(r'[a-zA-Z]+', id)
+    return id and matches is None
+
+  def name(self):
+    matches = re.search(r'(.*?)(\s+\(((dst_)?id)=(\d+)\))?$', self.val.name)
+    if matches and matches.group(5):
+      return matches.group(1)
+    elif self.is_fragment():
+      return re.search(r'(.*?Fragment) (F\d+)', self.val.name).group(1)
+    else:
+      return self.val.name
+
+  def id(self):
+    matches = re.search(r'(.*?)(\s+\(((dst_)?id)=(\d+)\))?$', self.val.name)
+    if matches and matches.group(5) and not matches.group(4):
+      return matches.group(5)
+    elif self.is_fragment():
+      return re.search(r'(.*?Fragment) (F\d+)', self.val.name).group(2)
+    elif self.is_fragment_instance():
+      return re.search(r'Instance\s(.*?)\s\(host=(.*?)\)', self.val.name).group(1)
+    elif self.fragment:
+      return self.fragment.id() + ' ' + str(self.pos)
+
+  def is_plan_node(self):
+    matches = re.search('(.*?)(\s+\(((dst_)?id)=(\d+)\))?$', self.val.name)
+    return matches and not matches.group(4) and matches.group(5)
+
+  def find_by_id(self, pattern):
+    results = []
+    if self.id() == pattern:
+      results.append(self)
+
+    for x in self.children:
+      tmp = x.find_by_id(pattern)
+      results += tmp
+
+    return results
+
+  def find_all_fragments(self):
+    results = []
+    if self.is_fragment_instance():
+      results.append(self)
+
+    for x in self.children:
+      tmp = x.find_all_fragments()
+      results += tmp
+
+    return results
+
+  def foreach_lambda(self, method, fragment=None, fragment_instance=None, pos=0):
+    self.fragment = fragment
+    self.fragment_instance = fragment_instance
+    self.pos = pos
+    if self.is_fragment():
+      fragment = self
+    elif self.is_fragment_instance():
+      fragment_instance = self
+
+    for idx, x in enumerate(self.children):
+      x.foreach_lambda(method, fragment=fragment, fragment_instance=fragment_instance, pos=idx)
+
+    method(self) # Post execution, because some results need child to have processed
+
+  def find_metric_by_name(self, pattern):
+    node = self
+    ctr_map = node.counter_map()
+    counters = []
+    for k in ctr_map:
+      if pattern == k:
+        counters.append({'name': ctr_map[k].name, 'value': ctr_map[k].value,
+                  'unit': ctr_map[k].unit, 'node': node})
+
+    for k in node.child_counters_map():
+      v = node.child_counters_map()[k]
+      if pattern == k:
+        parent = None
+        if k in ctr_map:
+          parent = {'name': ctr_map[k].name, 'value': ctr_map[k].value,
+                  'unit': ctr_map[k].unit}
+        for cc in v:
+          counters.append({'name': ctr_map[cc].name, 'value': ctr_map[cc].value,
+                  'unit': ctr_map[cc].unit, 'parent': parent, 'node': node})
+    return counters
+
+  # Only for fragments
+  def is_averaged(self):
+    return re.search(r"Averaged", self.val.name) is not None
+
+  # Only for fragments
+  def is_coordinator(self):
+    return re.match(r'Coordinator', self.val.name) is not None
+
+  # Only for fragments
+  def host(self):
+    if self.fragment_instance:
+      c = self.fragment_instance
+    elif self.fragment:
+      c = self.fragment.children[0]
+    else:
+      return None
+    m = re.search(r'Instance\s(.*?)\s\(host=(.*?)\)', c.val.name)
+    if m:
+        #frag.instance_id = m.group(1)
+        #frag.host = m.group(2)
+        #frag_node = c
+        return m.group(2)
+
+  def info_strings(self):
+    return self.val.info_strings
+
+  def info_string_order(self):
+    return self.val.info_strings_display_order
+
+  def child_counters_map(self):
+    return self.val.child_counters_map
+
+  def counter_map(self):
+    ctr = {}
+    if self.val.counters:
+        for c in self.val.counters:
+            ctr[c.name] = c
+    return ctr
+
+  def repr(self, indent):
+    buffer = indent + self.val.name + "\n"
+    if self.val.info_strings:
+      for k in self.val.info_strings:
+        buffer += 2 * indent + " - " + k + "=" + \
+          self.val.info_strings[k][:40] + "\n"
+
+    if self.val.event_sequences:
+      for s in self.val.event_sequences:
+        buffer += 2 * indent + "- " + s.name + \
+          " S[" + ", ".join(s.labels) + "]" + "\n"
+
+    ctr = {}
+    if self.val.counters:
+      for c in self.val.counters:
+        buffer += 2 * indent + c.name + ":" + str(c) + "\n"
+        ctr[c.name] = c
+    return buffer
+
+
+def decode_thrift(val):
+  """Deserialize a binary string into the TRuntimeProfileTree structure"""
+  transport = TTransport.TMemoryBuffer(val)
+  protocol = TCompactProtocol.TCompactProtocol(transport)
+  rp = TRuntimeProfileTree()
+  rp.read(protocol)
+  return rp
+
+
+def decompress(val):
+  return val.decode("zlib")
+
+
+def summary(profile):
+  summary = profile.find_by_name('Summary')
+  execution_profile = profile.find_by_name('Execution Profile')
+  counter_map = summary.counter_map()
+  counter_map_execution_profile = execution_profile.counter_map()
+  host_list = models.host_by_metric(profile, 'PeakMemoryUsage', exprs=[max])
+  host_list = sorted(host_list, key=lambda x: x[1], reverse=True)
+  peak_memory = models.TCounter(value=host_list[0][1], unit=3) if host_list else models.TCounter(value=0, unit=3) # The value is not always present
+  return [
+    {'key': 'PlanningTime', 'value': counter_map['PlanningTime'].value, 'unit': counter_map['PlanningTime'].unit},
+    {'key': 'RemoteFragmentsStarted', 'value': counter_map['RemoteFragmentsStarted'].value, 'unit': counter_map['RemoteFragmentsStarted'].unit},
+    {'key': 'TotalTime', 'value': counter_map_execution_profile['TotalTime'].value, 'unit': counter_map_execution_profile['TotalTime'].unit},
+    {'key': 'PeakMemoryUsage', 'value': peak_memory.value, 'unit': peak_memory.unit}
+  ]
+
+def heatmap_by_host(profile, counter_name):
+  rows = models.host_by_metric(profile,
+                               counter_name,
+                               exprs=[max, sum])
+  # Modify the data to contain the relative data as well
+  sum_sum = 0
+  max_max = 0
+  if (rows):
+    sum_sum = float(sum([v[2] for v in rows]))
+    max_max = float(max([v[1] for v in rows]))
+
+  result = []
+  for r in rows:
+    result.append([r[0], float(r[1]), float(r[2]),
+        float(r[1]) / float(max_max) if float(max_max) != 0 else 0,
+        float(r[2]) / float(sum_sum) if float(sum_sum) != 0 else 0])
+  return { 'data': result, 'max': max_max }
+
+def parse(file_name):
+  """Given a file_name, open the file and decode the first line of the file
+  into the TRuntimeProfileTree structure."""
+  with open(file_name) as fid:
+    for line in fid:
+      val = base64.decodestring(line.strip())
+      try:
+          val = decompress(val.strip())
+      except:
+          pass
+      return decode_thrift(val)
+
+def parse_data(data):
+  val = base64.decodestring(data)
+  try:
+      val = decompress(val.strip())
+  except:
+      pass
+  return decode_thrift(val)
+
+def pre_order_traversal(nodes, index, level=0):
+  # print index, nodes[index].num_children, nodes[index].name, level
+  node = Node(nodes[index])
+  pos = index
+  for x in range(node.val.num_children):
+      child_node, pos = pre_order_traversal(nodes, pos + 1, level + 1)
+      node.add_child(child_node)
+  return node, pos
+
+def analyze(profile):
+  """The runtime profile tree is pre-order flattened"""
+  node, _ = pre_order_traversal(profile.nodes, 0)
+  return node
+
+def get_plan(profile):
+  return profile.find_by_name("Sumary").val.info_strings["Plan"]
+
+
+def to_dot(profile):
+  fragments = [
+      x for x in profile.find_by_name("Execution Profile").children if re.search(
+          "Averaged|Coordinator",
+          x.val.name)]
+  return dot.graph_to_dot(fragments)
+
+
+def to_json(profile):
+  fragments = [
+      x for x in profile.find_by_name("Execution Profile").children if re.search(
+          "Averaged|Coordinator",
+          x.val.name)]
+  return json.dumps(jj.graph_to_json(fragments))
+
+
+def print_tree(node, level, indent):
+  if level == 0:
+      return
+  print node.repr(indent)
+  for c in node.children:
+      print_tree(c, level - 1, indent + "  ")
+
+
+if __name__ == '__main__':
+  pass
+  # from datetime import datetime
+  # from dateutil.parser import parse as dtparse
+
+  # db_engine = create_engine('sqlite:///profiles.db', echo=False)
+  thrift = parse(sys.argv[1])
+  root = analyze(thrift)
+  # for n in root.find_by_name("Summary").info_strings():
+  #     print n
+  #
+
+  summary = root.find_by_name("Execution Profile")
+  print_tree(summary, 4, " ")
+
+  # db.Base.metadata.create_all(db_engine)
+  # Session2 = sessionmaker(bind=db_engine)
+
+  # query_id = "ea48c505d8604dc4:d4c46af6d57afa4"
+
+  # from sqlalchemy.schema import *
+  # impala_engine = create_engine('impala://mgrund-desktop.ca.cloudera.com/default', echo=False)
+  # dstat = Table('dstat', MetaData(bind=impala_engine), autoload=True)
+
+  # Session = sessionmaker(bind=impala_engine)
+  # session = Session()
+  # data = session.query(dstat).all()
+
+  # sss = Session2()
+  # # tt = db.Dstat.__table__
+  # # cols = [c.key for c in db.Dstat.__table__.columns]
+
+  # # for i, d in enumerate(data):
+  # #     sss.add(db.Dstat(**dict(zip(cols[1:], d))))
+
+  # sss.commit()
+
+  # from sqlalchemy.sql import func
+
+  # #sss.query(db.RuntimeProfile).filter(db.RuntimeProfile.query_id==query_id)
+  # instances = db.query_node_by_metric(sss, query_id, "HASH_JOIN_NODE", "TotalTime", 5)
+
+  # for i in instances:
+  #     node = sss.query(db.Node).filter(db.Node.id==i[3]).one()
+  #     infos = {x.name: x.val for x in node.info_strings}
+
+  #     left = (dtparse(infos["StartTime"]) - datetime(1970,1,1)).total_seconds() + 7*3600
+  #     right = (dtparse(infos["StopTime"]) - datetime(1970,1,1)).total_seconds() + 7*3600
+
+  # print "HASH_JOIN_NODE", i.fid, i.host, sss.query(func.avg(db.Dstat.usr),
+  # func.max(db.Dstat.usr)).filter(db.Dstat.ts<=left,
+  # db.Dstat.ts<=right).all()
+
+  # #to_db(db_engine, root)
+
+  #summary = root.find_by_name("Execution Profile")
+
+  # #print summary.info_strings()["ExecSummary"]
+
+  #print_tree(summary, 3, "  ")
+  # #list = root.find_all_by_name("CodeGen")
+
+  # #print next(x for x in list[0].val.counters if x.name == "TotalTime").value / float(10**9)
+  # #n = root.find_by_name("Averaged Fragment F01")
+  # #print_tree(n, 2, "  ")
+
+  # #summary = root.find_by_name("ImpalaServer")
+  # #print summary.val.info_strings.keys()
+  # #print summary.val.info_strings["Plan"]
+
+# #print summary.val.info_strings["ExecSummary"]

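For orientation, a minimal sketch of how the module above can be driven from a Python 2 shell. The profile path is hypothetical; it stands for any single-line, base64-encoded (optionally zlib-compressed) profile dump of the kind parse() expects:

    from libanalyze import analyze as a

    # parse() decodes the first line of the dump into a TRuntimeProfileTree;
    # analyze() rebuilds the pre-order-flattened node list into a Node tree.
    tree = a.analyze(a.parse('testdata/impala_query_profile.txt'))

    # Print the execution subtree, two levels deep.
    a.print_tree(tree.find_by_name('Execution Profile'), 2, '  ')

    # Pull a named counter (assumed present) from every HDFS scan node.
    for scan in tree.find_all_by_name('HDFS_SCAN_NODE'):
        print scan.id(), scan.find_metric_by_name('TotalTime')[0]['value']
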
File diff suppressed because it is too large
+ 37 - 0
desktop/libs/libanalyze/src/libanalyze/analyze_test.py


+ 47 - 0
desktop/libs/libanalyze/src/libanalyze/dot.py

@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+
+def pre_order_graph(node, nodes, edges, parent):
+    match = re.search("(.*?)\s\(id=(\d+)\)", node.val.name)
+    if match:
+        if parent:
+            edges.append("node_%s -> %s;" % (match.group(2), parent))
+        nodes.append("node_%s [label=\"%s\"];" % (match.group(2),
+                                                  match.group(1)))
+        for c in node.children:
+            pre_order_graph(c, nodes, edges, "node_%s" % (match.group(2), ))
+
+
+def graph_to_dot(fragments):
+    """Parse the list of fragements to build the graph"""
+    # get all nodes of the fragement
+    nodes = []
+    edges = []
+    for f in fragments:
+        parent = None
+        for c in f.children:
+            dst = re.search("dst_id=(\d+)", c.val.name)
+            if dst:
+                parent = "node_%s" % (dst.group(1))
+            pre_order_graph(c, nodes, edges, parent)
+
+    return """ digraph q { %s %s } """ % (
+        " ".join(nodes),
+        " ".join(edges)
+    )

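graph_to_dot() only needs the Averaged/Coordinator fragment subtrees, and analyze.to_dot() performs that selection. A small sketch (hypothetical profile path) that writes a Graphviz file:

    from libanalyze import analyze as a

    tree = a.analyze(a.parse('testdata/impala_query_profile.txt'))

    # to_dot() picks the Averaged/Coordinator fragments and delegates to graph_to_dot().
    with open('plan.dot', 'w') as out:
        out.write(a.to_dot(tree))
    # The file can then be rendered with Graphviz, e.g. `dot -Tpng plan.dot -o plan.png`.
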
+ 45 - 0
desktop/libs/libanalyze/src/libanalyze/exprs.py

@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+def expr_min(data):
+    result = (data[0], 0)
+    for i, v in enumerate(data):
+        if v < result[0]:
+            result = (v, i)
+    return result
+
+
+def expr_max(data):
+    result = (data[0], 0)
+    for i, v in enumerate(data):
+        if v > result[0]:
+            result = (v, i)
+    return result
+
+
+def expr_avg(data):
+    return (sum(data) / float(len(data)), None)
+
+
+def expr_sum(data):
+    return (sum(data), None)
+
+
+class Expr:
+
+    @classmethod
+    def evaluate(cls, expr, vars):
+        return eval(compile(expr, "<string>", "eval"), vars)

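The aggregation helpers return (value, index) pairs, and Expr.evaluate() runs a rule expression against a plain dict namespace; a short sketch of both, mirroring how the rules engine builds its variables (the metric values here are made up):

    from libanalyze import exprs

    print exprs.expr_max([3, 9, 4])  # (9, 1): the value and its index
    print exprs.expr_avg([3, 9, 4])  # (5.333..., None): no meaningful index

    # Rule expressions are plain Python evaluated with the supplied dict as namespace.
    local_vars = {'vars': {'BytesRead': 10 * 1024 ** 3, 'TotalTime': 5 * 10 ** 9}}
    print exprs.Expr.evaluate("vars['BytesRead'] / float(vars['TotalTime'])", local_vars)
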
+ 46 - 0
desktop/libs/libanalyze/src/libanalyze/gjson.py

@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+
+def pre_order_graph(node, nodes, edges, parent):
+    match = re.search("(.*?)\s\(id=(\d+)\)", node.val.name)
+    if match:
+        node_id = "node_%s" % match.group(2)
+        if parent:
+            edges.append([node_id, parent])
+        nodes[node_id] = {
+            "name": match.group(1)
+        }
+        for c in node.children:
+            pre_order_graph(c, nodes, edges, "node_%s" % (match.group(2), ))
+
+
+def graph_to_json(fragments):
+    """Parse the list of fragements to build the graph"""
+    # get all nodes of the fragement
+    nodes = {}
+    edges = []
+    for f in fragments:
+        parent = None
+        for c in f.children:
+            dst = re.search("dst_id=(\d+)", c.val.name)
+            if dst:
+                parent = "node_%s" % (dst.group(1))
+            pre_order_graph(c, nodes, edges, parent)
+
+    return {"nodes": nodes, "edges": edges}

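Same traversal as dot.py, but producing a JSON-friendly structure; analyze.to_json() wraps it. A minimal sketch with a hypothetical profile path:

    from libanalyze import analyze as a

    tree = a.analyze(a.parse('testdata/impala_query_profile.txt'))
    # Returns a JSON string of the form
    # {"nodes": {"node_0": {"name": ...}, ...}, "edges": [[child, parent], ...]}.
    print a.to_json(tree)
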
+ 134 - 0
desktop/libs/libanalyze/src/libanalyze/models.py

@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+from itertools import groupby
+
+class Contributor(object):
+  def __init__(self, **kwargs):
+    self.id = None
+    self.query_id = None
+    self.type = None
+    self.wall_clock_time = None
+    self.plan_node_id = None
+    self.plan_node_name = None
+    self.reason = None
+    self.__dict__.update(kwargs)
+
+  def to_json(self):
+    return json.dumps(self.__dict__)
+
+class Reason(object):
+  def __init__(self, **kwargs):
+    self.message = None
+    self.impact = None
+    self.__dict__.update(kwargs)
+
+  def to_json(self):
+    return json.dumps(self.__dict__)
+
+class TCounter(object):
+  def __init__(self, **kwargs):
+    self.value = None
+    self.name = None
+    self.unit = None
+    self.__dict__.update(kwargs)
+
+def query_node_by_id(profile, node_id, metric_name, averaged=False):
+  """Given the query_id, searches for the corresponding query profile and
+  selects the node instances given by node_id, selects the metric given by
+  metric_name and groups by fragment and fragment instance."""
+  result = profile.find_by_id(node_id)
+  if not result:
+    return result
+  nodes = filter(lambda x: x.fragment.is_averaged() == averaged, result)
+  metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
+
+  return map(lambda x: L(x['value'], x['unit'], 0, x['node'].fragment.id(), x['node'].host(), 0,
+                         x['node'].id(), x['node'].name(),
+                         value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(),
+                         host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(),
+                         node=x['node']), metric)
+
+def query_node_by_metric(profile, node_name, metric_name):
+  """Given the query_id, searches for the corresponding query profile and
+  selects the node instances given by node_name, selects the metric given by
+  metric_name and groups by fragment and fragment instance."""
+
+  result = profile.find_all_by_name(node_name)
+  nodes = filter(lambda x: x.fragment.is_averaged() == False, result)
+  metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
+  return map(lambda x: L(x['value'], 0, x['node'].fragment.id(), x['node'].host(), 0,
+                         x['node'].id(), x['node'].name(),
+                         value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(),
+                         host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(),
+                         node=x['node']), metric)
+
+def query_avg_fragment_metric_by_node_nid(profile, node_nid, metric_name):
+  """
+  Given the surrogate node id (i.e. unique id of the plan node in the database),
+  return the value of the fragment level metric.
+  :param node_id:
+  :param metric_name:
+  :return: the value of the metric; none if there is no result
+  """
+  result = profile.find_by_id(node_nid)
+  if not result:
+    return result
+  node = map(lambda x: x, filter(lambda x: x.fragment.is_averaged() == True, result))[0]
+  metric = node.fragment.find_metric_by_name(metric_name)
+  return metric[0]['value']
+
+def query_fragment_metric_by_node_id(node, metric_name):
+  """
+  Given the surrogate node id (i.e. unique id of the plan node in the database),
+  return the value of the fragment level metric.
+  :param node_id:
+  :param metric_name:
+  :return: the value of the metric; none if there is no result
+  """
+  metrics = node.find_metric_by_name(metric_name)
+  return metrics[0]['value'] if metrics else None
+
+def query_unique_node_by_id(profile, fragment_id, fragment_instance_id, node_id):
+  result = profile.find_by_id(node_id)
+  nodes = filter(lambda x: ((x.fragment is None and x.is_fragment()) or x.fragment.id() == fragment_id) and x.fragment_instance.id() == fragment_instance_id, result)
+  return nodes[0]
+
+def host_by_metric(profile, metric_name, exprs=[max]):
+  """Queries all fragment instances for a particular associated metric value.
+  Calculates the aggregated value based on exprs."""
+  fragments = profile.find_all_fragments()
+  fragments = filter(lambda x: x.is_averaged() == False, fragments)
+  metrics = reduce(lambda x,y: x + y.find_metric_by_name(metric_name), fragments, [])
+  results = []
+  for k, g in groupby(metrics, lambda x: x['node'].host()):
+      grouped = list(g)
+      values = map(lambda x: x['value'], grouped)
+      result = [k]
+      for expr in exprs:
+        value = expr(values)
+        result.append(value)
+      results.append(result)
+
+  return results
+
+class L(list):
+  def __new__(self, *args, **kwargs):
+      return super(L, self).__new__(self, args, kwargs)
+
+  def __init__(self, *args, **kwargs):
+      if len(args) == 1 and hasattr(args[0], '__iter__'):
+          list.__init__(self, args[0])
+      else:
+          list.__init__(self, args)
+      self.__dict__.update(kwargs)
+
+  def __call__(self, **kwargs):
+      self.__dict__.update(kwargs)
+      return self

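The query helpers assume the fragment back-pointers have been set on every node, which TopDownAnalysis.pre_process() in rules.py does via foreach_lambda(). A minimal sketch, with the profile path hypothetical:

    from libanalyze import analyze as a
    from libanalyze import models, rules

    tree = a.analyze(a.parse('testdata/impala_query_profile.txt'))
    rules.TopDownAnalysis().pre_process(tree)  # sets node.fragment / node.fragment_instance

    # One row per host: [host, max(PeakMemoryUsage), sum(PeakMemoryUsage)].
    for host, mem_max, mem_sum in models.host_by_metric(tree, 'PeakMemoryUsage', exprs=[max, sum]):
        print host, mem_max, mem_sum

    # L rows behave like lists but also expose the named attributes used by the rules engine.
    for row in models.query_node_by_metric(tree, 'HDFS_SCAN_NODE', 'TotalTime'):
        print row.host, row.node_id, row.value
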
+ 530 - 0
desktop/libs/libanalyze/src/libanalyze/rules.py

@@ -0,0 +1,530 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import copy
+import glob
+import json
+import os
+import re
+import types
+import struct
+
+from dateutil.parser import parse as dtparse
+from itertools import groupby
+
+from libanalyze.utils import Timer
+
+from libanalyze import models
+from libanalyze import exprs
+from libanalyze import utils
+
+def to_double(metric_value):
+    return struct.unpack('d', struct.pack('q', metric_value))[0]
+
+class ProfileContext:
+    """This is the main wrapper around the runtime profile tree. Main accessor
+    methods are implemented here."""
+
+    def __init__(self, profile):
+        self.profile = profile
+
+    def query_duration(self):
+        node = self.profile.find_by_name("Summary")
+        return (dtparse(node.info_strings["End Time"]) -
+                dtparse(node.info_strings["Start Time"])).total_seconds()
+
+    def percentage_of_total(self, compare):
+        return compare / self.query_duration()
+
+
+class SQLOperatorReason:
+    def __init__(self, node_name, metric_names,
+                 rule, exprs=[], to_json=True, **kwargs):
+        self.node_name = node_name
+        if isinstance(metric_names, types.StringTypes):
+            self.metric_names = [metric_names]
+        else:
+            self.metric_names = metric_names
+        self.exprs = exprs
+        self.rule = copy.deepcopy(rule)
+        self.kwargs = kwargs
+        self.result = None
+        self.return_messages = []
+        self.to_json = to_json
+
+    def isStorageBound(self, node):
+        """
+        Return True if the given scan node is IO bound. It is considered IO bound if the
+        enclosing fragment's "TotalStorageWaitTime" > 30% of the fragment's "TotalTime".
+        :param node: the scan node
+        :return:
+        """
+        storagetime = models.query_fragment_metric_by_node_id(node, 'TotalStorageWaitTime')
+        totaltime = models.query_fragment_metric_by_node_id(node, 'TotalTime')
+        if (storagetime is None or totaltime is None):
+            return False
+        return (float(storagetime) / float(totaltime)) > 0.3
+
+    def getNumInputRows(self, node):
+        """
+        Return the number of input rows for the given plan node.
+        For scan, return the #rows read.
+        For exchange, return the #rows returned == #rows processed.
+        For join, return the probe rows.
+        For table sink, it's the rows inserted
+        For all others, return the rows returned by all its children
+        :param node: the plan node
+        :return:
+        """
+        nodeType = node.name()
+
+        if (nodeType == 'HDFS_SCAN_NODE'):
+            return node.find_metric_by_name('RowsRead')[0]['value']
+        if (nodeType == 'EXCHANGE_NODE'):
+            return node.find_metric_by_name('RowsReturned')[0]['value']
+        if (nodeType == 'HASH_JOIN_NODE'):
+            return node.find_metric_by_name('ProbeRows')[0]['value']
+        if (nodeType == 'HdfsTableSink'):
+            return node.find_metric_by_name('RowsInserted')[0]['value']
+
+        metrics = reduce(lambda x,y: x + y.find_metric_by_name('RowsReturned'), node.children, [])
+        return reduce(lambda x,y: x + y['value'], metrics, 0)
+
+    def evaluate(self, profile, plan_node_id):
+        """
+        Evaluate the impact of this cause on the query. The return value is a dict of
+        the form:
+        {
+            "impact": the amount of slow down (in ns),
+            "message" : the displayed "explanation" string
+        }
+        :return:
+        """
+        impact = -1
+        if len(self.exprs):
+            assert len(self.metric_names) == 1
+
+            # metric_names can have multiple values create a dict for all of
+            # them
+            db_result = models.query_node_by_id(profile, plan_node_id, self.metric_names[0])
+            for k, g in groupby(db_result, lambda x: x.fid):
+                grouped = list(g)
+                # A list of pairs, with aggregated value and index at value for
+                # max / min like exprs
+                converted_exprs = self.check_exprs(grouped)
+                expr_vars = {
+                    "vars": dict(zip(self.exprs, map(lambda x: x[0], converted_exprs))),
+                    "idxs": dict(zip(self.exprs, map(lambda x: x[1], converted_exprs))),
+                }
+
+                expr_val = exprs.Expr.evaluate(self.rule["expr"], expr_vars)
+                if (impact is None or impact < expr_val):
+                    impact = expr_val
+        else:
+            # For each of the metrics get the result
+            with Timer() as t:
+                # Get the metric values from the db grouped by metric name
+                db_result = [models.query_node_by_id(profile, plan_node_id, m) for m in self.metric_names]
+                # Assuming that for all metric names the same number of rows have been returned transpose the array
+                all_metrics = zip(*db_result)
+
+            for row in all_metrics:
+                # Convert to double values if unit is 6(double)
+                metric_values = map(lambda x: x.value if x.unit != 6 else to_double(x.value), row)
+
+                surrogate_node = row[0].node
+                local_vars = {"vars": dict(zip(self.metric_names, metric_values))}
+                local_vars["vars"]["IOBound"] = self.isStorageBound(surrogate_node)
+                local_vars["vars"]['InputRows'] = self.getNumInputRows(surrogate_node)
+                condition = True
+                if ("condition" in self.rule):
+                    condition = exprs.Expr.evaluate(self.rule["condition"], local_vars)
+                if (condition):
+                    expr_val = exprs.Expr.evaluate(self.rule["expr"], local_vars)
+                    if (impact is None or impact < expr_val):
+                        impact = expr_val
+
+        msg = self.rule["label"] + ": " + self.rule["message"]
+        return {
+            "impact": impact,
+            "message": msg
+        }
+
+    def check_exprs(self, group):
+        """For each of the specified expressions evaluate the function"""
+        result = []
+        for e in self.exprs:
+            result.append(getattr(exprs, "expr_{0}".format(e))(
+                [g.value for g in group]))
+        return result
+
+class JoinOrderStrategyCheck(SQLOperatorReason):
+    def __init__(self): pass
+
+    def evaluate(self, profile, plan_node_id):
+        """
+        Determine if the join order/strategy is correct and evaluate the impact of this
+        cause on the query. The return value is a dict of the form:
+        {
+            "impact": the amount of slow down (in ns),
+            "message" : the displayed "explanation" string
+        }
+        :return:
+        """
+        self.metric_names = ["Hosts", "Broadcast", "BuildRows", "ProbeRows"]
+
+        hosts = models.query_node_by_id(profile, plan_node_id, "Hosts", True)[0][0]
+        isBroadcast = models.query_node_by_id(profile, plan_node_id, "Broadcast", True)[0][0]
+        buildRows = models.query_node_by_id(profile, plan_node_id, "BuildRows", True)[0][0]
+        probeRows = models.query_node_by_id(profile, plan_node_id, "ProbeRows", True)[0][0]
+
+        rhsRows = 0
+        lhsRows = 0
+        networkcost = 0
+        if (isBroadcast == 1):
+            networkcost = buildRows * hosts
+            rhsRows = buildRows
+            lhsRows = probeRows * hosts
+        else:
+            networkcost = (buildRows + probeRows) * hosts
+            rhsRows = buildRows * hosts
+            lhsRows = probeRows * hosts
+
+        impact = (rhsRows - lhsRows * 1.5) / hosts / 0.01
+        if (impact > 0):
+            return {
+                "impact": impact,
+                "message": "Wrong join order - RHS %d; LHS %d" % (rhsRows, lhsRows)
+            }
+
+        bcost = rhsRows * hosts
+        scost = lhsRows + rhsRows
+        impact = (networkcost - min(bcost, scost) - 1) / hosts / 0.01
+        return {
+            "impact": impact,
+            "message": "Wrong join strategy - RHS %d; LHS %d" % (rhsRows, lhsRows)
+        }
+
+class ExplodingJoinCheck(SQLOperatorReason):
+    def __init__(self): pass
+
+    def evaluate(self, profile, plan_node_id):
+        """
+        Determine whether the join exploded the number of rows. The return value is a
+        dict of the form:
+        {
+            "impact": the amount of slow down (in ns),
+            "message" : the displayed "explanation" string
+        }
+        :return:
+        """
+        self.metric_names = ["Hosts", "Broadcast", "BuildRows", "ProbeRows"]
+
+        hosts = models.query_node_by_id(profile, plan_node_id, "Hosts", True)[0][0]
+        probeRows = models.query_node_by_id(profile, plan_node_id, "ProbeRows", True)[0][0]
+        probeTime = models.query_node_by_id(profile, plan_node_id, "ProbeTime", True)[0][0]
+        rowsReturned = models.query_node_by_id(profile, plan_node_id, "RowsReturned", True)[0][0]
+
+        impact = 0
+        if (rowsReturned > 0):
+            impact = probeTime * (rowsReturned - probeRows) / rowsReturned
+        return {
+            "impact": impact,
+            "message": "Exploding join: %d input rows are exploded to %d output rows" % (probeRows, rowsReturned)
+        }
+
+class NNRpcCheck(SQLOperatorReason):
+    def __init__(self): pass
+
+    def evaluate(self, profile, plan_node_id):
+        """
+        Determine the impact of HDFS NameNode RPC latency. The return value is a dict of
+        the form:
+        {
+            "impact": the amount of slow down (in ns),
+            "message" : the displayed "explanation" string
+        }
+        :return:
+        """
+        totalStorageTime = models.query_avg_fragment_metric_by_node_nid(profile, plan_node_id, "TotalStorageWaitTime")
+        hdfsRawReadTime = models.query_node_by_id(profile, plan_node_id, "TotalRawHdfsReadTime(*)", True)[0][0]
+        avgReadThreads = models.query_node_by_id(profile, plan_node_id, "AverageHdfsReadThreadConcurrency", True)[0][0]
+        avgReadThreads = max(1, to_double(avgReadThreads))
+        impact = max(0, (totalStorageTime - hdfsRawReadTime) / avgReadThreads)
+        return {
+            "impact": impact,
+            "message": "This is the time waiting for HDFS NN RPC."
+        }
+
+class TopDownAnalysis:
+
+    def __init__(self):
+        self.base_dir = os.path.join(os.path.dirname(__file__), "../..", "reasons")
+
+        # sqlOperatorReasons maps from node name (such as HDFS_SCAN_NODE) to the list
+        # of reasons that are applicable to this operator.
+        self.sqlOperatorReasons = {}
+        for r in glob.glob("{0}/*.json".format(self.base_dir)):
+            with open(r, "r") as fid:
+                json_object = json.load(fid)
+                type = json_object["type"]
+                node_names = json_object["node_name"]
+                nodes = node_names
+                if not isinstance(node_names, types.ListType):
+                    nodes = [node_names]
+                for node in nodes:
+                    self.sqlOperatorReasons.setdefault(node,[])\
+                        .append(SQLOperatorReason(**json_object))
+
+        # Manually append the specially coded reasons
+        self.sqlOperatorReasons["HASH_JOIN_NODE"].append(JoinOrderStrategyCheck())
+        self.sqlOperatorReasons["HASH_JOIN_NODE"].append(ExplodingJoinCheck())
+        self.sqlOperatorReasons["HDFS_SCAN_NODE"].append(NNRpcCheck())
+
+    def getTopContributor(self, limit=5, profile=None):
+        """ Return the top N wall clock time contributor. Contributor can be planning time,
+        admission control wait time, query fragment distribution time, SQL operator, DML
+        update time, client fetch wait time.
+        For SQL operator, the "max" time used.
+        This function will return a list of map:
+        {
+            "type": (planning time, admission control time, SQL operator...etc),
+            "wall_clock_time :
+            "plan_node_id" : (only if SQL operator)
+            "plan_node_name" : (only if SQL operator)
+        }
+        """
+        execTime = sorted(profile, key=lambda x: x.wall_clock_time, reverse=True)
+        execTime = execTime[:limit]
+
+        # execTime is now sorted by wall_clock_time and truncated to the top 'limit' entries
+        return execTime
+
+    def getTopReasons(self, contributor):
+        """
+        For the given contributor, return the top reasons why it's slow.
+        The result will be in the form of
+        [
+            {
+                "impact" : 7000000,
+                "message" : "Predicates might be expensive (expectes speed 40m rows per sec)"
+            },
+            {
+                "impact" : 4000000,
+                "message" : "too many columns",
+            }
+        ]
+        """
+        return sorted(contributor.reason, key=lambda x: x.impact, reverse=True) if contributor.reason else contributor.reason
+
+
+    def createContributors(self, profile):
+        """ Return the models.Contributor objects. Contributor can be planning time,
+        admission control wait time, query fragment distribution time, SQL operator, DML
+        update time, client fetch wait time.
+        For SQL operator, the "max" time used.
+        This function will return a list of models.Controbutor objects and these objects are
+        persisted in the database.
+        """
+        execution_profile = profile.find_by_name('Execution Profile')
+        #summary = _profile.find_by_name("Summary")
+        counter_map = profile.find_by_name('Summary').counter_map()
+        counter_map.update(profile.find_by_name("ImpalaServer").counter_map())
+        #counter_map = summary.counter_map()
+
+        # list of non-SQL operator contributor
+        # TODO: add admission control, DML Metastore update; profile does not have it yet.
+        nonExecMetrics = ['PlanningTime', 'RemoteFragmentsStarted',
+                          'RowMaterializationTimer', 'ClientFetchWaitTimer']
+
+        contributors = []
+        for metric in nonExecMetrics:
+            contributor = models.Contributor(type=metric,
+                                             wall_clock_time=counter_map[metric].value,
+                                             plan_node_id=-1, plan_node_name="N/A")
+            #models.db.session.add(contributor)
+            contributors += [contributor]
+
+        if self.isDebugBuilt(profile):
+            contributor = models.Contributor(type="Debug Built",
+                                             wall_clock_time=9999999999999999,
+                                             plan_node_id=-1, plan_node_name="N/A")
+            #models.db.session.add(contributor)
+            contributors += [contributor]
+
+        # Get the top N contributor from query execution
+
+        # Get the plan node execution time
+        # Note: ignore DataStreamSender because its metrics is useless
+        nodes = execution_profile.find_all_non_fragment_nodes()
+        nodes = filter(lambda x: x.fragment and x.fragment.is_averaged() == False, nodes)
+        nodes = filter(lambda x: x.name() != 'DataStreamSender', nodes)
+        metrics = reduce(lambda x,y: x + y.find_metric_by_name('LocalTime'), nodes, [])
+        metrics = sorted(metrics, key=lambda x: (x['node'].id(), x['node'].name()))
+        for k, g in groupby(metrics, lambda x: (x['node'].id(), x['node'].name())):
+            grouped = list(g)
+            metric_values = map(lambda x: x['value'], grouped)
+            metric = max(metric_values)
+            contributor = models.Contributor(type="SQLOperator",
+                                 wall_clock_time=metric,
+                                 plan_node_id=grouped[0]['node'].id(), plan_node_name=grouped[0]['node'].name())
+            contributors += [contributor]
+
+
+        # Sort the contributors by wall_clock_time, largest first
+        contributors = sorted(contributors, key=lambda x: x.wall_clock_time, reverse=True)
+        return contributors
+
+    def createExecNodeReason(self, contributor, profile):
+        """
+        For the given contributor, return the top reasons why it's slow. A list of
+        models.Reason objects is created and returned, sorted by impact in descending order.
+        """
+        reasons = []
+        self.sqlOperatorReasons.setdefault(contributor.plan_node_name,[])
+        for cause in self.sqlOperatorReasons[contributor.plan_node_name] + self.sqlOperatorReasons["ANY"]:
+            evaluation = cause.evaluate(profile, contributor.plan_node_id)
+            impact = evaluation["impact"]
+            if isinstance(impact, float) and impact.is_integer():
+                evaluation["impact"] = int(impact)
+            if (evaluation["impact"] > 0):
+                reason = models.Reason(message=evaluation['message'], impact=evaluation['impact'])
+                reasons.append(reason)
+        return sorted(reasons, key=lambda x: x.impact, reverse=True)
+
+    def isDebugBuilt(self, profile):
+        summary = profile.find_by_name('Summary')
+        versionStr = summary.val.info_strings['Impala Version']
+        if "debug" in versionStr.lower():
+            return True
+        return False
+
+    def process(self, profile):
+        contributors = self.createContributors(profile)
+        for contributor in contributors:
+            if (contributor.type == "SQLOperator"):
+                reasons = self.createExecNodeReason(contributor, profile)
+            else:
+                reasons = []
+            contributor.reason = reasons
+        return contributors
+
+    def pre_process(self, profile):
+        summary = profile.find_by_name("Summary")
+        exec_summary_json = utils.parse_exec_summary(summary.val.info_strings['ExecSummary'])
+
+        # Setup Event Sequence
+        if summary:
+          for s in summary.val.event_sequences:
+              sequence_name = s.name
+              if sequence_name == "Query Timeline":
+                duration = 0
+                for i in range(len(s.labels)):
+                    event_name = s.labels[i]
+                    event_duration = s.timestamps[i] - duration
+                    event_value = s.timestamps[i]
+                    if event_name == "Planning finished":
+                      summary.val.counters.append(models.TCounter(name='PlanningTime', value=event_duration, unit=5))
+
+                    elif (re.search('remote fragments started', event_name, re.IGNORECASE) is not None or
+                          re.search('fragment instances started', event_name, re.IGNORECASE) is not None or
+                          re.search(r'execution backends.*started', event_name, re.IGNORECASE) is not None):
+                      summary.val.counters.append(models.TCounter(name='RemoteFragmentsStarted', value=event_duration, unit=5))
+
+                    duration = s.timestamps[i]
+
+        def add_host(node, exec_summary_json=exec_summary_json):
+          is_plan_node = node.is_plan_node()
+          node_id = node.id()
+           # Setup Hosts & Broadcast
+          if node_id and node.is_regular() and int(node_id) in exec_summary_json:
+
+            node.val.counters.append(models.TCounter(name='Hosts', value=exec_summary_json[int(node_id)]["hosts"], unit=0))
+            broadcast = 0
+            if exec_summary_json[int(node_id)]["broadcast"]:
+                broadcast = 1
+            node.val.counters.append(models.TCounter(name='Broadcast', value=broadcast, unit=0))
+
+          # Setup LocalTime & ChildTime
+          if node_id:
+            child_time = 0
+            for c in node.children:
+                if c.is_plan_node():
+                    child_time += c.counter_map()['TotalTime'].value
+
+            counter_map = node.counter_map()
+
+            # Load the metric data as if the object would be loaded from the DB
+            local_time = counter_map['TotalTime'].value - child_time
+
+            # Make sure to subtract the wait time for the exchange node
+            if is_plan_node and re.search(r'EXCHANGE_NODE', node.val.name) is not None:
+                async_time = counter_map.get('AsyncTotalTime', models.TCounter(value=0)).value
+                local_time = counter_map['TotalTime'].value - counter_map['InactiveTotalTime'].value - async_time
+
+            # For Hash Join, if the "LocalTime" metrics
+            if is_plan_node and re.search(r'HASH_JOIN_NODE', node.val.name) is not None:
+                if ("LocalTime" in counter_map):
+                    local_time = counter_map["LocalTime"].value
+                else:
+                    local_time = counter_map["ProbeTime"].value +\
+                        counter_map["BuildTime"].value
+
+            # Add two virtual metrics for local_time and child_time
+            node.val.counters.append(models.TCounter(name='LocalTime', value=local_time, unit=5))
+            node.val.counters.append(models.TCounter(name='ChildTime', value=child_time, unit=5))
+
+        profile.foreach_lambda(add_host)
+
+    def run(self, profile):
+        self.pre_process(profile)
+        contributors = self.process(profile)
+        topContributors = self.getTopContributor(100, contributors)
+
+        topContributions = []
+        result_id = 1
+
+        if self.isDebugBuilt(profile):
+            topContributions += [{
+                "result_id" : result_id,
+                "contribution_factor_str" : "Using Debug Built",
+                "wall_clock_time" : 9999,
+                "reason" : []
+            }]
+
+        for contributor in topContributors:
+            reasons = self.getTopReasons(contributor)
+            topContributions += [{
+                    "result_id" : contributor.plan_node_id if contributor.plan_node_id != -1 else '-1',
+                    "contribution_factor_str" : contributor.type + " " +
+                                                str(contributor.plan_node_id).zfill(2) +
+                                                ":" + contributor.plan_node_name,
+                    "wall_clock_time" : contributor.wall_clock_time,
+                    "reason" : [reason.__dict__ for reason in reasons]
+                }]
+
+        result = []
+        result += [{
+                "rule": {
+                    "message": "Top contributing factors and its reasons",
+                    "label": "Top Down Analysis",
+                    "prio": 1
+                },
+                "result": topContributions,
+                "template": "alan-tpl"
+            }]
+        return result

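Each JSON file under reasons/ simply supplies constructor arguments for a SQLOperatorReason; the definition below is purely illustrative (not one of the shipped files), followed by an end-to-end run over a hypothetical profile dump:

    from libanalyze import analyze as a
    from libanalyze import rules

    # Illustrative only: a reason keyed on HDFS_SCAN_NODE with a condition and an
    # impact expression, using the same keys the reasons/*.json files provide.
    slow_scan = rules.SQLOperatorReason(
        node_name='HDFS_SCAN_NODE',
        metric_names=['RowsRead', 'TotalTime'],
        rule={
            'label': 'Slow scan',
            'message': 'Rows were read more slowly than the assumed 40m rows per sec.',
            'condition': "vars['RowsRead'] > 0",
            'expr': "vars['TotalTime'] - vars['RowsRead'] / 40e6 * 1e9",
        },
        type='SQLOperator')

    # The full analysis: pre-process the tree, score every contributor, and return
    # the "Top Down Analysis" payload consumed by the Execution Analysis tab.
    tree = a.analyze(a.parse('testdata/impala_query_profile.txt'))
    result = rules.TopDownAnalysis().run(tree)
    for contribution in result[0]['result']:
        print contribution['contribution_factor_str'], contribution['wall_clock_time']
        for reason in contribution['reason']:
            print '  -', reason['message'], reason['impact']
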
+ 70 - 0
desktop/libs/libanalyze/src/libanalyze/utils.py

@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+import re
+
+class Timer:
+    def __enter__(self):
+        self.start = time.clock()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.clock()
+        self.interval = self.end - self.start
+
+
+def parse_exec_summary(summary_string):
+    """Given an exec summary string parses the rows and organizes it by node id"""
+    cleaned = [re.sub(r'^[-|\s]+', "", m)
+               for m in summary_string.split("\n")[3:]]
+    cleaned = map(
+        lambda x: map(
+            lambda y: y.strip(),
+            re.split(
+                '\s\s+',
+                x,
+                maxsplit=8)),
+        cleaned)
+    result = {}
+    for c in cleaned:
+        # Key 0 is id and type
+        fid, ftype = c[0].split(":")
+        result[int(fid)] = {
+            "type": ftype,
+            "hosts": int(c[1]),
+            "avg": c[2],
+            "max": c[3],
+            "rows": c[4],
+            "est_rows": c[5],
+            "peak_mem": c[6],
+            "est_mem": c[7],
+            "detail": c[8],
+            "broadcast": "BROADCAST" in c[8],
+            "has_stats": "-1" in "est_rows"
+        }
+    return result
+
+
+def parse_plan_details(plan_string):
+    """Given a query plan, extracts the query details per node"""
+    result = {}
+    for line in plan_string.split("\n"):
+        match = re.search(r'^(?!F)[|-]?(\d+):.*?\[(.*?)\]', line.strip())
+        if match:
+            result[str(int(match.group(1)))] = match.group(2)
+
+    return result

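A small sketch of the helpers above; the plan text is contrived, just enough to show what parse_plan_details() extracts:

    from libanalyze import utils

    plan = "\n".join([
        "F01:PLAN FRAGMENT [RANDOM]",          # fragment lines are skipped by the (?!F) guard
        "01:SCAN HDFS [functional.alltypes]",
        "00:EXCHANGE [UNPARTITIONED]",
    ])
    print utils.parse_plan_details(plan)
    # -> {'1': 'functional.alltypes', '0': 'UNPARTITIONED'} (key order may vary)

    with utils.Timer() as t:
        sum(xrange(10 ** 6))
    print t.interval  # elapsed time in seconds (time.clock based)
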
File diff suppressed because it is too large
+ 0 - 0
desktop/libs/libanalyze/testdata/profile.thrift


+ 15 - 14
desktop/libs/notebook/src/notebook/static/notebook/js/notebook.ko.js

@@ -1772,6 +1772,21 @@ var EditorViewModel = (function() {
 
     self.isFetchingData = false;
 
+    self.fetchExecutionAnalysis = function() {
+      if (self.type() === 'impala') {
+        // TODO: Use real query ID
+        huePubSub.publish('editor.update.execution.analysis', {
+          analysisPossible: true,
+          compute: self.compute(),
+          queryId: notebook.getContext().id()
+        });
+      } else {
+        huePubSub.publish('editor.update.execution.analysis', {
+          analysisPossible: false
+        });
+      }
+    };
+
     self.fetchResultData = function (rows, startOver) {
       if (! self.isFetchingData) {
         if (self.status() === 'available') {
@@ -1918,20 +1933,6 @@ var EditorViewModel = (function() {
                 self.checkStatusTimeout = setTimeout(self.checkStatus, delay);
               }
             } else if (self.status() === 'available') {
-              if (self.type() === 'impala' && self.compute() && self.compute().crn && self.compute().crn.indexOf('altus') !== -1) {
-
-                // TODO: Use real query ID
-                huePubSub.publish('editor.update.execution.analysis', {
-                  analysisPossible: true,
-                  compute: self.compute(),
-                  queryId: '56433486cd84d475:3a86f97000000000'
-                });
-
-              } else {
-                huePubSub.publish('editor.update.execution.analysis', {
-                  analysisPossible: false
-                });
-              }
               self.fetchResult(100);
               self.progress(100);
               if (self.isSqlDialect()) {

+ 1 - 1
desktop/libs/notebook/src/notebook/templates/editor_components.mako

@@ -771,7 +771,7 @@ ${ sqlSyntaxDropdown.sqlSyntaxDropdown() }
         <!-- /ko -->
 
         <!-- ko if: HAS_WORKLOAD_ANALYTICS && type() === 'impala' -->
-        <li data-bind="click: function(){ currentQueryTab('executionAnalysis'); }, css: {'active': currentQueryTab() == 'executionAnalysis'}"><a class="inactive-action" href="#queryBuilderTab" data-toggle="tab">${_('Execution Analysis')}</a></li>
+        <li data-bind="click: function(){ currentQueryTab('executionAnalysis'); }, css: {'active': currentQueryTab() == 'executionAnalysis'}"><a class="inactive-action" href="#executionAnalysis" data-toggle="tab" data-bind="click: function(){ $('a[href=\'#executionAnalysis\']').tab('show'); }, event: {'shown': fetchExecutionAnalysis }"><span>${_('Execution Analysis')} </span><span></span></a></li>
         <!-- /ko -->
       </ul>
 

Some files were not shown because too many files changed in this diff