
HUE-8737 [core] Futurize desktop/libs/libanalyze for Python 3.5

Ying Chen · 6 years ago
commit aacc862e9e

+ 2 - 1
desktop/libs/libanalyze/gen-py/Metrics/constants.py

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 #
 # Autogenerated by Thrift Compiler (0.9.3)
 #
@@ -7,5 +8,5 @@
 #
 
 from thrift.Thrift import TType, TMessageType, TException, TApplicationException
-from ttypes import *
+from Metrics.ttypes import *
 
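
Python 3 dropped implicit relative imports, so the generated "from ttypes import *" no longer resolves; the futurized form names the package explicitly, and the __future__ import makes Python 2 resolve it the same way. A minimal sketch of the portable pattern, assuming the gen-py directory is on sys.path:

    # Py3: "from ttypes import *" raises ImportError (no implicit relative imports).
    from __future__ import absolute_import  # make Python 2 resolve imports absolutely too
    from Metrics.ttypes import *            # explicit package path works on 2 and 3
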

+ 1 - 0
desktop/libs/libanalyze/gen-py/Metrics/ttypes.py

@@ -6,6 +6,7 @@
 #  options string: py:new_style
 #
 
+from builtins import object
 from thrift.Thrift import TType, TMessageType, TException, TApplicationException
 
 from thrift.transport import TTransport
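
"from builtins import object" comes from the python-future compatibility package: on Python 2 it makes the generated classes new-style, matching Python 3 semantics; on Python 3 it is a no-op alias for the builtin. A short sketch with a hypothetical class:

    from builtins import object  # needs the "future" package on Python 2

    class Example(object):       # new-style on both interpreters
        pass

    assert isinstance(Example(), object)
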

+ 2 - 1
desktop/libs/libanalyze/gen-py/RuntimeProfile/constants.py

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 #
 # Autogenerated by Thrift Compiler (0.9.3)
 #
@@ -7,5 +8,5 @@
 #
 
 from thrift.Thrift import TType, TMessageType, TException, TApplicationException
-from ttypes import *
+from RuntimeProfile.ttypes import *
 

+ 22 - 20
desktop/libs/libanalyze/gen-py/RuntimeProfile/ttypes.py

@@ -6,6 +6,8 @@
 #  options string: py:new_style
 #
 
+from builtins import range
+from builtins import object
 from thrift.Thrift import TType, TMessageType, TException, TApplicationException
 import Metrics.ttypes
 
@@ -107,7 +109,7 @@ class TCounter(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -154,7 +156,7 @@ class TEventSequence(object):
         if ftype == TType.LIST:
           self.timestamps = []
           (_etype3, _size0) = iprot.readListBegin()
-          for _i4 in xrange(_size0):
+          for _i4 in range(_size0):
             _elem5 = iprot.readI64()
             self.timestamps.append(_elem5)
           iprot.readListEnd()
@@ -164,7 +166,7 @@ class TEventSequence(object):
         if ftype == TType.LIST:
           self.labels = []
           (_etype9, _size6) = iprot.readListBegin()
-          for _i10 in xrange(_size6):
+          for _i10 in range(_size6):
             _elem11 = iprot.readString()
             self.labels.append(_elem11)
           iprot.readListEnd()
@@ -220,7 +222,7 @@ class TEventSequence(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -280,7 +282,7 @@ class TTimeSeriesCounter(object):
         if ftype == TType.LIST:
           self.values = []
           (_etype17, _size14) = iprot.readListBegin()
-          for _i18 in xrange(_size14):
+          for _i18 in range(_size14):
             _elem19 = iprot.readI64()
             self.values.append(_elem19)
           iprot.readListEnd()
@@ -340,7 +342,7 @@ class TTimeSeriesCounter(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -482,7 +484,7 @@ class TSummaryStatsCounter(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -558,7 +560,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.LIST:
           self.counters = []
           (_etype24, _size21) = iprot.readListBegin()
-          for _i25 in xrange(_size21):
+          for _i25 in range(_size21):
             _elem26 = TCounter()
             _elem26.read(iprot)
             self.counters.append(_elem26)
@@ -579,7 +581,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.MAP:
           self.info_strings = {}
           (_ktype28, _vtype29, _size27 ) = iprot.readMapBegin()
-          for _i31 in xrange(_size27):
+          for _i31 in range(_size27):
             _key32 = iprot.readString()
             _val33 = iprot.readString()
             self.info_strings[_key32] = _val33
@@ -590,7 +592,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.LIST:
           self.info_strings_display_order = []
           (_etype37, _size34) = iprot.readListBegin()
-          for _i38 in xrange(_size34):
+          for _i38 in range(_size34):
             _elem39 = iprot.readString()
             self.info_strings_display_order.append(_elem39)
           iprot.readListEnd()
@@ -600,11 +602,11 @@ class TRuntimeProfileNode(object):
         if ftype == TType.MAP:
           self.child_counters_map = {}
           (_ktype41, _vtype42, _size40 ) = iprot.readMapBegin()
-          for _i44 in xrange(_size40):
+          for _i44 in range(_size40):
             _key45 = iprot.readString()
             _val46 = set()
             (_etype50, _size47) = iprot.readSetBegin()
-            for _i51 in xrange(_size47):
+            for _i51 in range(_size47):
               _elem52 = iprot.readString()
               _val46.add(_elem52)
             iprot.readSetEnd()
@@ -616,7 +618,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.LIST:
           self.event_sequences = []
           (_etype56, _size53) = iprot.readListBegin()
-          for _i57 in xrange(_size53):
+          for _i57 in range(_size53):
             _elem58 = TEventSequence()
             _elem58.read(iprot)
             self.event_sequences.append(_elem58)
@@ -627,7 +629,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.LIST:
           self.time_series_counters = []
           (_etype62, _size59) = iprot.readListBegin()
-          for _i63 in xrange(_size59):
+          for _i63 in range(_size59):
             _elem64 = TTimeSeriesCounter()
             _elem64.read(iprot)
             self.time_series_counters.append(_elem64)
@@ -638,7 +640,7 @@ class TRuntimeProfileNode(object):
         if ftype == TType.LIST:
           self.summary_stats_counters = []
           (_etype68, _size65) = iprot.readListBegin()
-          for _i69 in xrange(_size65):
+          for _i69 in range(_size65):
             _elem70 = TSummaryStatsCounter()
             _elem70.read(iprot)
             self.summary_stats_counters.append(_elem70)
@@ -681,7 +683,7 @@ class TRuntimeProfileNode(object):
     if self.info_strings is not None:
       oprot.writeFieldBegin('info_strings', TType.MAP, 6)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.info_strings))
-      for kiter72,viter73 in self.info_strings.items():
+      for kiter72,viter73 in list(self.info_strings.items()):
         oprot.writeString(kiter72)
         oprot.writeString(viter73)
       oprot.writeMapEnd()
@@ -696,7 +698,7 @@ class TRuntimeProfileNode(object):
     if self.child_counters_map is not None:
       oprot.writeFieldBegin('child_counters_map', TType.MAP, 8)
       oprot.writeMapBegin(TType.STRING, TType.SET, len(self.child_counters_map))
-      for kiter75,viter76 in self.child_counters_map.items():
+      for kiter75,viter76 in list(self.child_counters_map.items()):
         oprot.writeString(kiter75)
         oprot.writeSetBegin(TType.STRING, len(viter76))
         for iter77 in viter76:
@@ -765,7 +767,7 @@ class TRuntimeProfileNode(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
@@ -801,7 +803,7 @@ class TRuntimeProfileTree(object):
         if ftype == TType.LIST:
           self.nodes = []
           (_etype84, _size81) = iprot.readListBegin()
-          for _i85 in xrange(_size81):
+          for _i85 in range(_size81):
             _elem86 = TRuntimeProfileNode()
             _elem86.read(iprot)
             self.nodes.append(_elem86)
@@ -841,7 +843,7 @@ class TRuntimeProfileTree(object):
 
   def __repr__(self):
     L = ['%s=%r' % (key, value)
-      for key, value in self.__dict__.iteritems()]
+      for key, value in self.__dict__.items()]
     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 
   def __eq__(self, other):
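
The bulk of this file is the mechanical iteritems()/xrange() conversion: dict.iteritems() and xrange() were removed in Python 3, and items()/range() are the portable spellings (an eager list on Python 2, lazy views on Python 3, which is harmless in these bounded loops). A condensed sketch of both patterns:

    d = {'value': 1, 'unit': 5}
    parts = ['%s=%r' % (key, value) for key, value in d.items()]  # was iteritems()
    for i in range(len(parts)):                                   # was xrange()
        print(parts[i])
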

+ 9 - 6
desktop/libs/libanalyze/src/libanalyze/analyze.py

@@ -14,6 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import print_function
+from builtins import range
+from builtins import object
 import base64
 import json
 import os
@@ -387,13 +390,13 @@ def metrics(profile):
 
   execution_profile.foreach_lambda(flatten)
 
-  for nodeid, node in counter_map['nodes'].iteritems():
-    host_min = {'value': sys.maxint, 'host' : None}
-    host_max = {'value': -(sys.maxint - 1), 'host' : None}
+  for nodeid, node in counter_map['nodes'].items():
+    host_min = {'value': sys.maxsize, 'host' : None}
+    host_max = {'value': -(sys.maxsize - 1), 'host' : None}
     if not node['timeline']['minmax']:
       continue
-    for host_name, host_value in node['timeline']['hosts'].iteritems():
-      for event_name, event in host_value.iteritems():
+    for host_name, host_value in node['timeline']['hosts'].items():
+      for event_name, event in host_value.items():
         if len(event):
           value = event[len(event) - 1]['value']
           if value < host_min['value']:
@@ -486,7 +489,7 @@ def to_json(profile):
 def print_tree(node, level, indent):
   if level == 0:
       return
-  print node.repr(indent)
+  print(node.repr(indent))
   for c in node.children:
       print_tree(c, level - 1, indent + "  ")
 
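
sys.maxint is gone in Python 3 because int is unbounded; sys.maxsize exists on both versions and is large enough to serve as the min/max sentinel used above. A sketch with hypothetical sample data:

    import sys

    host_min = {'value': sys.maxsize, 'host': None}
    host_max = {'value': -(sys.maxsize - 1), 'host': None}
    for host, value in [('host-a', 10), ('host-b', 3)]:  # hypothetical events
        if value < host_min['value']:
            host_min = {'value': value, 'host': host}
        if value > host_max['value']:
            host_max = {'value': value, 'host': host}
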

+ 11 - 3
desktop/libs/libanalyze/src/libanalyze/analyze_test.py

@@ -14,16 +14,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import cProfile, logging, os, pstats, time, StringIO
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+import cProfile, logging, os, pstats, sys, time
 from libanalyze import analyze as a
 from libanalyze import rules
 from nose.tools import assert_true
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from cStringIO import StringIO as string_io
+
 LOG = logging.getLogger(__name__)
 
 def ordered(obj):
   if isinstance(obj, dict):
-    return sorted((k, ordered(v)) for k, v in obj.items())
+    return sorted((k, ordered(v)) for k, v in list(obj.items()))
   elif isinstance(obj, list):
     return sorted(ordered(x) for x in obj)
   else:
@@ -56,7 +64,7 @@ class AnalyzeTest(object):
     ts2 = time.time()*1000.0
     dts = ts2 - ts1
     pr.disable()
-    s = StringIO.StringIO()
+    s = string_io()
     sortby = 'cumulative'
     ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
     ps.print_stats()
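
standard_library.install_aliases() patches Python 3 stdlib names into Python 2, and the explicit version guard picks a StringIO implementation per interpreter: io.StringIO on Python 3, the faster cStringIO on Python 2. Both expose the file-like stream pstats needs. A trimmed sketch:

    import sys

    if sys.version_info[0] > 2:
        from io import StringIO as string_io         # Py3 text buffer
    else:
        from cStringIO import StringIO as string_io  # Py2 C-accelerated buffer

    s = string_io()
    s.write('profiler output\n')
    assert s.getvalue().startswith('profiler')
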

+ 2 - 1
desktop/libs/libanalyze/src/libanalyze/exprs.py

@@ -14,6 +14,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from builtins import object
 def expr_min(data):
     result = (data[0], 0)
     for i, v in enumerate(data):
@@ -38,7 +39,7 @@ def expr_sum(data):
     return (sum(data), None)
 
 
-class Expr:
+class Expr(object):
 
     @classmethod
     def evaluate(self, expr, vars):

+ 13 - 11
desktop/libs/libanalyze/src/libanalyze/models.py

@@ -14,8 +14,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from builtins import object
 import json
 from itertools import groupby
+from functools import reduce
 
 class Contributor(object):
   def __init__(self, **kwargs):
@@ -61,7 +63,7 @@ def query_node_by_id(profile, node_id, metric_name, averaged=False):
   nodes = _filter_averaged(result, averaged)
   metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
 
-  return map(lambda x: L(x['value'], x['unit'], 0, x['node'].fragment.id(), x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(), host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
+  return [L(x['value'], x['unit'], 0, x['node'].fragment.id(), x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(), host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']) for x in metric]
 
 def query_node_by_id_value(profile, node_id, metric_name, averaged=False, default=0):
   results = query_node_by_id(profile, node_id, metric_name, averaged)
@@ -81,7 +83,7 @@ def _filter_averaged(result, averaged=False):
         return 0
     return sorted(result, cmp=by_averaged)
   else:
-    return filter(lambda x: x.fragment.is_averaged() == averaged, result)
+    return [x for x in result if x.fragment.is_averaged() == averaged]
 
 def query_node_by_metric(profile, node_name, metric_name):
   """Given the query_id, searches for the corresponding query profile and
@@ -89,9 +91,9 @@ def query_node_by_metric(profile, node_name, metric_name):
   metric_name and groups by fragment and fragment instance."""
 
   result = profile.find_all_by_name(node_name)
-  nodes = filter(lambda x: x.fragment.is_averaged() == False, result)
+  nodes = [x for x in result if x.fragment.is_averaged() == False]
   metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
-  return map(lambda x: L(x['value'], 0, x['node'].fragment.id(), x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(), host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
+  return [L(x['value'], 0, x['node'].fragment.id(), x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id(), host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']) for x in metric]
 
 def query_element_by_metric(profile, node_name, metric_name):
   """Given the query_id, searches for the corresponding query profile and
@@ -99,9 +101,9 @@ def query_element_by_metric(profile, node_name, metric_name):
   metric_name and groups by fragment and fragment instance."""
 
   result = profile.find_all_by_name(node_name)
-  nodes = filter(lambda x: not x.fragment or x.fragment.is_averaged() == False, result)
+  nodes = [x for x in result if not x.fragment or x.fragment.is_averaged() == False]
   metric = reduce(lambda x, y: x + y.find_metric_by_name(metric_name), nodes, [])
-  return map(lambda x: L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
+  return [L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], unit=x['unit'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']) for x in metric]
 
 def query_element_by_info(profile, node_name, metric_name):
   """Given the query_id, searches for the corresponding query profile and
@@ -109,9 +111,9 @@ def query_element_by_info(profile, node_name, metric_name):
   metric_name and groups by fragment and fragment instance."""
 
   result = profile.find_all_by_name(node_name)
-  nodes = filter(lambda x: not x.fragment or x.fragment.is_averaged() == False, result)
+  nodes = [x for x in result if not x.fragment or x.fragment.is_averaged() == False]
   metric = reduce(lambda x, y: x + y.find_info_by_name(metric_name), nodes, [])
-  return map(lambda x: L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']), metric)
+  return [L(x['value'], 0, x['node'].fragment.id() if x['node'].fragment else '', x['node'].host(), 0, x['node'].id(), x['node'].name(), value=x['value'], fragment_id=0, fid=x['node'].fragment.id() if x['node'].fragment else '', host=x['node'].host(), node_id=x['node'].id(), name=x['node'].name(), node=x['node']) for x in metric]
 
 def query_avg_fragment_metric_by_node_nid(profile, node_nid, metric_name, default):
   """
@@ -141,19 +143,19 @@ def query_fragment_metric_by_node_id(node, metric_name):
 
 def query_unique_node_by_id(profile, fragment_id, fragment_instance_id, node_id):
   result = profile.find_by_id(node_id)
-  nodes = filter(lambda x: ((x.fragment is None and x.is_fragment()) or x.fragment.id() == fragment_id) and x.fragment_instance.id() == fragment_instance_id, result)
+  nodes = [x for x in result if ((x.fragment is None and x.is_fragment()) or x.fragment.id() == fragment_id) and x.fragment_instance.id() == fragment_instance_id]
   return nodes[0]
 
 def host_by_metric(profile, metric_name, exprs=[max]):
   """Queries all fragment instances for a particular associated metric value.
   Calculates the aggregated value based on exprs."""
   fragments = profile.find_all_fragments()
-  fragments = filter(lambda x: x.is_averaged() == False, fragments)
+  fragments = [x for x in fragments if x.is_averaged() == False]
   metrics = reduce(lambda x,y: x + y.find_metric_by_name(metric_name), fragments, [])
   results = L(unit=-1)
   for k, g in groupby(metrics, lambda x: x['node'].host()):
       grouped = list(g)
-      values = map(lambda x: x['value'], grouped)
+      values = [x['value'] for x in grouped]
       result = [k]
       for expr in exprs:
         value = expr(values)
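
In Python 3 map() and filter() return lazy iterators and reduce() moved to functools, so futurize rewrites them as list comprehensions (preserving the eager-list behavior callers rely on) and adds the explicit import. A condensed sketch:

    from functools import reduce  # no longer a builtin on Python 3

    metric = [{'value': 3}, {'value': 7}]
    values = [x['value'] for x in metric]          # was map(lambda x: x['value'], metric)
    kept = [v for v in values if v > 5]            # was filter(lambda v: v > 5, values)
    total = reduce(lambda x, y: x + y, values, 0)  # same semantics, new import
    assert values == [3, 7] and kept == [7] and total == 10
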

+ 34 - 28
desktop/libs/libanalyze/src/libanalyze/rules.py

@@ -14,6 +14,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import division
+from builtins import zip
+from builtins import range
+from builtins import object
+from past.utils import old_div
 import copy
 import glob
 import json
@@ -31,13 +36,14 @@ from libanalyze.utils import Timer
 from libanalyze import models
 from libanalyze import exprs
 from libanalyze import utils
+from functools import reduce
 
 LOG = logging.getLogger(__name__)
 
 def to_double(metric_value):
     return struct.unpack('d', struct.pack('q', metric_value))[0]
 
-class ProfileContext:
+class ProfileContext(object):
     """This is the main wrapper around the runtime profile tree. Main accessor
     methods are implemented here."""
 
 
@@ -50,10 +56,10 @@ class ProfileContext:
                 dtparse(node.info_strings["Start Time"])).total_seconds()
 
     def percentage_of_total(self, compare):
-        return compare / self.query_duration()
+        return old_div(compare, self.query_duration())
 
 
-class SQLOperatorReason:
+class SQLOperatorReason(object):
     def __init__(self, node_name, metric_names,
                  rule, exprs=[], to_json=True, **kwargs):
         self.node_name = node_name
@@ -131,8 +137,8 @@ class SQLOperatorReason:
                 # max / min like exprs
                 converted_exprs = self.check_exprs(grouped)
                 expr_vars = {
-                    "vars": dict(zip(self.exprs, map(lambda x: x[0], converted_exprs))),
-                    "idxs": dict(zip(self.exprs, map(lambda x: x[1], converted_exprs))),
+                    "vars": dict(list(zip(self.exprs, [x[0] for x in converted_exprs]))),
+                    "idxs": dict(list(zip(self.exprs, [x[1] for x in converted_exprs]))),
                 }
 
                 expr_val = exprs.Expr.evaluate(self.rule["expr"], expr_vars)
@@ -144,14 +150,14 @@ class SQLOperatorReason:
                 # Get the metric values from the db grouped by metric name
                 db_result = [models.query_node_by_id(profile, plan_node_id, m) for m in self.metric_names]
                 # Assuming that for all metric names the same number of rows have been returned transpose the array
-                all_metrics = zip(*db_result)
+                all_metrics = list(zip(*db_result))
 
             for row in all_metrics:
                 # Convert to double values if unit is 6(double)
-                metric_values = map(lambda x: x.value if x.unit != 6 else to_double(x.value), row)
+                metric_values = [x.value if x.unit != 6 else to_double(x.value) for x in row]
 
                 surrogate_node = row[0].node
-                local_vars = {"vars": dict(zip(self.metric_names, metric_values))}
+                local_vars = {"vars": dict(list(zip(self.metric_names, metric_values)))}
                 local_vars["vars"]["IOBound"] = self.isStorageBound(surrogate_node)
                 local_vars["vars"]['InputRows'] = self.getNumInputRows(surrogate_node)
                 condition = True
@@ -164,10 +170,10 @@ class SQLOperatorReason:
 
             if self.kwargs.get('info_names'):
               db_result = [models.query_element_by_info(profile, plan_node_id, m) for m in self.kwargs['info_names']]
-              all_metrics = zip(*db_result)
+              all_metrics = list(zip(*db_result))
               for row in all_metrics:
-                metric_values = map(lambda x: x.value, row)
-                local_vars['vars'].update(dict(zip(self.kwargs['info_names'], metric_values)))
+                metric_values = [x.value for x in row]
+                local_vars['vars'].update(dict(list(zip(self.kwargs['info_names'], metric_values))))
                 expr_data = exprs.Expr.evaluate(self.kwargs['fix']['data'], local_vars)
 
         return {
@@ -210,8 +216,8 @@ class SummaryReason(SQLOperatorReason):
                 # max / min like exprs
                 converted_exprs = self.check_exprs(grouped)
                 expr_vars = {
-                    "vars": dict(zip(self.exprs, map(lambda x: x[0], converted_exprs))),
-                    "idxs": dict(zip(self.exprs, map(lambda x: x[1], converted_exprs))),
+                    "vars": dict(list(zip(self.exprs, [x[0] for x in converted_exprs]))),
+                    "idxs": dict(list(zip(self.exprs, [x[1] for x in converted_exprs]))),
                 }
 
                 expr_val = exprs.Expr.evaluate(self.rule["expr"], expr_vars)
@@ -223,13 +229,13 @@ class SummaryReason(SQLOperatorReason):
                 # Get the metric values from the db grouped by metric name
                 db_result = [models.query_element_by_metric(profile, 'Summary', m) for m in self.metric_names]
                 # Assuming that for all metric names the same number of rows have been returned transpose the array
-                all_metrics = zip(*db_result)
+                all_metrics = list(zip(*db_result))
 
             for row in all_metrics:
                 # Convert to double values if unit is 6(double)
-                metric_values = map(lambda x: x.value if x.unit != 6 else to_double(x.value), row)
+                metric_values = [x.value if x.unit != 6 else to_double(x.value) for x in row]
 
-                local_vars = {"vars": dict(zip(self.metric_names, metric_values))}
+                local_vars = {"vars": dict(list(zip(self.metric_names, metric_values)))}
                 condition = True
                 if ("condition" in self.rule):
                     condition = exprs.Expr.evaluate(self.rule["condition"], local_vars)
@@ -278,7 +284,7 @@ class JoinOrderStrategyCheck(SQLOperatorReason):
             rhsRows = buildRows * hosts
             lhsRows = probeRows * hosts
 
-        impact = (rhsRows - lhsRows * 1.5) / hosts / 0.01
+        impact = old_div(old_div(rhsRows - lhsRows * 1.5, hosts), 0.01)
         if (impact > 0):
             return {
                 "impact": impact,
@@ -288,7 +294,7 @@ class JoinOrderStrategyCheck(SQLOperatorReason):
 
         bcost = rhsRows * hosts
         scost = lhsRows + rhsRows
-        impact = (networkcost - min(bcost, scost) - 1) / hosts / 0.01
+        impact = old_div(old_div(networkcost - min(bcost, scost) - 1, hosts), 0.01)
         return {
             "impact": impact,
             "message": "RHS %d; LHS %d" % (rhsRows, lhsRows),
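
old_div from past.utils reproduces Python 2 division: floor division when both operands are integers, true division otherwise. Note that a chained a / b / c groups left-to-right, so a semantics-preserving conversion nests as old_div(old_div(a, b), c), the form used above. A sketch:

    from past.utils import old_div

    assert old_div(7, 2) == 3       # int / int floors, as on Python 2
    assert old_div(7.0, 2) == 3.5   # any float operand gives true division

    a, b, c = 12, 3, 2
    assert old_div(old_div(a, b), c) == 2  # matches Python 2's a / b / c
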
@@ -318,7 +324,7 @@ class ExplodingJoinCheck(SQLOperatorReason):
 
         impact = 0
         if (rowsReturned > 0):
-            impact = probeTime * (rowsReturned - probeRows) / rowsReturned
+            impact = old_div(probeTime * (rowsReturned - probeRows), rowsReturned)
         return {
             "impact": impact,
             "message": "%d input rows are exploded to %d output rows" % (probeRows, rowsReturned),
@@ -343,14 +349,14 @@ class NNRpcCheck(SQLOperatorReason):
         hdfsRawReadTime = models.query_node_by_id_value(profile, plan_node_id, "TotalRawHdfsReadTime(*)", True)
         avgReadThreads = models.query_node_by_id_value(profile, plan_node_id, "AverageHdfsReadThreadConcurrency", True)
         avgReadThreads = max(1, to_double(avgReadThreads))
-        impact = max(0, (totalStorageTime - hdfsRawReadTime) / avgReadThreads)
+        impact = max(0, old_div((totalStorageTime - hdfsRawReadTime), avgReadThreads))
         return {
             "impact": impact,
             "message": "This is the time waiting for HDFS NN RPC.",
             "label": "HDFS NN RPC"
         }
 
-class TopDownAnalysis:
+class TopDownAnalysis(object):
 
     def __init__(self):
         self.base_dir = os.path.join(os.path.dirname(__file__), "../..", "reasons")
@@ -364,7 +370,7 @@ class TopDownAnalysis:
                 type = json_object["type"]
                 node_names = json_object["node_name"]
                 nodes = node_names
-                if not isinstance(node_names, types.ListType):
+                if not isinstance(node_names, list):
                     nodes = [node_names]
                 if type == 'SQLOperator':
                   for node in nodes:
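
types.ListType was a Python 2 alias removed in Python 3; checking against the builtin list is the portable form. A sketch with a hypothetical rule value that may be a string or a list:

    node_names = "HASH_JOIN_NODE"  # hypothetical: a rule may give str or list
    nodes = node_names if isinstance(node_names, list) else [node_names]
    assert nodes == ["HASH_JOIN_NODE"]
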
@@ -455,13 +461,13 @@ class TopDownAnalysis:
         # Get the plan node execution time
         # Note: ignore DataStreamSender because its metrics is useless
         nodes = execution_profile.find_all_non_fragment_nodes()
-        nodes = filter(lambda x: x.fragment and x.fragment.is_averaged() == False, nodes)
-        nodes = filter(lambda x: x.name() != 'DataStreamSender', nodes)
+        nodes = [x for x in nodes if x.fragment and x.fragment.is_averaged() == False]
+        nodes = [x for x in nodes if x.name() != 'DataStreamSender']
         metrics = reduce(lambda x,y: x + y.find_metric_by_name('LocalTime'), nodes, [])
         metrics = sorted(metrics, key=lambda x: (x['node'].id(), x['node'].name()))
         for k, g in groupby(metrics, lambda x: (x['node'].id(), x['node'].name())):
             grouped = list(g)
-            metric_values = map(lambda x: x['value'], grouped)
+            metric_values = [x['value'] for x in grouped]
             metric = max(metric_values)
             contributor = models.Contributor(type="SQLOperator",
                                  wall_clock_time=metric,
@@ -569,7 +575,7 @@ class TopDownAnalysis:
                   summary.val.counters.append(models.TCounter(name=sequence.get(event_name), value=event_duration, unit=5))
                   sequence.pop(event_name)
                 else:
-                  for key, value in sequence.iteritems():
+                  for key, value in sequence.items():
                     if re.search(key, event_name, re.IGNORECASE):
                       summary.val.counters.append(models.TCounter(name=value, value=event_duration, unit=5))
                       sequence.pop(key)
@@ -577,9 +583,9 @@ class TopDownAnalysis:
 
                 duration = s.timestamps[i]
 
-          for key, value in stats_mapping.get('Query Compilation').iteritems():
+          for key, value in stats_mapping.get('Query Compilation').items():
             summary.val.counters.append(models.TCounter(name=value, value=0, unit=5))
-          for key, value in stats_mapping.get('Query Timeline').iteritems():
+          for key, value in stats_mapping.get('Query Timeline').items():
             summary.val.counters.append(models.TCounter(name=value, value=0, unit=5))
 
           missing_stats = {}

+ 5 - 7
desktop/libs/libanalyze/src/libanalyze/utils.py

@@ -14,10 +14,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from builtins import range
+from builtins import object
 import time
 import re
 
-class Timer:
+class Timer(object):
     def __enter__(self):
         self.start = time.clock()
         return self
@@ -31,14 +33,10 @@ def parse_exec_summary(summary_string):
     """Given an exec summary string parses the rows and organizes it by node id"""
     """Given an exec summary string parses the rows and organizes it by node id"""
     cleaned = [re.sub(r'^[-|\s]+', "", m)
     cleaned = [re.sub(r'^[-|\s]+', "", m)
                for m in summary_string.split("\n")[3:]]
                for m in summary_string.split("\n")[3:]]
-    cleaned = map(
-        lambda x: map(
-            lambda y: y.strip(),
-            re.split(
+    cleaned = [[y.strip() for y in re.split(
                 '\s\s+',
                 '\s\s+',
                 x,
                 x,
-                maxsplit=8)),
-        cleaned)
+                maxsplit=8)] for x in cleaned]
     result = {}
     result = {}
     for c in cleaned:
     for c in cleaned:
         # Key 0 is id and type
         # Key 0 is id and type
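
The nested map(lambda ...) calls collapse into one nested list comprehension with identical output: each cleaned summary row is split on runs of two or more whitespace characters and every cell is stripped. A sketch on a hypothetical exec-summary row:

    import re

    row = "00:SCAN HDFS      4      1.02s     2.1K rows"  # hypothetical row
    cells = [[y.strip() for y in re.split(r'\s\s+', x, maxsplit=8)]
             for x in [row]]
    assert cells[0][0] == "00:SCAN HDFS"
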