
[desktop] Initial commit of pyformance for metrics

Erick Tryzelaar 10 years ago
parent
commit
1dbfc3ec4a
22 changed files with 1307 additions and 0 deletions
  1. +13 -0    desktop/core/ext-py/pyformance-0.3.2/PKG-INFO
  2. +6 -0     desktop/core/ext-py/pyformance-0.3.2/pyformance/__init__.py
  3. +1 -0     desktop/core/ext-py/pyformance-0.3.2/pyformance/__version__.py
  4. +5 -0     desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/__init__.py
  5. +31 -0    desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/counter.py
  6. +57 -0    desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/gauge.py
  7. +93 -0    desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/histogram.py
  8. +58 -0    desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/meter.py
  9. +119 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/timer.py
  10. +370 -0  desktop/core/ext-py/pyformance-0.3.2/pyformance/registry.py
  11. +28 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/__init__.py
  12. +68 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/carbon_reporter.py
  13. +39 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/console_reporter.py
  14. +49 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/csv_reporter.py
  15. +47 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/hosted_graphite_reporter.py
  16. +45 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/reporter.py
  17. +3 -0    desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/__init__.py
  18. +69 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/moving_average.py
  19. +112 -0  desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/samples.py
  20. +59 -0   desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/snapshot.py
  21. +5 -0    desktop/core/ext-py/pyformance-0.3.2/setup.cfg
  22. +30 -0   desktop/core/ext-py/pyformance-0.3.2/setup.py

+ 13 - 0
desktop/core/ext-py/pyformance-0.3.2/PKG-INFO

@@ -0,0 +1,13 @@
+Metadata-Version: 1.1
+Name: pyformance
+Version: 0.3.2
+Summary: Performance metrics, based on Coda Hale's Yammer metrics
+Home-page: UNKNOWN
+Author: Omer Gertel
+Author-email: omer.gertel@gmail.com
+License: Apache 2.0
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 2.7

+ 6 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/__init__.py

@@ -0,0 +1,6 @@
+__import__('pkg_resources').declare_namespace(__name__)
+
+from .registry import MetricsRegistry, global_registry, set_global_registry
+from .registry import timer, counter, meter, histogram, gauge
+from .registry import dump_metrics, clear, count_calls, meter_calls, hist_calls, time_calls
+from .meters.timer import call_too_long

+ 1 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/__version__.py

@@ -0,0 +1 @@
+__version__ = "0.3.2"

+ 5 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/__init__.py

@@ -0,0 +1,5 @@
+from .counter import Counter
+from .meter import Meter
+from .histogram import Histogram
+from .timer import Timer
+from .gauge import Gauge, CallbackGauge, SimpleGauge

+ 31 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/counter.py

@@ -0,0 +1,31 @@
+from threading import Lock
+
+
+class Counter(object):
+
+    """
+    An incrementing and decrementing metric
+    """
+
+    def __init__(self):
+        super(Counter, self).__init__()
+        self.lock = Lock()
+        self.counter = 0
+
+    def inc(self, val=1):
+        "increment counter by val (default is 1)"
+        with self.lock:
+            self.counter = self.counter + val
+
+    def dec(self, val=1):
+        "decrement counter by val (default is 1)"
+        self.inc(-val)
+
+    def get_count(self):
+        "return current value of counter"
+        return self.counter
+
+    def clear(self):
+        "reset counter to 0"
+        with self.lock:
+            self.counter = 0
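
For quick reference, a minimal usage sketch of the Counter above; the metric name and values are illustrative, not part of this commit:

    from pyformance.meters import Counter

    in_flight = Counter()
    in_flight.inc()                # 1
    in_flight.inc(4)               # 5
    in_flight.dec(2)               # 3
    print(in_flight.get_count())   # 3
    in_flight.clear()              # back to 0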

+ 57 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/gauge.py

@@ -0,0 +1,57 @@
+class Gauge(object):
+
+    """
+    A base class for reading a particular value.
+
+    For example, to instrument a queue depth:
+
+    class QueueLengthGauge(Gauge):
+        def __init__(self, queue):
+            super(QueueLengthGauge, self).__init__()
+            self.queue = queue
+        
+        def get_value(self):
+            return len(self.queue)
+    
+    """
+
+    def get_value(self):
+        "A subclass of Gauge should implement this method"
+        raise NotImplementedError()
+
+
+class CallbackGauge(Gauge):
+
+    """
+    A Gauge reading for a given callback
+    """
+
+    def __init__(self, callback):
+        "constructor expects a callable"
+        super(CallbackGauge, self).__init__()
+        self.callback = callback
+
+    def get_value(self):
+        "returns the result of callback which is executed each time"
+        return self.callback()
+
+
+class SimpleGauge(Gauge):
+
+    """
+    A gauge which holds values with simple getter- and setter-interface
+    """
+
+    def __init__(self, value=float("nan")):
+        "constructor accepts initial value"
+        super(SimpleGauge, self).__init__()
+        self._value = value
+
+    def get_value(self):
+        "getter returns current value"
+        return self._value
+
+    def set_value(self, value):
+        "setter changes current value"
+        # XXX: add locking?
+        self._value = value

+ 93 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/histogram.py

@@ -0,0 +1,93 @@
+import time
+import math
+from threading import Lock
+from ..stats.samples import ExpDecayingSample, DEFAULT_SIZE, DEFAULT_ALPHA
+
+
+class Histogram(object):
+
+    """
+    A metric which calculates the distribution of a value.
+    """
+
+    def __init__(self, size=DEFAULT_SIZE, alpha=DEFAULT_ALPHA, clock=time):
+        """
+        Creates a new instance of a L{Histogram}.
+        """
+        super(Histogram, self).__init__()
+        self.lock = Lock()
+        self.clock = clock
+        self.sample = ExpDecayingSample(size, alpha, clock)
+        self.clear()
+
+    def add(self, value):
+        """
+        Add value to histogram
+        
+        :type value: float
+        """
+        with self.lock:
+            self.sample.update(value)
+            self.counter = self.counter + 1
+            self.max = value if value > self.max else self.max
+            self.min = value if value < self.min else self.min
+            self.sum = self.sum + value
+            self._update_var(value)
+
+    def clear(self):
+        "reset histogram to initial state"
+        with self.lock:
+            self.sample.clear()
+            self.counter = 0.0
+            self.max = -2147483647.0
+            self.min = 2147483647.0
+            self.sum = 0.0
+            self.var = [-1.0, 0.0]
+
+    def get_count(self):
+        "get current value of counter"
+        return self.counter
+
+    def get_sum(self):
+        "get current sum"
+        return self.sum
+
+    def get_max(self):
+        "get current maximum"
+        return self.max
+
+    def get_min(self):
+        "get current minimum"
+        return self.min
+
+    def get_mean(self):
+        "get current mean"
+        if self.counter > 0:
+            return self.sum / self.counter
+        return 0
+
+    def get_stddev(self):
+        "get current standard deviation"
+        if self.counter > 0:
+            return math.sqrt(self.get_var())
+        return 0
+
+    def get_var(self):
+        "get current variance"
+        if self.counter > 1:
+            return self.var[1] / (self.counter - 1)
+        return 0
+
+    def get_snapshot(self):
+        "get snapshot instance which holds the percentiles"
+        return self.sample.get_snapshot()
+
+    def _update_var(self, value):
+        old_m, old_s = self.var
+        new_m, new_s = [0.0, 0.0]
+        if old_m == -1:
+            new_m = value
+        else:
+            new_m = old_m + ((value - old_m) / self.counter)
+            new_s = old_s + ((value - old_m) * (value - new_m))
+        self.var = [new_m, new_s]
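
A short sketch of how the histogram above accumulates values; the latencies are made-up sample data, and _update_var is an online (Welford-style) running-variance update:

    from pyformance.meters import Histogram

    h = Histogram()                    # default exponentially-decaying sample
    for latency in (0.12, 0.25, 0.31):
        h.add(latency)
    print(h.get_mean())                # ~0.227
    print(h.get_stddev())              # ~0.097, from the online variance update
    print(h.get_snapshot().get_95th_percentile())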

+ 58 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/meter.py

@@ -0,0 +1,58 @@
+import time
+from threading import Lock
+from ..stats.moving_average import ExpWeightedMovingAvg
+
+
+class Meter(object):
+
+    """
+    A meter metric which measures mean throughput and one-, five-, and fifteen-minute
+    exponentially-weighted moving average throughputs.
+    """
+
+    def __init__(self, clock=time):
+        super(Meter, self).__init__()
+        self.lock = Lock()
+        self.clock = clock
+        self.clear()
+
+    def clear(self):
+        with self.lock:
+            self.start_time = self.clock.time()
+            self.counter = 0.0
+            self.m1rate = ExpWeightedMovingAvg(period=1, clock=self.clock)
+            self.m5rate = ExpWeightedMovingAvg(period=5, clock=self.clock)
+            self.m15rate = ExpWeightedMovingAvg(period=15, clock=self.clock)
+
+    def get_one_minute_rate(self):
+        return self.m1rate.get_rate()
+
+    def get_five_minute_rate(self):
+        return self.m5rate.get_rate()
+
+    def get_fifteen_minute_rate(self):
+        return self.m15rate.get_rate()
+
+    def tick(self):
+        self.m1rate.tick()
+        self.m5rate.tick()
+        self.m15rate.tick()
+
+    def mark(self, value=1):
+        with self.lock:
+            self.counter += value
+            self.m1rate.add(value)
+            self.m5rate.add(value)
+            self.m15rate.add(value)
+
+    def get_count(self):
+        return self.counter
+
+    def get_mean_rate(self):
+        if self.counter == 0:
+            return 0
+        elapsed = self.clock.time() - self.start_time
+        return self.counter / elapsed
+
+    def _convertNsRate(self, ratePerNs):
+        return ratePerNs
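
A small sketch of the meter's behaviour (timings are illustrative); note the EWMA rates read 0.0 until the first 5-second tick has elapsed:

    import time
    from pyformance.meters import Meter

    requests = Meter()
    requests.mark()                         # one event
    requests.mark(10)                       # a batch of ten
    time.sleep(1)
    print(requests.get_count())             # 11.0
    print(requests.get_mean_rate())         # ~11 events/second since creation
    print(requests.get_one_minute_rate())   # 0.0 until the first 5s tick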

+ 119 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/meters/timer.py

@@ -0,0 +1,119 @@
+import time
+try:
+    from blinker import Namespace
+except ImportError:
+    Namespace = None
+from .histogram import Histogram, DEFAULT_SIZE, DEFAULT_ALPHA
+from .meter import Meter
+
+if Namespace is not None:
+    timer_signals = Namespace()
+    call_too_long = timer_signals.signal("call_too_long")
+else:
+    call_too_long = None
+
+
+class Timer(object):
+
+    """
+    A timer metric which aggregates timing durations and provides duration statistics, plus
+    throughput statistics via Meter and Histogram.
+    """
+
+    def __init__(self, threshold=None, size=DEFAULT_SIZE, alpha=DEFAULT_ALPHA, clock=time):
+        super(Timer, self).__init__()
+        self.meter = Meter(clock=clock)
+        self.hist = Histogram(clock=clock)
+        self.threshold = threshold
+
+    def get_count(self):
+        "get count from internal histogram"
+        return self.hist.get_count()
+
+    def get_sum(self):
+        "get sum from internal histogram"
+        return self.hist.get_sum()
+
+    def get_max(self):
+        "get max from internal histogram"
+        return self.hist.get_max()
+
+    def get_min(self):
+        "get min from internal histogram"
+        return self.hist.get_min()
+
+    def get_mean(self):
+        "get mean from internal histogram"
+        return self.hist.get_mean()
+
+    def get_stddev(self):
+        "get stddev from internal histogram"
+        return self.hist.get_stddev()
+
+    def get_var(self):
+        "get var from internal histogram"
+        return self.hist.get_var()
+
+    def get_snapshot(self):
+        "get snapshot from internal histogram"
+        return self.hist.get_snapshot()
+
+    def get_mean_rate(self):
+        "get mean rate from internal meter"
+        return self.meter.get_mean_rate()
+
+    def get_one_minute_rate(self):
+        "get 1 minute rate from internal meter"
+        return self.meter.get_one_minute_rate()
+
+    def get_five_minute_rate(self):
+        "get 5 minute rate from internal meter"
+        return self.meter.get_five_minute_rate()
+
+    def get_fifteen_minute_rate(self):
+        "get 15 minute rate from internal meter"
+        return self.meter.get_fifteen_minute_rate()
+
+    def _update(self, seconds):
+        if seconds >= 0:
+            self.hist.add(seconds)
+            self.meter.mark()
+
+    def time(self, *args, **kwargs):
+        """
+        Parameters will be sent to signal, if fired.
+        Returns a timer context instance which can be used from a with-statement.
+        Without with-statement you have to call the stop method on the context
+        """
+        return TimerContext(self, self.meter.clock, *args, **kwargs)
+
+    def clear(self):
+        "clear internal histogram and meter"
+        self.hist.clear()
+        self.meter.clear()
+
+
+class TimerContext(object):
+
+    def __init__(self, timer, clock, *args, **kwargs):
+        super(TimerContext, self).__init__()
+        self.clock = clock
+        self.timer = timer
+        self.start_time = self.clock.time()
+        self.kwargs = kwargs
+        self.args = args
+
+    def stop(self):
+        elapsed = self.clock.time() - self.start_time
+        self.timer._update(elapsed)
+        if self.timer.threshold and self.timer.threshold < elapsed and call_too_long is not None:
+            call_too_long.send(
+                self.timer, elapsed=elapsed, *self.args, **self.kwargs)
+        return elapsed
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, t, v, tb):
+        self.stop()
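
A minimal sketch of the with-statement usage described in Timer.time(); the threshold and the url keyword are illustrative (the keyword is only forwarded to the call_too_long signal, which exists only when blinker is installed):

    import time
    from pyformance.meters import Timer

    request_timer = Timer(threshold=2.0)      # threshold in seconds; optional
    with request_timer.time(url="/index"):    # kwargs forwarded to the signal
        time.sleep(0.1)                       # the code being measured
    print(request_timer.get_count())          # 1.0
    print(request_timer.get_mean())           # ~0.1 seconds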

+ 370 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/registry.py

@@ -0,0 +1,370 @@
+import functools
+import re
+import time
+import sys
+from .meters import Counter, Histogram, Meter, Timer, Gauge, CallbackGauge, SimpleGauge
+
+
+class MetricsRegistry(object):
+
+    """
+    A single interface used to gather metrics on a service. It keeps track of
+    all the relevant Counters, Meters, Histograms, and Timers. It does not have
+    a reference back to its service. The service would create a
+    L{MetricsRegistry} to manage all of its metrics tools.
+    """
+    def __init__(self, clock = time):
+        """
+        Creates a new L{MetricsRegistry} instance.
+        """
+        self._timers = {}
+        self._meters = {}
+        self._counters = {}
+        self._histograms = {}
+        self._gauges = {}
+        self._clock = clock
+
+    def add(self, key, metric):
+        """
+        Use this method to manually add custom metric instances to the registry
+        which are not created with their constructor's default arguments,
+        e.g. Histograms with a different size.
+        
+        :param key: name of the metric
+        :type key: C{str}
+        :param metric: instance of Histogram, Meter, Gauge, Timer or Counter
+        """
+        class_map = {
+           Histogram: self._histograms,
+           Meter: self._meters,
+           Gauge: self._gauges,
+           Timer: self._timers,
+           Counter: self._counters,
+        }
+        for cls, registry in class_map.items():
+            if isinstance(metric, cls):
+                if key in registry:
+                    raise LookupError("Metric %r already registered" % key)
+                registry[key] = metric
+                return
+        raise TypeError("Invalid class. Could not register metric %r" % key)
+
+    def counter(self, key):
+        """
+        Gets a counter based on a key, creates a new one if it does not exist.
+
+        :param key: name of the metric
+        :type key: C{str}
+
+        :return: L{Counter}
+        """
+        if key not in self._counters:
+            self._counters[key] = Counter()
+        return self._counters[key]
+
+    def histogram(self, key):
+        """
+        Gets a histogram based on a key, creates a new one if it does not exist.
+
+        :param key: name of the metric
+        :type key: C{str}
+
+        :return: L{Histogram}
+        """
+        if key not in self._histograms:
+            self._histograms[key] = Histogram(clock=self._clock)
+        return self._histograms[key]
+
+    def gauge(self, key, gauge=None, default=float("nan")):
+        if key not in self._gauges:
+            if gauge is None:
+                gauge = SimpleGauge(
+                    default)  # raise TypeError("gauge required for registering")
+            elif not isinstance(gauge, Gauge):
+                if not callable(gauge):
+                    raise TypeError("gauge getter not callable")
+                gauge = CallbackGauge(gauge)
+            self._gauges[key] = gauge
+        return self._gauges[key]
+
+    def meter(self, key):
+        """
+        Gets a meter based on a key, creates a new one if it does not exist.
+
+        :param key: name of the metric
+        :type key: C{str}
+
+        :return: L{Meter}
+        """
+        if key not in self._meters:
+            self._meters[key] = Meter(clock=self._clock)
+        return self._meters[key]
+
+    def timer(self, key):
+        """
+        Gets a timer based on a key, creates a new one if it does not exist.
+
+        :param key: name of the metric
+        :type key: C{str}
+
+        :return: L{Timer}
+        """
+        if key not in self._timers:
+            self._timers[key] = Timer(clock=self._clock)
+        return self._timers[key]
+
+    def clear(self):
+        self._meters.clear()
+        self._counters.clear()
+        self._gauges.clear()
+        self._timers.clear()
+        self._histograms.clear()
+
+    def _get_counter_metrics(self, key):
+        if key in self._counters:
+            counter = self._counters[key]
+            return {"count": counter.get_count()}
+        return {}
+
+    def _get_gauge_metrics(self, key):
+        if key in self._gauges:
+            gauge = self._gauges[key]
+            return {"value": gauge.get_value()}
+        return {}
+
+    def _get_histogram_metrics(self, key):
+        if key in self._histograms:
+            histogram = self._histograms[key]
+            snapshot = histogram.get_snapshot()
+            res = {"avg": histogram.get_mean(),
+                   "count": histogram.get_count(),
+                   "max": histogram.get_max(),
+                   "min": histogram.get_min(),
+                   "std_dev": histogram.get_stddev(),
+                   "75_percentile": snapshot.get_75th_percentile(),
+                   "95_percentile": snapshot.get_95th_percentile(),
+                   "99_percentile": snapshot.get_99th_percentile(),
+                   "999_percentile": snapshot.get_999th_percentile()}
+            return res
+        return {}
+
+    def _get_meter_metrics(self, key):
+        if key in self._meters:
+            meter = self._meters[key]
+            res = {"count": meter.get_count(),
+                   "15m_rate": meter.get_fifteen_minute_rate(),
+                   "5m_rate": meter.get_five_minute_rate(),
+                   "1m_rate": meter.get_one_minute_rate(),
+                   "mean_rate": meter.get_mean_rate()}
+            return res
+        return {}
+
+    def _get_timer_metrics(self, key):
+        if key in self._timers:
+            timer = self._timers[key]
+            snapshot = timer.get_snapshot()
+            res = {"avg": timer.get_mean(),
+                   "sum": timer.get_sum(),
+                   "count": timer.get_count(),
+                   "max": timer.get_max(),
+                   "min": timer.get_min(),
+                   "std_dev": timer.get_stddev(),
+                   "15m_rate": timer.get_fifteen_minute_rate(),
+                   "5m_rate": timer.get_five_minute_rate(),
+                   "1m_rate": timer.get_one_minute_rate(),
+                   "mean_rate": timer.get_mean_rate(),
+                   "75_percentile": snapshot.get_75th_percentile(),
+                   "95_percentile": snapshot.get_95th_percentile(),
+                   "99_percentile": snapshot.get_99th_percentile(),
+                   "999_percentile": snapshot.get_999th_percentile()}
+            return res
+        return {}
+
+    def get_metrics(self, key):
+        """
+        Gets all the metrics for a specified key.
+
+        :param key: name of the metric
+        :type key: C{str}
+
+        :return: C{dict}
+        """
+        metrics = {}
+        for getter in (self._get_counter_metrics, self._get_histogram_metrics,
+                       self._get_meter_metrics, self._get_timer_metrics,
+                       self._get_gauge_metrics):
+            metrics.update(getter(key))
+        return metrics
+
+    def dump_metrics(self):
+        """
+        Formats all of the metrics and returns them as a dict.
+
+        :return: C{list} of C{dict} of metrics
+        """
+        metrics = {}
+        for metric_type in (self._counters,
+                            self._histograms,
+                            self._meters,
+                            self._timers,
+                            self._gauges):
+            for key in metric_type.keys():
+                metrics[key] = self.get_metrics(key)
+
+        return metrics
+
+
+class RegexRegistry(MetricsRegistry):
+
+    """
+    A single interface used to gather metrics on a service. This class uses a regex to combine
+    measures that match a pattern. For example, if you have a REST API, instead of defining
+    a timer for each method, you can use a regex to capture all API calls and group them.
+    A pattern like '^/api/(?P<model>\w+)/\d+(?:/(?P<verb>\w+))?$' will group and measure the following:
+        /api/users/1 -> users
+        /api/users/1/edit -> users/edit
+        /api/users/2/edit -> users/edit
+    """
+    def __init__(self, pattern = None, clock = time):
+        super(RegexRegistry, self).__init__(clock)
+        if pattern is not None:
+            self.pattern = re.compile(pattern)
+        else:
+            self.pattern = re.compile('^$')
+
+    def _get_key(self, key):
+        matches = self.pattern.finditer(key)
+        key = '/'.join((v for match in matches for v in match.groups() if v))
+        return key
+
+    def timer(self, key):
+        return super(RegexRegistry, self).timer(self._get_key(key))
+
+    def histogram(self, key):
+        return super(RegexRegistry, self).histogram(self._get_key(key))
+
+    def counter(self, key):
+        return super(RegexRegistry, self).counter(self._get_key(key))
+
+    def gauge(self, key, gauge=None):
+        return super(RegexRegistry, self).gauge(self._get_key(key), gauge)
+
+    def meter(self, key):
+        return super(RegexRegistry, self).meter(self._get_key(key))
+
+
+_global_registry = MetricsRegistry()
+
+
+def global_registry():
+    return _global_registry
+
+
+def set_global_registry(registry):
+    global _global_registry
+    _global_registry = registry
+
+
+def counter(key):
+    return _global_registry.counter(key)
+
+
+def histogram(key):
+    return _global_registry.histogram(key)
+
+
+def meter(key):
+    return _global_registry.meter(key)
+
+
+def timer(key):
+    return _global_registry.timer(key)
+
+
+def gauge(key, gauge=None):
+    return _global_registry.gauge(key, gauge)
+
+
+def dump_metrics():
+    return _global_registry.dump_metrics()
+
+
+def clear():
+    return _global_registry.clear()
+
+def get_qualname(obj):
+    if sys.version_info[0] > 2:
+        return obj.__qualname__
+    return obj.__name__
+
+def count_calls(fn):
+    """
+    Decorator to track the number of times a function is called.
+
+    :param fn: the function to be decorated
+    :type fn: C{func}
+
+    :return: the decorated function
+    :rtype: C{func}
+    """
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        counter("%s_calls" % get_qualname(fn)).inc()
+        return fn(*args, **kwargs)
+    return wrapper
+
+
+def meter_calls(fn):
+    """
+    Decorator to track the rate at which a function is called.
+
+    :param fn: the function to be decorated
+    :type fn: C{func}
+
+    :return: the decorated function
+    :rtype: C{func}
+    """
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        meter("%s_calls" % get_qualname(fn)).mark()
+        return fn(*args, **kwargs)
+    return wrapper
+
+
+def hist_calls(fn):
+    """
+    Decorator to check the distribution of return values of a function.
+
+    :param fn: the function to be decorated
+    :type fn: C{func}
+
+    :return: the decorated function
+    :rtype: C{func}
+    """
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        _histogram = histogram("%s_calls" % get_qualname(fn))
+        rtn = fn(*args, **kwargs)
+        if type(rtn) in (int, float):
+            _histogram.add(rtn)
+        return rtn
+    return wrapper
+
+
+def time_calls(fn):
+    """
+    Decorator to time the execution of the function.
+
+    :param fn: the function to be decorated
+    :type fn: C{func}
+
+    :return: the decorated function
+    :rtype: C{func}
+    """
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        _timer = timer("%s_calls" % get_qualname(fn))
+        with _timer.time(fn = get_qualname(fn)):
+            return fn(*args, **kwargs)
+    return wrapper
+
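
Putting the module-level helpers and decorators above together, a hedged end-to-end sketch (the function and metric names are illustrative):

    from pyformance import counter, timer, count_calls, time_calls, dump_metrics

    @count_calls              # counter "handle_request_calls"
    @time_calls               # timer   "handle_request_calls"
    def handle_request():
        counter("requests_total").inc()
        with timer("db.query").time():
            pass              # the timed work would go here

    handle_request()
    print(dump_metrics())     # dict keyed by "handle_request_calls",
                              # "requests_total" and "db.query"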

+ 28 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/__init__.py

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+# lazy import reporters to minimize startup time
+
+
+def HostedGraphiteReporter(*args, **kwargs):
+    from .hosted_graphite_reporter import HostedGraphiteReporter as cls
+    return cls(*args, **kwargs)
+
+
+def CarbonReporter(*args, **kwargs):
+    from .carbon_reporter import CarbonReporter as cls
+    return cls(*args, **kwargs)
+
+
+def UdpCarbonReporter(*args, **kwargs):
+    from .carbon_reporter import UdpCarbonReporter as cls
+    return cls(*args, **kwargs)
+
+
+def ConsoleReporter(*args, **kwargs):
+    from .console_reporter import ConsoleReporter as cls
+    return cls(*args, **kwargs)
+
+
+def CsvReporter(*args, **kwargs):
+    from .csv_reporter import CsvReporter as cls
+    return cls(*args, **kwargs)
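
A sketch of how these lazy factories are typically used (the interval and metric name are illustrative):

    from pyformance import counter, global_registry
    from pyformance.reporters import ConsoleReporter

    reporter = ConsoleReporter(registry=global_registry(), reporting_interval=10)
    reporter.start()               # daemon thread dumps all metrics every 10s
    counter("page_views").inc()    # ... application code runs here ...
    reporter.stop()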

+ 68 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/carbon_reporter.py

@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+import socket
+import sys
+
+from .reporter import Reporter
+
+DEFAULT_CARBON_SERVER = '0.0.0.0'
+DEFAULT_CARBON_PORT = 2003
+
+
+class CarbonReporter(Reporter):
+
+    """
+    Carbon is the network daemon to collect metrics for Graphite
+    """
+
+    def __init__(self, registry=None, reporting_interval=5, prefix="",
+                 server=DEFAULT_CARBON_SERVER, port=DEFAULT_CARBON_PORT, socket_factory=socket.socket,
+                 clock=None):
+        super(CarbonReporter, self).__init__(
+            registry, reporting_interval, clock)
+        self.prefix = prefix
+        self.server = server
+        self.port = port
+        self.socket_factory = socket_factory
+
+    def report_now(self, registry=None, timestamp=None):
+        metrics = self._collect_metrics(registry or self.registry, timestamp)
+        if metrics:
+            # XXX: keep connection open 
+            sock = self.socket_factory()
+            sock.connect((self.server, self.port))
+
+            if sys.version_info[0] > 2:
+                sock.sendall(metrics.encode())
+            else:
+                sock.sendall(metrics)
+
+            sock.close()
+
+    def _collect_metrics(self, registry, timestamp=None):
+        timestamp = timestamp or int(round(self.clock.time()))
+        metrics = registry.dump_metrics()
+        metrics_data = []
+        for key in metrics.keys():
+            for value_key in metrics[key].keys():
+                metricLine = "%s%s.%s %s %s\n" % (
+                    self.prefix, key, value_key, metrics[key][value_key], timestamp)
+                metrics_data.append(metricLine)
+        return ''.join(metrics_data)
+
+    
+class UdpCarbonReporter(CarbonReporter):
+    
+    """
+    The default CarbonReporter uses TCP.
+    This sub-class uses UDP instead which might be unreliable but it is faster
+    """
+    
+    def report_now(self, registry=None, timestamp=None):
+        metrics = self._collect_metrics(registry or self.registry, timestamp)
+        if metrics:
+            sock = self.socket_factory(socket.AF_INET, socket.SOCK_DGRAM)
+
+            if sys.version_info[0] > 2:
+                sock.sendto(metrics.encode(), (self.server, self.port))
+            else:
+                sock.sendto(metrics, (self.server, self.port))
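
For context, each reported value becomes one line of Graphite's plaintext protocol, "<prefix><key>.<field> <value> <timestamp>". A hedged sketch, assuming a reachable Graphite host:

    from pyformance.reporters import CarbonReporter

    reporter = CarbonReporter(prefix="hue.", server="graphite.example.com",
                              port=2003, reporting_interval=60)
    reporter.start()
    # emits lines such as:
    #   hue.requests.count 42 1400000000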

+ 39 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/console_reporter.py

@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+import sys
+import datetime
+from .reporter import Reporter
+
+
+class ConsoleReporter(Reporter):
+
+    """
+    Show metrics in a human readable form.
+    This is useful for debugging if you want to read the current state on the console.
+    """
+
+    def __init__(self, registry=None, reporting_interval=30, stream=sys.stderr, clock=None):
+        super(ConsoleReporter, self).__init__(
+            registry, reporting_interval, clock)
+        self.stream = stream
+
+    def report_now(self, registry=None, timestamp=None):
+        metrics = self._collect_metrics(registry or self.registry, timestamp)
+        for line in metrics:
+            print(line, file=self.stream)
+
+    def _collect_metrics(self, registry, timestamp=None):
+        timestamp = timestamp or int(round(self.clock.time()))
+        dt = datetime.datetime(1970, 1, 1) + \
+            datetime.timedelta(seconds=timestamp)
+        metrics = registry.dump_metrics()
+        metrics_data = ["== %s ===================================" %
+                        dt.strftime("%Y-%m-%d %H:%M:%S")]
+        for key in metrics.keys():
+            values = metrics[key]
+            metrics_data.append("%s:" % key)
+            for value_key in values.keys():
+                metrics_data.append(
+                    "%20s = %s" % (value_key, values[value_key]))
+        metrics_data.append("")
+        return metrics_data

+ 49 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/csv_reporter.py

@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+import sys
+import os
+import datetime
+from .reporter import Reporter
+
+
+class CsvReporter(Reporter):
+
+    """
+    Report metrics to comma-separated-value files.
+    Each metric gets its own file.
+    """
+
+    def __init__(self, registry=None, reporting_interval=30, path=None, separator="\t", clock=None):
+        super(CsvReporter, self).__init__(
+            registry, reporting_interval, clock)
+        self.path = path or os.getcwd()
+        if not os.path.exists(self.path):
+            os.makedirs(self.path)
+        self.separator = separator
+        self.files = {}
+
+    def report_now(self, registry=None, timestamp=None):
+        self._save_metrics(registry or self.registry, timestamp)
+
+    def _save_metrics(self, registry, timestamp=None):
+        timestamp = timestamp or int(round(self.clock.time()))
+        dt = datetime.datetime(1970, 1, 1) + \
+            datetime.timedelta(seconds=timestamp)
+        date = dt.strftime("%Y-%m-%d %H:%M:%S")
+        metrics = registry.dump_metrics()
+        for key in metrics.keys():
+            values = metrics[key]
+            value_keys = list(sorted(values.keys()))
+            target = os.path.join(self.path, "%s.csv" % key)
+            f = self.files.get(target, None)
+            if f is None:
+                if not os.path.exists(target):
+                    f = open(target, "w")
+                    f.write("%s\n" % self.separator.join(["timestamp"] + value_keys))
+                else:
+                    f = open(target, "a")
+                self.files[target] = f
+            cols = [date]
+            for vk in value_keys:
+                cols.append(values[vk])
+            f.write("%s\n" % self.separator.join(map(str, cols)))
+            f.flush()

+ 47 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/hosted_graphite_reporter.py

@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+import sys
+import urllib2
+import base64
+
+from ..meters import Counter, Histogram, Meter, Timer
+from ..registry import MetricsRegistry
+from .reporter import Reporter
+
+
+class HostedGraphiteReporter(Reporter):
+
+    """
+    This reporter requires an api key from http://www.hostedgraphite.com
+    """
+
+    def __init__(
+        self, hosted_graphite_api_key, registry=None, reporting_interval=10, url="https://hostedgraphite.com/api/v1/sink",
+            clock=None):
+        super(HostedGraphiteReporter, self).__init__(
+            registry, reporting_interval, clock)
+        self.url = url
+        self.api_key = hosted_graphite_api_key
+
+    def report_now(self, registry=None, timestamp=None):
+        metrics = self._collect_metrics(registry or self.registry, timestamp)
+        if metrics:
+            try:
+                # XXX: better use http-keepalive/pipelining somehow?
+                request = urllib2.Request(self.url, metrics)
+                request.add_header("Authorization", "Basic %s" %
+                                   base64.encodestring(self.api_key).strip())
+                result = urllib2.urlopen(request)
+            except Exception as e:
+                print(e, file=sys.stderr)
+
+    def _collect_metrics(self, registry, timestamp=None):
+        timestamp = timestamp or int(round(self.clock.time()))
+        metrics = registry.dump_metrics()
+        metrics_data = []
+        for key in metrics.keys():
+            for value_key in metrics[key].keys():
+                metric_line = "%s.%s %s %s\n" % (
+                    key, value_key, metrics[key][value_key], timestamp)
+                metrics_data.append(metric_line)
+        return ''.join(metrics_data)

+ 45 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/reporters/reporter.py

@@ -0,0 +1,45 @@
+import time
+from threading import Thread, Event
+from ..registry import global_registry, get_qualname
+
+
+class Reporter(object):
+
+    def create_thread(self):
+        # noinspection PyAttributeOutsideInit
+        self._loop_thread = Thread(target=self._loop, name="pyformance reporter {0}".format(get_qualname(type(self))))
+        self._loop_thread.setDaemon(True)
+
+    def __init__(self, registry=None, reporting_interval=30, clock=None):
+        self.registry = registry or global_registry()
+        self.reporting_interval = reporting_interval
+        self.clock = clock or time
+        self._stopped = Event()
+        self.create_thread()
+
+    def start(self):
+        if self._stopped.is_set():
+            return False
+
+        r = str(self._loop_thread)
+        if "stopped" in r:
+            # has to be recreated in a celery worker
+            self.create_thread()
+        elif "started" in r:
+            # already started
+            return False
+
+        self._loop_thread.start()
+        return True
+
+    def stop(self):
+        self._stopped.set()
+
+    def _loop(self):
+        while not self._stopped.is_set():
+            self.report_now(self.registry)
+            time.sleep(self.reporting_interval)
+        # self._stopped.clear()
+
+    def report_now(self, registry=None, timestamp=None):
+        raise NotImplementedError(self.report_now)
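
A minimal sketch of a custom reporter built on this base class; LogReporter and its logger name are illustrative, not part of the package:

    import logging
    from pyformance.reporters.reporter import Reporter

    class LogReporter(Reporter):
        def report_now(self, registry=None, timestamp=None):
            metrics = (registry or self.registry).dump_metrics()
            logging.getLogger("metrics").info("%s", metrics)

    LogReporter(reporting_interval=30).start()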

+ 3 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/__init__.py

@@ -0,0 +1,3 @@
+from .samples import ExpDecayingSample
+from .moving_average import ExpWeightedMovingAvg
+from .snapshot import Snapshot

+ 69 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/moving_average.py

@@ -0,0 +1,69 @@
+import math
+import time
+
+
+class ExpWeightedMovingAvg(object):
+
+    """
+    An exponentially-weighted moving average.
+    """
+    INTERVAL = 5.0  # seconds
+    SECONDS_PER_MINUTE = 60.0
+
+    def __init__(self, period, interval=INTERVAL, clock=time):
+        """
+        Create a new EWMA with a specific smoothing constant.
+
+        :type period: C{int}
+        :param period: the time in minutes it takes to reach a given significance level
+        :type interval: C{int}
+        :param interval: the expected tick interval, defaults to 5s
+        """
+        super(ExpWeightedMovingAvg, self).__init__()
+        self.clock = clock
+        self.uncounted = 0.0
+        self.interval = interval
+        self.rate = -1
+        self.period = period * ExpWeightedMovingAvg.SECONDS_PER_MINUTE
+        self.last_tick = self.clock.time()
+
+    def get_rate(self):
+        if self.clock.time() - self.last_tick >= self.interval:
+            self.tick()
+        if self.rate >= 0:
+            return self.rate
+        return 0
+
+    def add(self, value):
+        self.uncounted += value
+
+    def tick(self):
+        """
+        Mark the passage of time and decay the current rate accordingly.
+        """
+        prev = self.last_tick
+        now = self.clock.time()
+        interval = now - prev
+        if interval <= 0:
+            return
+
+        instant_rate = self.uncounted / interval
+        self.uncounted = 0.0
+
+        if self.rate >= 0:
+            self.rate += (self._alpha(interval) * (instant_rate - self.rate))
+        else:
+            self.rate = instant_rate
+
+        self.last_tick = now
+
+    def _alpha(self, interval):
+        """
+        Calculate the alpha based on the time since the last tick. This is
+        necessary because a single threaded Python program loses precision
+        under high load, so we can't assume a consistent I{EWMA._interval}.
+
+        :type interval: C{float}
+        :param interval: the interval we use to calculate the alpha
+        """
+        return 1 - math.exp(-interval / self.period)
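
Restated as equations (nothing beyond what tick() and _alpha() above already compute), with t the elapsed time since the last tick:

    alpha = 1 - exp(-t / (period * 60))
    rate  = rate + alpha * (instant_rate - rate)

so for the one-minute rate (period=1) and the default 5-second interval, alpha is roughly 1 - exp(-5/60), about 0.08.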

+ 112 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/samples.py

@@ -0,0 +1,112 @@
+import time
+import random
+import math
+import heapq
+from .snapshot import Snapshot
+
+DEFAULT_SIZE = 1028
+DEFAULT_ALPHA = 0.015
+
+
+class ExpDecayingSample(object):
+
+    """
+    An exponentially-decaying random sample of longs. Uses Cormode et al's
+    forward-decaying priority reservoir sampling method to produce a
+    statistically representative sample, exponentially biased towards newer
+    entries.
+
+    @see: <a href="http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf">
+          Cormode et al. Forward Decay: A Practical Time Decay Model for
+          Streaming Systems. ICDE '09: Proceedings of the 2009 IEEE
+          International Conference on Data Engineering (2009)</a>
+    """
+
+    RESCALE_THREASHOLD = 3600.0  # 1 hour
+
+    def __init__(self, size=DEFAULT_SIZE, alpha=DEFAULT_ALPHA, clock=time):
+        """
+        Creates a new L{ExponentiallyDecayingSample}.
+
+        :type size: C{int}
+        :param size: the number of samples to keep in the sampling reservoir
+        :type alpha: C{float}
+        :param alpha: the exponential decay factor; the higher this is, the more
+                      biased the sample will be towards newer values
+        :type clock: C{function}
+        :param clock: the function used to return the current time, default to
+                      seconds since the epoch; to be used with other time
+                      units, or with the twisted clock for our testing purposes
+        """
+        super(ExpDecayingSample, self).__init__()
+        self.clock = clock
+        self.size = size
+        self.alpha = alpha
+        self.clear()
+
+    def clear(self):
+        self.values = {}
+        self.priorities = []
+        self.counter = 0
+        self.start_time = self.clock.time()
+        self.next_time = self.clock.time() + \
+            ExpDecayingSample.RESCALE_THREASHOLD
+
+    def get_size(self):
+        return self.counter if self.counter < self.size else self.size
+
+    def update(self, value):
+        """
+        Adds a value to the sample.
+
+        :type value: C{int} or C{float}
+        :param value: the value to be added
+        """
+        if self.size == 0:
+            return
+        self._rescale_if_necessary()
+        priority = self._weight(
+            self.clock.time() - self.start_time) / random.random()
+        new_counter = self.counter + 1
+        self.counter = new_counter
+
+        if new_counter <= self.size:
+            self.values[priority] = value
+            heapq.heappush(self.priorities, priority)
+        else:
+            first = heapq.heappop(self.priorities)
+            if first < priority:
+                if priority not in self.values:
+                    self.values[priority] = value
+                    heapq.heappush(self.priorities, priority)
+                    while first not in self.values:
+                        first = heapq.heappop(self.priorities)
+                    del self.values[first]
+            else:
+                heapq.heappush(self.priorities, first)
+
+    def _rescale_if_necessary(self):
+        if self.clock.time() >= self.next_time:
+            self._rescale()
+
+    def _rescale(self):
+        self.next_time = self.clock.time() + \
+            ExpDecayingSample.RESCALE_THREASHOLD
+        old_start_time = self.start_time
+        self.start_time = self.clock.time()
+        new_values = {}
+        new_priorities = []
+        for key, val in self.values.items():
+            priority = key * \
+                math.exp(-self.alpha * (self.start_time - old_start_time))
+            new_values[priority] = val
+            heapq.heappush(new_priorities, priority)
+        self.values = new_values
+        self.priorities = new_priorities
+        self.counter = len(self.values)
+
+    def _weight(self, value):
+        return math.exp(self.alpha * value)
+
+    def get_snapshot(self):
+        return Snapshot(self.values.values())

+ 59 - 0
desktop/core/ext-py/pyformance-0.3.2/pyformance/stats/snapshot.py

@@ -0,0 +1,59 @@
+class Snapshot(object):
+
+    """
+    This class is used by the histogram meter
+    """
+
+    MEDIAN = 0.5
+    P75_Q = 0.75
+    P95_Q = 0.95
+    P99_Q = 0.99
+    P999_Q = 0.999
+
+    def __init__(self, values):
+        super(Snapshot, self).__init__()
+        self.values = sorted(values)
+
+    def get_size(self):
+        "get current size"
+        return len(self.values)
+
+    def get_median(self):
+        "get current median"
+        return self.get_percentile(Snapshot.MEDIAN)
+
+    def get_75th_percentile(self):
+        "get current 75th percentile"
+        return self.get_percentile(Snapshot.P75_Q)
+
+    def get_95th_percentile(self):
+        "get current 95th percentile"
+        return self.get_percentile(Snapshot.P95_Q)
+
+    def get_99th_percentile(self):
+        "get current 99th percentile"
+        return self.get_percentile(Snapshot.P99_Q)
+
+    def get_999th_percentile(self):
+        "get current 999th percentile"
+        return self.get_percentile(Snapshot.P999_Q)
+
+    def get_percentile(self, percentile):
+        """
+        get custom percentile
+        
+        :param percentile: float value between 0 and 1
+        """
+        if percentile < 0 or percentile > 1:
+            raise ValueError("{0} is not in [0..1]".format(percentile))
+        length = len(self.values)
+        if length == 0:
+            return 0
+        pos = percentile * (length + 1)
+        if pos < 1:
+            return self.values[0]
+        if pos >= length:
+            return self.values[-1]
+        lower = self.values[int(pos) - 1]
+        upper = self.values[int(pos)]
+        return lower + (pos - int(pos)) * (upper - lower)
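
A worked example of the interpolation above: for values [1, 2, 3, 4], the median position is 0.5 * (4 + 1) = 2.5, so lower = values[1] = 2, upper = values[2] = 3, and the result is 2 + 0.5 * (3 - 2) = 2.5; for the 99th percentile, the position 0.99 * 5 = 4.95 is not less than the length, so the maximum value 4 is returned.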

+ 5 - 0
desktop/core/ext-py/pyformance-0.3.2/setup.cfg

@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+

+ 30 - 0
desktop/core/ext-py/pyformance-0.3.2/setup.py

@@ -0,0 +1,30 @@
+import os
+import functools
+import platform
+from setuptools import setup, find_packages
+
+_IN_PACKAGE_DIR = functools.partial(os.path.join, "pyformance")
+
+with open(_IN_PACKAGE_DIR("__version__.py")) as version_file:
+    exec(version_file.read())
+
+install_requires = []  # optional: ["blinker==1.2"]
+if platform.python_version() < '2.7':
+    install_requires.append('unittest2')
+
+setup(name="pyformance",
+      classifiers=[
+          "Development Status :: 4 - Beta",
+          "Intended Audience :: Developers",
+          "Programming Language :: Python :: 2.7",
+      ],
+      description="Performance metrics, based on Coda Hale's Yammer metrics",
+      license="Apache 2.0",
+      author="Omer Gertel",
+      author_email="omer.gertel@gmail.com",
+      version=__version__,
+      packages=find_packages(exclude=["tests"]),
+      data_files=[],
+      install_requires=install_requires,
+      scripts=[],
+      )