
[libs] Adding pydruid 0.4.5

Romain 5 years ago
parent commit 33ab9f080c

+ 0 - 0
desktop/core/ext-py/pydruid/__init__.py


+ 162 - 0
desktop/core/ext-py/pydruid/async_client.py

@@ -0,0 +1,162 @@
+#
+# Copyright 2016 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import division
+from __future__ import absolute_import
+
+import json
+from pydruid.client import BaseDruidClient
+
+try:
+    from tornado import gen
+    from tornado.httpclient import AsyncHTTPClient, HTTPError
+except ImportError:
+    print('Warning: unable to import Tornado. The asynchronous client will not work.')
+
+
+class AsyncPyDruid(BaseDruidClient):
+    """
+    Asynchronous PyDruid client which mirrors the functionality of the
+    synchronous PyDruid client, but executes queries asynchronously (using the
+    asynchronous HTTP client from the Tornado framework).
+
+    Returns Query objects that can be used for exporting query results into
+    TSV files or pandas.DataFrame objects for subsequent analysis.
+
+    :param str url: URL of Broker node in the Druid cluster
+    :param str endpoint: Endpoint that Broker listens for queries on
+
+    Example
+
+    .. code-block:: python
+        :linenos:
+
+            >>> from pydruid.async_client import *
+
+            >>> query = AsyncPyDruid('http://localhost:8083', 'druid/v2/')
+
+            >>> top = yield query.topn(
+                    datasource='twitterstream',
+                    granularity='all',
+                    intervals='2013-10-04/pt1h',
+                    aggregations={"count": doublesum("count")},
+                    dimension='user_name',
+                    filter = Dimension('user_lang') == 'en',
+                    metric='count',
+                    threshold=2
+                )
+
+            >>> print json.dumps(top.query_dict, indent=2)
+            >>> {
+                  "metric": "count",
+                  "aggregations": [
+                    {
+                      "type": "doubleSum",
+                      "fieldName": "count",
+                      "name": "count"
+                    }
+                  ],
+                  "dimension": "user_name",
+                  "filter": {
+                    "type": "selector",
+                    "dimension": "user_lang",
+                    "value": "en"
+                  },
+                  "intervals": "2013-10-04/pt1h",
+                  "dataSource": "twitterstream",
+                  "granularity": "all",
+                  "threshold": 2,
+                  "queryType": "topN"
+                }
+
+            >>> print top.result
+            >>> [{'timestamp': '2013-10-04T00:00:00.000Z',
+                'result': [{'count': 7.0, 'user_name': 'user_1'},
+                {'count': 6.0, 'user_name': 'user_2'}]}]
+
+            >>> df = top.export_pandas()
+            >>> print df
+            >>>    count                 timestamp      user_name
+                0      7  2013-10-04T00:00:00.000Z         user_1
+                1      6  2013-10-04T00:00:00.000Z         user_2
+    """
+
+    def __init__(self, url, endpoint):
+        super(AsyncPyDruid, self).__init__(url, endpoint)
+
+    @gen.coroutine
+    def _post(self, query):
+        http_client = AsyncHTTPClient()
+        try:
+            headers, querystr, url = self._prepare_url_headers_and_body(query)
+            response = yield http_client.fetch(
+                url, method='POST', headers=headers, body=querystr)
+        except HTTPError as e:
+            self.__handle_http_error(e, query)
+        else:
+            query.parse(response.body.decode("utf-8"))
+            raise gen.Return(query)
+
+    @staticmethod
+    def __handle_http_error(e, query):
+        err = None
+        if e.code == 500:
+            # has Druid returned an error?
+            try:
+                err = json.loads(e.response.body.decode("utf-8"))
+            except ValueError:
+                pass
+            else:
+                err = err.get('error', None)
+        raise IOError('{0} \n Druid Error: {1} \n Query is: {2}'.format(
+                e, err, json.dumps(query.query_dict, indent=4)))
+
+    @gen.coroutine
+    def topn(self, **kwargs):
+        query = self.query_builder.topn(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
+
+    @gen.coroutine
+    def timeseries(self, **kwargs):
+        query = self.query_builder.timeseries(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
+
+    @gen.coroutine
+    def groupby(self, **kwargs):
+        query = self.query_builder.groupby(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
+
+    @gen.coroutine
+    def segment_metadata(self, **kwargs):
+        query = self.query_builder.segment_metadata(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
+
+    @gen.coroutine
+    def time_boundary(self, **kwargs):
+        query = self.query_builder.time_boundary(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
+
+    @gen.coroutine
+    def select(self, **kwargs):
+        query = self.query_builder.select(kwargs)
+        result = yield self._post(query)
+        raise gen.Return(result)
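
The docstring example above uses `yield query.topn(...)`, which only works from inside a Tornado coroutine. The following is an illustrative sketch (not part of this commit) of driving the client from an IOLoop; the broker URL, datasource and field names are placeholders:

    from tornado import gen
    from tornado.ioloop import IOLoop

    from pydruid.async_client import AsyncPyDruid
    from pydruid.utils.aggregators import doublesum

    client = AsyncPyDruid('http://localhost:8083', 'druid/v2/')

    @gen.coroutine
    def fetch_top_users():
        # each query method is a coroutine, so it has to be yielded
        top = yield client.topn(
            datasource='twitterstream',
            granularity='all',
            intervals='2013-10-04/pt1h',
            aggregations={'count': doublesum('count')},
            dimension='user_name',
            metric='count',
            threshold=2)
        raise gen.Return(top.result)

    result = IOLoop.current().run_sync(fetch_top_users)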

+ 560 - 0
desktop/core/ext-py/pydruid/client.py

@@ -0,0 +1,560 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import division
+from __future__ import absolute_import
+
+import json
+import re
+
+from six.moves import urllib
+
+from pydruid.query import QueryBuilder
+from base64 import b64encode
+
+
+# extract error from the <PRE> tag inside the HTML response
+HTML_ERROR = re.compile(r'<pre>\s*(.*?)\s*</pre>', re.IGNORECASE)
+
+
+class BaseDruidClient(object):
+    def __init__(self, url, endpoint):
+        self.url = url
+        self.endpoint = endpoint
+        self.query_builder = QueryBuilder()
+        self.username = None
+        self.password = None
+
+    def set_basic_auth_credentials(self, username, password):
+        self.username = username
+        self.password = password
+
+    def _prepare_url_headers_and_body(self, query):
+        querystr = json.dumps(query.query_dict).encode('utf-8')
+        if self.url.endswith('/'):
+            url = self.url + self.endpoint
+        else:
+            url = self.url + '/' + self.endpoint
+        headers = {'Content-Type': 'application/json'}
+        if (self.username is not None) and (self.password is not None):
+            authstring = '{}:{}'.format(self.username, self.password)
+            b64string = b64encode(authstring.encode()).decode()
+            headers['Authorization'] = 'Basic {}'.format(b64string)
+
+        return headers, querystr, url
+
+    def _post(self, query):
+        """
+        Fills Query object with results.
+
+        :param Query query: query to execute
+
+        :return: Query filled with results
+        :rtype: Query
+        """
+        raise NotImplementedError("Subclasses must implement this method")
+
+    # --------- Query implementations ---------
+
+    def topn(self, **kwargs):
+        """
+        A TopN query returns a set of the values in a given dimension,
+        sorted by a specified metric. Conceptually, a topN can be
+        thought of as an approximate GroupByQuery over a single
+        dimension with an Ordering spec. TopNs are
+        faster and more resource efficient than GroupBy for this use case.
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param str granularity: Aggregate data by hour, day, minute, etc.,
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+        :param dict aggregations: A map from aggregator name to one of
+          the pydruid.utils.aggregators e.g., doublesum
+        :param str dimension: Dimension to run the query against
+        :param str metric: Metric by which to sort the specified dimension
+        :param int threshold: How many of the top items to return
+
+        :return: The query result
+        :rtype: Query
+
+        Optional key/value pairs:
+
+        :param pydruid.utils.filters.Filter filter: Indicates which rows
+          of data to include in the query
+        :param post_aggregations:   A dict with string key = 'post_aggregator_name',
+          and value pydruid.utils.PostAggregator
+        :param dict context: A dict of query context options
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> top = client.topn(
+                            datasource='twitterstream',
+                            granularity='all',
+                            intervals='2013-06-14/pt1h',
+                            aggregations={"count": doublesum("count")},
+                            dimension='user_name',
+                            metric='count',
+                            filter=Dimension('user_lang') == 'en',
+                            threshold=1,
+                            context={"timeout": 1000}
+                        )
+                >>> print top
+                >>> [{'timestamp': '2013-06-14T00:00:00.000Z',
+                    'result': [{'count': 22.0, 'user': "cool_user"}]}]
+        """
+        query = self.query_builder.topn(kwargs)
+        return self._post(query)
+
+    def timeseries(self, **kwargs):
+        """
+        A timeseries query returns the values of the requested metrics (in aggregate)
+        for each timestamp.
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param str granularity: Time bucket to aggregate data by hour, day, minute, etc.,
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+        :param dict aggregations: A map from aggregator name to one of the
+          ``pydruid.utils.aggregators`` e.g., ``doublesum``
+
+        :return: The query result
+        :rtype: Query
+
+        Optional key/value pairs:
+
+        :param pydruid.utils.filters.Filter filter: Indicates which rows of
+          data to include in the query
+        :param post_aggregations:   A dict with string key =
+          'post_aggregator_name', and value pydruid.utils.PostAggregator
+        :param dict context: A dict of query context options
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> counts = client.timeseries(
+                        datasource='twitterstream',
+                        granularity='hour',
+                        intervals='2013-06-14/pt1h',
+                        aggregations=\
+                            {"count": doublesum("count"), "rows": count("rows")},
+                        post_aggregations=\
+                            {'percent': (Field('count') / Field('rows')) * Const(100)},
+                        context={"timeout": 1000}
+                    )
+                >>> print counts
+                >>> [{'timestamp': '2013-06-14T00:00:00.000Z',
+                    'result': {'count': 9619.0, 'rows': 8007,
+                    'percent': 120.13238416385663}}]
+        """
+        query = self.query_builder.timeseries(kwargs)
+        return self._post(query)
+
+    def groupby(self, **kwargs):
+        """
+        A group-by query groups a results set (the requested aggregate
+        metrics) by the specified dimension(s).
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param str granularity: Time bucket to aggregate data by hour, day, minute, etc.,
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+        :param dict aggregations: A map from aggregator name to one of the
+          ``pydruid.utils.aggregators`` e.g., ``doublesum``
+        :param list dimensions: The dimensions to group by
+
+        :return: The query result
+        :rtype: Query
+
+        Optional key/value pairs:
+
+        :param pydruid.utils.filters.Filter filter: Indicates which rows of
+          data to include in the query
+        :param pydruid.utils.having.Having having: Indicates which groups
+          in results set of query to keep
+        :param post_aggregations:   A dict with string key = 'post_aggregator_name',
+          and value pydruid.utils.PostAggregator
+        :param dict context: A dict of query context options
+        :param dict limit_spec: A dict of parameters defining how to limit
+          the rows returned, as specified in the Druid api documentation
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> group = client.groupby(
+                        datasource='twitterstream',
+                        granularity='hour',
+                        intervals='2013-10-04/pt1h',
+                        dimensions=["user_name", "reply_to_name"],
+                        filter=~(Dimension("reply_to_name") == "Not A Reply"),
+                        aggregations={"count": doublesum("count")},
+                        context={"timeout": 1000}
+                        limit_spec={
+                            "type": "default",
+                            "limit": 50,
+                            "columns" : ["count"]
+                        }
+                    )
+                >>> for k in range(2):
+                    ...     print group[k]
+                >>> {
+                    'timestamp': '2013-10-04T00:00:00.000Z',
+                    'version': 'v1',
+                    'event': {
+                        'count': 1.0,
+                        'user_name': 'user_1',
+                        'reply_to_name': 'user_2',
+                    }
+                }
+                >>> {
+                    'timestamp': '2013-10-04T00:00:00.000Z',
+                    'version': 'v1',
+                    'event': {
+                        'count': 1.0,
+                        'user_name': 'user_2',
+                        'reply_to_name': 'user_3',
+                    }
+                }
+        """
+        query = self.query_builder.groupby(kwargs)
+        return self._post(query)
+
+    def segment_metadata(self, **kwargs):
+        """
+        A segment meta-data query returns per segment information about:
+
+        * Cardinality of all the columns present
+        * Column type
+        * Estimated size in bytes
+        * Estimated size in bytes of each column
+        * Interval the segment covers
+        * Segment ID
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+
+        Optional key/value pairs:
+
+        :param dict context: A dict of query context options
+
+        :return: The query result
+        :rtype: Query
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> meta = client.segment_metadata(
+                    datasource='twitterstream', intervals = '2013-10-04/pt1h')
+                >>> print meta[0].keys()
+                >>> ['intervals', 'id', 'columns', 'size']
+                >>> print meta[0]['columns']['tweet_length']
+                >>> {
+                    'errorMessage': None,
+                    'cardinality': None,
+                    'type': 'FLOAT',
+                    'size': 30908008,
+                }
+
+        """
+        query = self.query_builder.segment_metadata(kwargs)
+        return self._post(query)
+
+    def time_boundary(self, **kwargs):
+        """
+        A time boundary query returns the min and max timestamps present in a data source.
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+
+        Optional key/value pairs:
+
+        :param dict context: A dict of query context options
+
+        :return: The query result
+        :rtype: Query
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> bound = client.time_boundary(datasource='twitterstream')
+                >>> print bound
+                >>> [{
+                    'timestamp': '2011-09-14T15:00:00.000Z',
+                    'result': {
+                        'minTime': '2011-09-14T15:00:00.000Z',
+                        'maxTime': '2014-03-04T23:44:00.000Z',
+                    }
+                }]
+        """
+        query = self.query_builder.time_boundary(kwargs)
+        return self._post(query)
+
+    def select(self, **kwargs):
+        """
+        A select query returns raw Druid rows and supports pagination.
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param str granularity: Time bucket to aggregate data by hour, day, minute, etc.
+        :param dict paging_spec: Indicates offsets into different scanned segments
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+
+        Optional key/value pairs:
+
+        :param pydruid.utils.filters.Filter filter: Indicates which rows of
+          data to include in the query
+        :param list dimensions: The list of dimensions to select. If left
+          empty, all dimensions are returned
+        :param list metrics: The list of metrics to select. If left empty,
+          all metrics are returned
+        :param dict context: A dict of query context options
+
+        :return: The query result
+        :rtype: Query
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> raw_data = client.select(
+                        datasource='twitterstream',
+                        granularity='all',
+                        intervals='2013-06-14/pt1h',
+                        paging_spec={'pagingIdentifiers': {}, 'threshold': 1},
+                        context={"timeout": 1000}
+                    )
+                >>> print(raw_data)
+                >>> [{
+                    'timestamp': '2013-06-14T00:00:00.000Z',
+                    'result': {
+                        'pagingIdentifiers': {
+                            'twitterstream_...08:00:00.000Z_v1': 1,
+                        },
+                        'events': [{
+                            'segmentId': 'twitterstr...000Z_v1',
+                            'offset': 0,
+                            'event': {
+                                'timestamp': '2013-06-14T00:00:00.000Z',
+                                'dim': 'value',
+                            }
+                        }]
+                    }
+                }]
+        """
+        query = self.query_builder.select(kwargs)
+        return self._post(query)
+
+    def export_tsv(self, dest_path):
+        """
+        Export the current query result to a tsv file.
+
+        .. deprecated::
+            Use Query.export_tsv() method instead.
+        """
+        if self.query_builder.last_query is None:
+            raise AttributeError(
+                "There was no query executed by this client yet. Can't export!")
+        else:
+            return self.query_builder.last_query.export_tsv(dest_path)
+
+    def export_pandas(self):
+        """
+        Export the current query result to a Pandas DataFrame object.
+
+        .. deprecated::
+            Use Query.export_pandas() method instead
+        """
+        if self.query_builder.last_query is None:
+            raise AttributeError(
+                "There was no query executed by this client yet. Can't export!")
+        else:
+            return self.query_builder.last_query.export_pandas()
+
+
+class PyDruid(BaseDruidClient):
+    """
+    PyDruid contains the functions for creating and executing Druid queries.
+    Returns Query objects that can be used for exporting query results
+    into TSV files or pandas.DataFrame objects for subsequent analysis.
+
+    :param str url: URL of Broker node in the Druid cluster
+    :param str endpoint: Endpoint that Broker listens for queries on
+
+    Example
+
+    .. code-block:: python
+        :linenos:
+
+            >>> from pydruid.client import *
+
+            >>> query = PyDruid('http://localhost:8083', 'druid/v2/')
+
+            >>> top = query.topn(
+                    datasource='twitterstream',
+                    granularity='all',
+                    intervals='2013-10-04/pt1h',
+                    aggregations={"count": doublesum("count")},
+                    dimension='user_name',
+                    filter = Dimension('user_lang') == 'en',
+                    metric='count',
+                    threshold=2
+                )
+
+            >>> print json.dumps(top.query_dict, indent=2)
+            >>> {
+                  "metric": "count",
+                  "aggregations": [
+                    {
+                      "type": "doubleSum",
+                      "fieldName": "count",
+                      "name": "count"
+                    }
+                  ],
+                  "dimension": "user_name",
+                  "filter": {
+                    "type": "selector",
+                    "dimension": "user_lang",
+                    "value": "en"
+                  },
+                  "intervals": "2013-10-04/pt1h",
+                  "dataSource": "twitterstream",
+                  "granularity": "all",
+                  "threshold": 2,
+                  "queryType": "topN"
+                }
+
+            >>> print top.result
+            >>> [{
+                'timestamp': '2013-10-04T00:00:00.000Z',
+                'result': [
+                    {
+                        'count': 7.0,
+                        'user_name': 'user_1',
+                    },
+                    {
+                        'count': 6.0,
+                        'user_name': 'user_2',
+                    },
+                ]}]
+
+            >>> df = top.export_pandas()
+            >>> print df
+            >>>    count                 timestamp      user_name
+                0      7  2013-10-04T00:00:00.000Z         user_1
+                1      6  2013-10-04T00:00:00.000Z         user_2
+    """
+    def __init__(self, url, endpoint):
+        super(PyDruid, self).__init__(url, endpoint)
+
+    def _post(self, query):
+        try:
+            headers, querystr, url = self._prepare_url_headers_and_body(query)
+            req = urllib.request.Request(url, querystr, headers)
+            res = urllib.request.urlopen(req)
+            data = res.read().decode("utf-8")
+            res.close()
+        except urllib.error.HTTPError as e:
+            err = e.reason
+            if e.code == 500:
+                # has Druid returned an error?
+                try:
+                    err = json.loads(err)
+                except ValueError:
+                    match = HTML_ERROR.search(err)
+                    if match:
+                        # extract the error message from Druid's HTML error page
+                        err = match.group(1)
+                except (AttributeError, KeyError):
+                    pass
+
+            raise IOError('{0} \n Druid Error: {1} \n Query is: {2}'.format(
+                    e, err, json.dumps(
+                        query.query_dict,
+                        indent=4,
+                        sort_keys=True,
+                        separators=(',', ': '))))
+        else:
+            query.parse(data)
+            return query
+
+    def scan(self, **kwargs):
+        """
+        A scan query returns raw Druid rows
+
+        Required key/value pairs:
+
+        :param str datasource: Data source to query
+        :param str granularity: Time bucket to aggregate data by hour, day, minute, etc.
+        :param int limit: The maximum number of rows to return
+        :param intervals: ISO-8601 intervals of data to query
+        :type intervals: str or list
+
+        Optional key/value pairs:
+
+        :param pydruid.utils.filters.Filter filter: Indicates which rows of
+          data to include in the query
+        :param list dimensions: The list of dimensions to select. If left
+          empty, all dimensions are returned
+        :param list metrics: The list of metrics to select. If left empty,
+          all metrics are returned
+        :param dict context: A dict of query context options
+
+        :return: The query result
+        :rtype: Query
+
+        Example:
+
+        .. code-block:: python
+            :linenos:
+
+                >>> raw_data = client.scan(
+                        datasource='twitterstream',
+                        granularity='all',
+                        intervals='2013-06-14/pt1h',
+                        limit=1,
+                        context={"timeout": 1000}
+                    )
+                >>> print raw_data
+                >>> [{
+                    u'segmentId': u'zzzz',
+                    u'columns': [u'__time', 'status', 'region'],
+                    'events': [{
+                        u'status': u'ok', 'region': u'SF', u'__time': 1509494400000,
+                    }]
+                }]
+        """
+        query = self.query_builder.scan(kwargs)
+        return self._post(query)
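
set_basic_auth_credentials() on BaseDruidClient above adds an HTTP Basic Authorization header to every request. A rough sketch (not part of the diff) of the synchronous client with credentials; the broker URL, user, password and datasource are placeholders:

    from pydruid.client import PyDruid
    from pydruid.utils.aggregators import doublesum
    from pydruid.utils.filters import Dimension

    client = PyDruid('http://localhost:8082', 'druid/v2/')
    client.set_basic_auth_credentials('druid_user', 'druid_password')

    # timeseries() builds the query, posts it and returns a filled Query object
    counts = client.timeseries(
        datasource='twitterstream',
        granularity='hour',
        intervals='2013-10-04/pt1h',
        aggregations={'count': doublesum('count')},
        filter=Dimension('user_lang') == 'en')

    df = counts.export_pandas()  # or counts.export_tsv('counts.tsv')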

+ 202 - 0
desktop/core/ext-py/pydruid/console.py

@@ -0,0 +1,202 @@
+from __future__ import unicode_literals
+
+import os
+import re
+import sys
+
+from prompt_toolkit import prompt, AbortAction
+from prompt_toolkit.history import FileHistory
+from prompt_toolkit.contrib.completers import WordCompleter
+from pygments.lexers import SqlLexer
+from pygments.style import Style
+from pygments.token import Token
+from pygments.styles.default import DefaultStyle
+from six.moves.urllib import parse
+from tabulate import tabulate
+
+from pydruid.db.api import connect
+
+
+keywords = [
+    'EXPLAIN PLAN FOR',
+    'WITH',
+    'SELECT',
+    'ALL',
+    'DISTINCT',
+    'FROM',
+    'WHERE',
+    'GROUP BY',
+    'HAVING',
+    'ORDER BY',
+    'ASC',
+    'DESC',
+    'LIMIT',
+]
+
+aggregate_functions = [
+    'COUNT',
+    'SUM',
+    'MIN',
+    'MAX',
+    'AVG',
+    'APPROX_COUNT_DISTINCT',
+    'APPROX_QUANTILE',
+]
+
+numeric_functions = [
+    'ABS',
+    'CEIL',
+    'EXP',
+    'FLOOR',
+    'LN',
+    'LOG10',
+    'POW',
+    'SQRT',
+]
+
+string_functions = [
+    'CHARACTER_LENGTH',
+    'LOOKUP',
+    'LOWER',
+    'REGEXP_EXTRACT',
+    'REPLACE',
+    'SUBSTRING',
+    'TRIM',
+    'BTRIM',
+    'RTRIM',
+    'LTRIM',
+    'UPPER',
+]
+
+time_functions = [
+    'CURRENT_TIMESTAMP',
+    'CURRENT_DATE',
+    'TIME_FLOOR',
+    'TIME_SHIFT',
+    'TIME_EXTRACT',
+    'TIME_PARSE',
+    'TIME_FORMAT',
+    'MILLIS_TO_TIMESTAMP',
+    'TIMESTAMP_TO_MILLIS',
+    'EXTRACT',
+    'FLOOR',
+    'CEIL',
+]
+
+other_functions = [
+    'CAST',
+    'CASE',
+    'WHEN',
+    'THEN',
+    'END',
+    'NULLIF',
+    'COALESCE',
+]
+
+
+replacements = {
+    '^SHOW SCHEMAS': 'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA',
+    '^SHOW TABLES': 'SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES',
+    r'^DESC (?P<table>[^;\s]*)': r"""
+        SELECT COLUMN_NAME,
+               ORDINAL_POSITION,
+               COLUMN_DEFAULT,
+               IS_NULLABLE,
+               DATA_TYPE
+          FROM INFORMATION_SCHEMA.COLUMNS
+         WHERE TABLE_NAME='\1'
+    """.strip(),
+}
+
+
+class DocumentStyle(Style):
+    styles = {
+        Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
+        Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
+        Token.Menu.Completions.ProgressButton: 'bg:#003333',
+        Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
+    }
+    styles.update(DefaultStyle.styles)
+
+
+def get_connection_kwargs(url):
+    parts = parse.urlparse(url)
+    if ':' in parts.netloc:
+        host, port = parts.netloc.split(':', 1)
+        port = int(port)
+    else:
+        host = parts.netloc
+        port = 8082
+
+    return {
+        'host': host,
+        'port': port,
+        'path': parts.path,
+        'scheme': parts.scheme,
+    }
+
+
+def get_tables(connection):
+    cursor = connection.cursor()
+    return [
+        row.TABLE_NAME for row in
+        cursor.execute('SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES')
+    ]
+
+
+def get_autocomplete(connection):
+    return (
+        keywords +
+        aggregate_functions +
+        numeric_functions +
+        string_functions +
+        time_functions +
+        other_functions +
+        get_tables(connection)
+    )
+
+
+def main():
+    history = FileHistory(os.path.expanduser('~/.pydruid_history'))
+
+    try:
+        url = sys.argv[1]
+    except IndexError:
+        url = 'http://localhost:8082/druid/v2/sql/'
+    kwargs = get_connection_kwargs(url)
+    connection = connect(**kwargs)
+    cursor = connection.cursor()
+
+    words = get_autocomplete(connection)
+    sql_completer = WordCompleter(words, ignore_case=True)
+
+    while True:
+        try:
+            query = prompt(
+                '> ', lexer=SqlLexer, completer=sql_completer,
+                style=DocumentStyle, history=history,
+                on_abort=AbortAction.RETRY)
+        except EOFError:
+            break  # Control-D pressed.
+
+        # run query
+        query = query.strip('; ')
+        if query:
+            # shortcuts
+            for pattern, repl in replacements.items():
+                query = re.sub(pattern, repl, query)
+
+            try:
+                result = cursor.execute(query)
+            except Exception as e:
+                print(e)
+                continue
+
+            headers = [t[0] for t in cursor.description or []]
+            print(tabulate(result, headers=headers))
+
+    print('GoodBye!')
+
+
+if __name__ == '__main__':
+    main()
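
main() takes an optional broker SQL endpoint URL as its first command-line argument and falls back to http://localhost:8082/druid/v2/sql/. A small illustration of how get_connection_kwargs() splits such a URL into connect() arguments (the host below is a placeholder):

    from pydruid.console import get_connection_kwargs

    print(get_connection_kwargs('http://broker.example.com:8082/druid/v2/sql/'))
    # {'host': 'broker.example.com', 'port': 8082,
    #  'path': '/druid/v2/sql/', 'scheme': 'http'}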

+ 37 - 0
desktop/core/ext-py/pydruid/db/__init__.py

@@ -0,0 +1,37 @@
+from pydruid.db.api import connect
+from pydruid.db.exceptions import (
+    DataError,
+    DatabaseError,
+    Error,
+    IntegrityError,
+    InterfaceError,
+    InternalError,
+    NotSupportedError,
+    OperationalError,
+    ProgrammingError,
+    Warning,
+)
+
+
+__all__ = [
+    'connect',
+    'apilevel',
+    'threadsafety',
+    'paramstyle',
+    'DataError',
+    'DatabaseError',
+    'Error',
+    'IntegrityError',
+    'InterfaceError',
+    'InternalError',
+    'NotSupportedError',
+    'OperationalError',
+    'ProgrammingError',
+    'Warning',
+]
+
+
+apilevel = '2.0'
+# Threads may share the module and connections
+threadsafety = 2
+paramstyle = 'pyformat'
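
Since paramstyle is 'pyformat', query parameters are passed as a dict and referenced as %(name)s placeholders in the SQL; the cursor escapes them before substitution (see apply_parameters() and escape() in pydruid/db/api.py below). A rough sketch, with datasource and column names as placeholders:

    from pydruid.db import connect

    conn = connect(host='localhost', port=8082, path='/druid/v2/sql/', scheme='http')
    cursor = conn.cursor()

    cursor.execute(
        'SELECT user_name, COUNT(*) AS cnt '
        'FROM twitterstream '
        'WHERE user_lang = %(lang)s '
        'GROUP BY user_name '
        'LIMIT %(max_rows)s',
        {'lang': 'en', 'max_rows': 10})

    for row in cursor:  # rows come back as namedtuples
        print(row.user_name, row.cnt)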

+ 352 - 0
desktop/core/ext-py/pydruid/db/api.py

@@ -0,0 +1,352 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from collections import namedtuple
+import itertools
+import json
+from six import string_types
+from six.moves.urllib import parse
+
+import requests
+
+from pydruid.db import exceptions
+
+
+class Type(object):
+    STRING = 1
+    NUMBER = 2
+    BOOLEAN = 3
+
+
+def connect(host='localhost', port=8082, path='/druid/v2/sql/', scheme='http'):
+    """
+    Constructor for creating a connection to the database.
+
+        >>> conn = connect('localhost', 8082)
+        >>> curs = conn.cursor()
+
+    """
+    return Connection(host, port, path, scheme)
+
+
+def check_closed(f):
+    """Decorator that checks if connection/cursor is closed."""
+
+    def g(self, *args, **kwargs):
+        if self.closed:
+            raise exceptions.Error(
+                '{klass} already closed'.format(klass=self.__class__.__name__))
+        return f(self, *args, **kwargs)
+    return g
+
+
+def check_result(f):
+    """Decorator that checks if the cursor has results from `execute`."""
+
+    def g(self, *args, **kwargs):
+        if self._results is None:
+            raise exceptions.Error('Called before `execute`')
+        return f(self, *args, **kwargs)
+    return g
+
+
+def get_description_from_row(row):
+    """
+    Return description from a single row.
+
+    We only return the name, type (inferred from the data) and if the values
+    can be NULL. String columns in Druid are NULLable. Numeric columns are NOT
+    NULL.
+    """
+    return [
+        (
+            name,                            # name
+            get_type(value),                 # type_code
+            None,                            # [display_size]
+            None,                            # [internal_size]
+            None,                            # [precision]
+            None,                            # [scale]
+            get_type(value) == Type.STRING,  # [null_ok]
+        )
+        for name, value in row.items()
+    ]
+
+
+def get_type(value):
+    """Infer type from value."""
+    if isinstance(value, string_types) or value is None:
+        return Type.STRING
+    elif isinstance(value, bool):
+        # bool must be checked before int/float, since bool is a subclass of int
+        return Type.BOOLEAN
+    elif isinstance(value, (int, float)):
+        return Type.NUMBER
+
+    raise exceptions.Error(
+        'Value of unknown type: {value}'.format(value=value))
+
+
+class Connection(object):
+
+    """Connection to a Druid database."""
+
+    def __init__(
+        self,
+        host='localhost',
+        port=8082,
+        path='/druid/v2/sql/',
+        scheme='http',
+    ):
+        netloc = '{host}:{port}'.format(host=host, port=port)
+        self.url = parse.urlunparse(
+            (scheme, netloc, path, None, None, None))
+        self.closed = False
+        self.cursors = []
+
+    @check_closed
+    def close(self):
+        """Close the connection now."""
+        self.closed = True
+        for cursor in self.cursors:
+            try:
+                cursor.close()
+            except exceptions.Error:
+                pass  # already closed
+
+    @check_closed
+    def commit(self):
+        """
+        Commit any pending transaction to the database.
+
+        Not supported.
+        """
+        pass
+
+    @check_closed
+    def cursor(self):
+        """Return a new Cursor Object using the connection."""
+        cursor = Cursor(self.url)
+        self.cursors.append(cursor)
+
+        return cursor
+
+    @check_closed
+    def execute(self, operation, parameters=None):
+        cursor = self.cursor()
+        return cursor.execute(operation, parameters)
+
+    def __enter__(self):
+        return self.cursor()
+
+    def __exit__(self, *exc):
+        self.close()
+
+
+class Cursor(object):
+
+    """Connection cursor."""
+
+    def __init__(self, url):
+        self.url = url
+
+        # This read/write attribute specifies the number of rows to fetch at a
+        # time with .fetchmany(). It defaults to 1 meaning to fetch a single
+        # row at a time.
+        self.arraysize = 1
+
+        self.closed = False
+
+        # this is updated only after a query
+        self.description = None
+
+        # this is set to an iterator after a successful query
+        self._results = None
+
+    @property
+    @check_result
+    @check_closed
+    def rowcount(self):
+        # consume the iterator
+        results = list(self._results)
+        n = len(results)
+        self._results = iter(results)
+        return n
+
+    @check_closed
+    def close(self):
+        """Close the cursor."""
+        self.closed = True
+
+    @check_closed
+    def execute(self, operation, parameters=None):
+        query = apply_parameters(operation, parameters or {})
+
+        # `_stream_query` returns a generator that produces the rows; we need
+        # to consume the first row so that `description` is properly set, so
+        # let's consume it and insert it back.
+        results = self._stream_query(query)
+        try:
+            first_row = next(results)
+            self._results = itertools.chain([first_row], results)
+        except StopIteration:
+            self._results = iter([])
+
+        return self
+
+    @check_closed
+    def executemany(self, operation, seq_of_parameters=None):
+        raise exceptions.NotSupportedError(
+            '`executemany` is not supported, use `execute` instead')
+
+    @check_result
+    @check_closed
+    def fetchone(self):
+        """
+        Fetch the next row of a query result set, returning a single sequence,
+        or `None` when no more data is available.
+        """
+        try:
+            return self.next()
+        except StopIteration:
+            return None
+
+    @check_result
+    @check_closed
+    def fetchmany(self, size=None):
+        """
+        Fetch the next set of rows of a query result, returning a sequence of
+        sequences (e.g. a list of tuples). An empty sequence is returned when
+        no more rows are available.
+        """
+        size = size or self.arraysize
+        return list(itertools.islice(self, size))
+
+    @check_result
+    @check_closed
+    def fetchall(self):
+        """
+        Fetch all (remaining) rows of a query result, returning them as a
+        sequence of sequences (e.g. a list of tuples). Note that the cursor's
+        arraysize attribute can affect the performance of this operation.
+        """
+        return list(self)
+
+    @check_closed
+    def setinputsizes(self, sizes):
+        # not supported
+        pass
+
+    @check_closed
+    def setoutputsizes(self, sizes):
+        # not supported
+        pass
+
+    @check_closed
+    def __iter__(self):
+        return self
+
+    @check_closed
+    def __next__(self):
+        return next(self._results)
+
+    next = __next__
+
+    def _stream_query(self, query):
+        """
+        Stream rows from a query.
+
+        This method will yield rows as the data is returned in chunks from the
+        server.
+        """
+        self.description = None
+
+        headers = {'Content-Type': 'application/json'}
+        payload = {'query': query}
+        r = requests.post(self.url, stream=True, headers=headers, json=payload)
+        if r.encoding is None:
+            r.encoding = 'utf-8'
+
+        # raise any error messages
+        if r.status_code != 200:
+            payload = r.json()
+            msg = (
+                '{error} ({errorClass}): {errorMessage}'.format(**payload)
+            )
+            raise exceptions.ProgrammingError(msg)
+
+        # Druid will stream the data in chunks of 8k bytes, splitting the JSON
+        # between them; setting `chunk_size` to `None` makes requests yield the
+        # chunks exactly as the server sends them
+        chunks = r.iter_content(chunk_size=None, decode_unicode=True)
+        Row = None
+        for row in rows_from_chunks(chunks):
+            # update description
+            if self.description is None:
+                self.description = get_description_from_row(row)
+
+            # return row in namedtuple
+            if Row is None:
+                Row = namedtuple('Row', row.keys(), rename=True)
+            yield Row(*row.values())
+
+
+def rows_from_chunks(chunks):
+    """
+    A generator that yields rows from JSON chunks.
+
+    Druid will return the data in chunks, but they are not aligned with the
+    JSON objects. This function will parse all complete rows inside each chunk,
+    yielding them as soon as possible.
+    """
+    body = ''
+    for chunk in chunks:
+        if chunk:
+            body = ''.join((body, chunk))
+
+        # find last complete row
+        boundary = 0
+        brackets = 0
+        in_string = False
+        for i, char in enumerate(body):
+            if char == '"':
+                if not in_string:
+                    in_string = True
+                elif body[i - 1] != '\\':
+                    in_string = False
+
+            if in_string:
+                continue
+
+            if char == '{':
+                brackets += 1
+            elif char == '}':
+                brackets -= 1
+                if brackets == 0 and i > boundary:
+                    boundary = i + 1
+
+        rows = body[:boundary].lstrip('[,')
+        body = body[boundary:]
+
+        for row in json.loads('[{rows}]'.format(rows=rows)):
+            yield row
+
+
+def apply_parameters(operation, parameters):
+    escaped_parameters = {
+        key: escape(value) for key, value in parameters.items()
+    }
+    return operation % escaped_parameters
+
+
+def escape(value):
+    if value == '*':
+        return value
+    elif isinstance(value, string_types):
+        return "'{}'".format(value.replace("'", "''"))
+    elif isinstance(value, bool):
+        # check bool before int/float, since bool is a subclass of int
+        return 'TRUE' if value else 'FALSE'
+    elif isinstance(value, (int, float)):
+        return value
+    elif isinstance(value, (list, tuple)):
+        return ', '.join(escape(element) for element in value)
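
rows_from_chunks() above reassembles JSON objects that Druid splits across transfer chunks, tracking brace depth and string literals so it never cuts a row in half. A small illustration (the chunk contents are made up):

    from pydruid.db.api import rows_from_chunks

    chunks = ['[{"a": 1}, {"a":', ' 2}, {"a": "}"}]']
    print(list(rows_from_chunks(chunks)))
    # [{'a': 1}, {'a': 2}, {'a': '}'}]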

+ 44 - 0
desktop/core/ext-py/pydruid/db/exceptions.py

@@ -0,0 +1,44 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+
+class Error(Exception):
+    pass
+
+
+class Warning(Exception):
+    pass
+
+
+class InterfaceError(Error):
+    pass
+
+
+class DatabaseError(Error):
+    pass
+
+
+class InternalError(DatabaseError):
+    pass
+
+
+class OperationalError(DatabaseError):
+    pass
+
+
+class ProgrammingError(DatabaseError):
+    pass
+
+
+class IntegrityError(DatabaseError):
+    pass
+
+
+class DataError(DatabaseError):
+    pass
+
+
+class NotSupportedError(DatabaseError):
+    pass

+ 250 - 0
desktop/core/ext-py/pydruid/db/sqlalchemy.py

@@ -0,0 +1,250 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from sqlalchemy.engine import default
+from sqlalchemy.sql import compiler
+from sqlalchemy import types
+
+import pydruid.db
+from pydruid.db import exceptions
+
+
+RESERVED_SCHEMAS = ['INFORMATION_SCHEMA']
+
+
+type_map = {
+    'char': types.String,
+    'varchar': types.String,
+    'float': types.Float,
+    'decimal': types.Float,
+    'real': types.Float,
+    'double': types.Float,
+    'boolean': types.Boolean,
+    'tinyint': types.BigInteger,
+    'smallint': types.BigInteger,
+    'integer': types.BigInteger,
+    'bigint': types.BigInteger,
+    'timestamp': types.TIMESTAMP,
+    'date': types.DATE,
+    'other': types.BLOB,
+}
+
+
+class UniversalSet(object):
+    def __contains__(self, item):
+        return True
+
+
+class DruidIdentifierPreparer(compiler.IdentifierPreparer):
+    reserved_words = UniversalSet()
+
+
+class DruidCompiler(compiler.SQLCompiler):
+    pass
+
+
+class DruidTypeCompiler(compiler.GenericTypeCompiler):
+    def visit_REAL(self, type_, **kwargs):
+        return "DOUBLE"
+
+    def visit_NUMERIC(self, type_, **kwargs):
+        return "LONG"
+
+    visit_DECIMAL = visit_NUMERIC
+    visit_INTEGER = visit_NUMERIC
+    visit_SMALLINT = visit_NUMERIC
+    visit_BIGINT = visit_NUMERIC
+    visit_BOOLEAN = visit_NUMERIC
+    visit_TIMESTAMP = visit_NUMERIC
+    visit_DATE = visit_NUMERIC
+
+    def visit_CHAR(self, type_, **kwargs):
+        return "STRING"
+
+    visit_NCHAR = visit_CHAR
+    visit_VARCHAR = visit_CHAR
+    visit_NVARCHAR = visit_CHAR
+    visit_TEXT = visit_CHAR
+
+    def visit_DATETIME(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type DATETIME is not supported')
+
+    def visit_TIME(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type TIME is not supported')
+
+    def visit_BINARY(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type BINARY is not supported')
+
+    def visit_VARBINARY(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type VARBINARY is not supported')
+
+    def visit_BLOB(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type BLOB is not supported')
+
+    def visit_CLOB(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type CLOB is not supported')
+
+    def visit_NCLOB(self, type_, **kwargs):
+        raise exceptions.NotSupportedError('Type NCLOB is not supported')
+
+
+class DruidDialect(default.DefaultDialect):
+
+    name = 'druid'
+    scheme = 'http'
+    driver = 'rest'
+    preparer = DruidIdentifierPreparer
+    statement_compiler = DruidCompiler
+    type_compiler = DruidTypeCompiler
+    supports_alter = False
+    supports_pk_autoincrement = False
+    supports_default_values = False
+    supports_empty_insert = False
+    supports_unicode_statements = True
+    supports_unicode_binds = True
+    returns_unicode_strings = True
+    description_encoding = None
+    supports_native_boolean = True
+
+    @classmethod
+    def dbapi(cls):
+        return pydruid.db
+
+    def create_connect_args(self, url):
+        kwargs = {
+            'host': url.host,
+            'port': url.port or 8082,
+            'path': url.database,
+            'scheme': self.scheme,
+        }
+        return ([], kwargs)
+
+    def get_schema_names(self, connection, **kwargs):
+        # Each Druid datasource appears as a table in the "druid" schema. This
+        # is also the default schema, so Druid datasources can be referenced as
+        # either druid.dataSourceName or simply dataSourceName.
+        result = connection.execute(
+            'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA')
+
+        return [
+            row.SCHEMA_NAME for row in result
+            if row.SCHEMA_NAME not in RESERVED_SCHEMAS
+        ]
+
+    def has_table(self, connection, table_name, schema=None):
+        query = """
+            SELECT COUNT(*) > 0 AS exists_
+              FROM INFORMATION_SCHEMA.TABLES
+             WHERE TABLE_NAME = '{table_name}'
+        """.format(table_name=table_name)
+
+        result = connection.execute(query)
+        return result.fetchone().exists_
+
+    def get_table_names(self, connection, schema=None, **kwargs):
+        query = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES"
+        if schema:
+            query = "{query} WHERE TABLE_SCHEMA = '{schema}'".format(
+                query=query, schema=schema)
+
+        result = connection.execute(query)
+        return [row.TABLE_NAME for row in result]
+
+    def get_view_names(self, connection, schema=None, **kwargs):
+        return []
+
+    def get_table_options(self, connection, table_name, schema=None, **kwargs):
+        return {}
+
+    def get_columns(self, connection, table_name, schema=None, **kwargs):
+        query = """
+            SELECT COLUMN_NAME,
+                   DATA_TYPE,
+                   IS_NULLABLE,
+                   COLUMN_DEFAULT
+              FROM INFORMATION_SCHEMA.COLUMNS
+             WHERE TABLE_NAME = '{table_name}'
+        """.format(table_name=table_name)
+        if schema:
+            query = "{query} AND TABLE_SCHEMA = '{schema}'".format(
+                query=query, schema=schema)
+
+        result = connection.execute(query)
+
+        return [
+            {
+                'name': row.COLUMN_NAME,
+                'type': type_map[row.DATA_TYPE.lower()],
+                'nullable': get_is_nullable(row.IS_NULLABLE),
+                'default': get_default(row.COLUMN_DEFAULT),
+            }
+            for row in result
+        ]
+
+    def get_pk_constraint(self, connection, table_name, schema=None, **kwargs):
+        return {'constrained_columns': [], 'name': None}
+
+    def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
+        return []
+
+    def get_check_constraints(
+        self,
+        connection,
+        table_name,
+        schema=None,
+        **kwargs
+    ):
+        return []
+
+    def get_table_comment(self, connection, table_name, schema=None, **kwargs):
+        return {'text': ''}
+
+    def get_indexes(self, connection, table_name, schema=None, **kwargs):
+        return []
+
+    def get_unique_constraints(
+        self,
+        connection,
+        table_name,
+        schema=None,
+        **kwargs
+    ):
+        return []
+
+    def get_view_definition(
+        self,
+        connection,
+        view_name,
+        schema=None,
+        **kwargs
+    ):
+        pass
+
+    def do_rollback(self, dbapi_connection):
+        pass
+
+    def _check_unicode_returns(self, connection, additional_tests=None):
+        return True
+
+    def _check_unicode_description(self, connection):
+        return True
+
+
+DruidHTTPDialect = DruidDialect
+
+
+class DruidHTTPSDialect(DruidDialect):
+
+    scheme = 'https'
+
+
+def get_is_nullable(druid_is_nullable):
+    # this should be 'YES' or 'NO'; we default to no
+    return druid_is_nullable.lower() == 'yes'
+
+
+def get_default(druid_column_default):
+    # Druid reports an empty string when a column has no default; normalize to None
+    return str(druid_column_default) if druid_column_default != '' else None
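
An illustrative sketch of using the dialect through SQLAlchemy, assuming it is registered under the druid:// scheme (the entry-point registration lives in the package's setup.py, which is not part of this file); host, port and datasource are placeholders:

    from sqlalchemy import create_engine

    engine = create_engine('druid://localhost:8082/druid/v2/sql/')
    with engine.connect() as conn:
        # each Druid datasource shows up as a table in the default "druid" schema
        for row in conn.execute('SELECT COUNT(*) AS cnt FROM twitterstream'):
            print(row.cnt)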

+ 446 - 0
desktop/core/ext-py/pydruid/query.py

@@ -0,0 +1,446 @@
+#
+# Copyright 2016 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import six
+import json
+import collections
+from pydruid.utils.aggregators import build_aggregators
+from pydruid.utils.filters import Filter
+from pydruid.utils.having import Having
+from pydruid.utils.dimensions import build_dimension
+from pydruid.utils.postaggregator import Postaggregator
+from pydruid.utils.query_utils import UnicodeWriter
+
+
+class Query(collections.MutableSequence):
+    """
+    Query objects are produced by PyDruid clients and can be used for
+    exporting query results into TSV files or
+    pandas.DataFrame objects for subsequent analysis. They also hold
+    information about the issued query.
+
+    Query acts as a wrapper over raw result list of dictionaries.
+
+    :ivar str result_json: JSON object representing a query result. Initial value: None
+    :ivar list result: Query result parsed into a list of dicts. Initial value: None
+    :ivar str query_type: Name of most recently run query, e.g., topN. Initial value: None
+    :ivar dict query_dict: JSON object representing the query. Initial value: None
+    """
+
+    def __init__(self, query_dict, query_type):
+        super(Query, self).__init__()
+        self.query_dict = query_dict
+        self.query_type = query_type
+        self.result = None
+        self.result_json = None
+
+    def parse(self, data):
+        if data:
+            self.result_json = data
+            res = json.loads(self.result_json)
+            self.result = res
+        else:
+            raise IOError('Error parsing result: {0} for {1} query'.format(
+                    self.result_json, self.query_type))
+
+    def export_tsv(self, dest_path):
+        """
+        Export the current query result to a tsv file.
+
+        :param str dest_path: file to write query results to
+        :raise NotImplementedError:
+
+        Example
+
+        .. code-block:: python
+            :linenos:
+
+                >>> top = client.topn(
+                        datasource='twitterstream',
+                        granularity='all',
+                        intervals='2013-10-04/pt1h',
+                        aggregations={"count": doublesum("count")},
+                        dimension='user_name',
+                        filter = Dimension('user_lang') == 'en',
+                        metric='count',
+                        threshold=2
+                    )
+
+                >>> top.export_tsv('top.tsv')
+                >>> !cat top.tsv
+                >>> count	user_name	timestamp
+                    7.0	user_1	2013-10-04T00:00:00.000Z
+                    6.0	user_2	2013-10-04T00:00:00.000Z
+        """
+        if six.PY3:
+            f = open(dest_path, 'w', newline='', encoding='utf-8')
+        else:
+            f = open(dest_path, 'wb')
+        w = UnicodeWriter(f)
+
+        if self.query_type == "timeseries":
+            header = list(self.result[0]['result'].keys())
+            header.append('timestamp')
+        elif self.query_type == 'topN':
+            header = list(self.result[0]['result'][0].keys())
+            header.append('timestamp')
+        elif self.query_type == "groupBy":
+            header = list(self.result[0]['event'].keys())
+            header.append('timestamp')
+            header.append('version')
+        else:
+            raise NotImplementedError(
+                'TSV export not implemented for query type: {0}'.format(self.query_type))
+
+        w.writerow(header)
+
+        if self.result:
+            if self.query_type == "topN" or self.query_type == "timeseries":
+                for item in self.result:
+                    timestamp = item['timestamp']
+                    result = item['result']
+                    if type(result) is list:  # topN
+                        for line in result:
+                            w.writerow(list(line.values()) + [timestamp])
+                    else:  # timeseries
+                        w.writerow(list(result.values()) + [timestamp])
+            elif self.query_type == "groupBy":
+                for item in self.result:
+                    timestamp = item['timestamp']
+                    version = item['version']
+                    w.writerow(
+                        list(item['event'].values()) + [timestamp] + [version])
+
+        f.close()
+
+    def export_pandas(self):
+        """
+        Export the current query result to a Pandas DataFrame object.
+
+        :return: The DataFrame representing the query result
+        :rtype: DataFrame
+        :raise NotImplementedError:
+
+        Example
+
+        .. code-block:: python
+            :linenos:
+
+                >>> top = client.topn(
+                        datasource='twitterstream',
+                        granularity='all',
+                        intervals='2013-10-04/pt1h',
+                        aggregations={"count": doublesum("count")},
+                        dimension='user_name',
+                        filter = Dimension('user_lang') == 'en',
+                        metric='count',
+                        threshold=2
+                    )
+
+                >>> df = top.export_pandas()
+                >>> print df
+                >>>    count                 timestamp      user_name
+                    0      7  2013-10-04T00:00:00.000Z         user_1
+                    1      6  2013-10-04T00:00:00.000Z         user_2
+        """
+        import pandas
+
+        if self.result:
+            if self.query_type == "timeseries":
+                nres = [list(v['result'].items()) + [('timestamp', v['timestamp'])]
+                        for v in self.result]
+                nres = [dict(v) for v in nres]
+            elif self.query_type == "topN":
+                nres = []
+                for item in self.result:
+                    timestamp = item['timestamp']
+                    results = item['result']
+                    tres = [dict(list(res.items()) + [('timestamp', timestamp)])
+                            for res in results]
+                    nres += tres
+            elif self.query_type == "groupBy":
+                nres = [list(v['event'].items()) + [('timestamp', v['timestamp'])]
+                        for v in self.result]
+                nres = [dict(v) for v in nres]
+            elif self.query_type == "select":
+                nres = []
+                for item in self.result:
+                    nres += [e.get('event') for e in item['result'].get('events')]
+            elif self.query_type == "scan":
+                nres = []
+                for item in self.result:
+                    nres += [e for e in item.get('events')]
+            else:
+                raise NotImplementedError(
+                    'Pandas export not implemented for query '
+                    'type: {0}'.format(self.query_type))
+
+            df = pandas.DataFrame(nres)
+            return df
+
+    def __str__(self):
+        return str(self.result)
+
+    def __len__(self):
+        return len(self.result)
+
+    def __delitem__(self, index):
+        del self.result[index]
+
+    def insert(self, index, value):
+        self.result.insert(index, value)
+
+    def __setitem__(self, index, value):
+        self.result[index] = value
+
+    def __getitem__(self, index):
+        return self.result[index]
+
+
+class QueryBuilder(object):
+    def __init__(self):
+        self.last_query = None
+
+    @staticmethod
+    def parse_datasource(datasource, query_type):
+        """
+        Parse an input datasource definition into a valid dictionary.
+
+        Input can be a string, in which case it is returned unchanged, or a
+        list of strings, in which case it is turned into a union datasource.
+
+        :param datasource: datasource parameter
+        :param string query_type: query type
+        :raise ValueError: if input is not a string or a list of strings
+        """
+        if not (
+                    isinstance(datasource, six.string_types) or
+                    (
+                        isinstance(datasource, list) and
+                        all([isinstance(x, six.string_types) for x in datasource])
+                    )
+                ):
+            raise ValueError(
+                'Datasource definition not valid. Must be string or list of strings')
+        if isinstance(datasource, six.string_types):
+            return datasource
+        else:
+            return {'type': 'union', 'dataSources': datasource}
+
+    @staticmethod
+    def validate_query(query_type, valid_parts, args):
+        """
+        Validate the query parts so only allowed objects are sent.
+
+        Each query type can have an optional 'context' object attached, which
+        is used to set query context settings such as timeout or priority.
+        Since every query type accepts it, 'context' is appended to the list
+        of valid parts here instead of having to be declared by each caller.
+
+        :param string query_type: a type of query
+        :param list valid_parts: a list of valid object names
+        :param dict args: the dict of args to be sent
+        :raise ValueError: if an invalid object is given
+        """
+        valid_parts = valid_parts[:] + ['context']
+        for key, val in six.iteritems(args):
+            if key not in valid_parts:
+                raise ValueError(
+                        'Query component: {0} is not valid for query type: {1}. '
+                        .format(key, query_type) +
+                        'The list of valid components is: \n {0}'
+                        .format(valid_parts))
+
+    def build_query(self, query_type, args):
+        """
+        Build query based on given query type and arguments.
+
+        :param string query_type: a type of query
+        :param dict args: the dict of args to be sent
+        :return: the resulting query
+        :rtype: Query
+        """
+        query_dict = {'queryType': query_type}
+
+        for key, val in six.iteritems(args):
+            if key == 'aggregations':
+                query_dict[key] = build_aggregators(val)
+            elif key == 'post_aggregations':
+                query_dict['postAggregations'] = \
+                    Postaggregator.build_post_aggregators(val)
+            elif key == 'context':
+                query_dict['context'] = val
+            elif key == 'datasource':
+                query_dict['dataSource'] = self.parse_datasource(val, query_type)
+            elif key == 'paging_spec':
+                query_dict['pagingSpec'] = val
+            elif key == 'limit_spec':
+                query_dict['limitSpec'] = val
+            elif key == "filter" and val is not None:
+                query_dict[key] = Filter.build_filter(val)
+            elif key == "having" and val is not None:
+                query_dict[key] = Having.build_having(val)
+            elif key == 'dimension' and val is not None:
+                query_dict[key] = build_dimension(val)
+            elif key == 'dimensions':
+                query_dict[key] = [build_dimension(v) for v in val]
+            else:
+                query_dict[key] = val
+
+        self.last_query = Query(query_dict, query_type)
+        return self.last_query
+
+    def topn(self, args):
+        """
+        A TopN query returns a set of the values in a given dimension,
+        sorted by a specified metric. Conceptually, a
+        topN can be thought of as an approximate GroupByQuery over a
+        single dimension with an Ordering spec. TopNs are
+        faster and more resource efficient than GroupBy for this use case.
+
+        :param dict args: dict of arguments
+
+        :return: topn query
+        :rtype: Query
+        """
+        query_type = 'topN'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'aggregations',
+            'post_aggregations', 'intervals', 'dimension', 'threshold',
+            'metric'
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def timeseries(self, args):
+        """
+        A timeseries query returns the values of the requested metrics
+        (in aggregate) for each timestamp.
+
+        :param dict args: dict of args
+
+        :return: timeseries query
+        :rtype: Query
+        """
+        query_type = 'timeseries'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'aggregations', 'descending',
+            'post_aggregations', 'intervals'
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def groupby(self, args):
+        """
+        A group-by query groups a result set (the requested aggregate
+        metrics) by the specified dimension(s).
+
+        :param dict args: dict of args
+
+        :return: group by query
+        :rtype: Query
+        """
+        query_type = 'groupBy'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'aggregations',
+            'having', 'post_aggregations', 'intervals', 'dimensions',
+            'limit_spec',
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def segment_metadata(self, args):
+        """
+        * Column type
+        * Estimated size in bytes
+        * Estimated size in bytes of each column
+        * Interval the segment covers
+        * Segment ID
+
+        :param dict args: dict of args
+
+        :return: segment metadata query
+        :rtype: Query
+        """
+        query_type = 'segmentMetadata'
+        valid_parts = ['datasource', 'intervals', 'analysisTypes', 'merge']
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def time_boundary(self, args):
+        """
+        A time boundary query returns the min and max timestamps present in a data source.
+
+        :param dict args: dict of args
+
+        :return: time boundary query
+        :rtype: Query
+        """
+        query_type = 'timeBoundary'
+        valid_parts = ['datasource']
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def select(self, args):
+        """
+        A select query returns raw Druid rows and supports pagination.
+
+        :param dict args: dict of args
+
+        :return: select query
+        :rtype: Query
+        """
+        query_type = 'select'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'dimensions', 'metrics',
+            'paging_spec', 'intervals'
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def search(self, args):
+        """
+        A search query returns dimension values that match the search specification.
+
+        :param dict args: dict of args
+
+        :return: search query
+        :rtype: Query
+        """
+        query_type = 'search'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'searchDimensions', 'query',
+            'limit', 'intervals', 'sort'
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)
+
+    def scan(self, args):
+        """
+        A scan query returns raw Druid rows.
+
+        :param dict args: dict of args
+
+        :return: scan query
+        :rtype: Query
+        """
+        query_type = 'scan'
+        valid_parts = [
+            'datasource', 'granularity', 'filter', 'dimensions', 'metrics',
+            'intervals', 'limit',
+        ]
+        self.validate_query(query_type, valid_parts, args)
+        return self.build_query(query_type, args)

+ 0 - 0
desktop/core/ext-py/pydruid/utils/__init__.py


+ 109 - 0
desktop/core/ext-py/pydruid/utils/aggregators.py

@@ -0,0 +1,109 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from six import iteritems
+
+from .filters import Filter
+
+
+def thetasketch(raw_column, isinputthetasketch=False, size=16384):
+    return {
+        "type": "thetaSketch",
+        "fieldName": raw_column,
+        "isInputThetaSketch": isinputthetasketch,
+        "size": size,
+    }
+
+
+def min(raw_metric):
+    """
+    .. note:: Deprecated. Use `longMin` or `doubleMin` instead.
+    """
+    return {"type": "min", "fieldName": raw_metric}
+
+
+def max(raw_metric):
+    """
+    .. note:: Deprecated. Use `longMax` or `doubleMax` instead.
+    """
+    return {"type": "max", "fieldName": raw_metric}
+
+
+def longsum(raw_metric):
+    return {"type": "longSum", "fieldName": raw_metric}
+
+
+def longmin(raw_metric):
+    return {"type": "longMin", "fieldName": raw_metric}
+
+
+def longmax(raw_metric):
+    return {"type": "longMax", "fieldName": raw_metric}
+
+
+def doublesum(raw_metric):
+    return {"type": "doubleSum", "fieldName": raw_metric}
+
+
+def doublemin(raw_metric):
+    return {"type": "doubleMin", "fieldName": raw_metric}
+
+
+def doublemax(raw_metric):
+    return {"type": "doubleMax", "fieldName": raw_metric}
+
+
+def count(raw_metric):
+    return {"type": "count", "fieldName": raw_metric}
+
+
+def hyperunique(raw_metric):
+    return {"type": "hyperUnique", "fieldName": raw_metric}
+
+
+def cardinality(raw_column, by_row=False):
+    if type(raw_column) is not list:
+        raw_column = [raw_column]
+    return {"type": "cardinality", "fieldNames": raw_column, "byRow": by_row}
+
+
+def filtered(filter, agg):
+    return {"type": "filtered",
+            "filter": Filter.build_filter(filter),
+            "aggregator": agg}
+
+
+def javascript(columns_list, fn_aggregate, fn_combine, fn_reset):
+    return {
+        "type": "javascript",
+        "fieldNames": columns_list,
+        "fnAggregate": fn_aggregate,
+        "fnCombine": fn_combine,
+        "fnReset": fn_reset,
+    }
+
+
+def build_aggregators(agg_input):
+    return [_build_aggregator(name, kwargs)
+            for (name, kwargs) in iteritems(agg_input)]
+
+
+def _build_aggregator(name, kwargs):
+    if kwargs["type"] == "filtered":
+        kwargs["aggregator"] = _build_aggregator(name, kwargs["aggregator"])
+    else:
+        kwargs.update({"name": name})
+
+    return kwargs
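A short sketch of how these aggregator helpers feed build_aggregators(); 'count', 'user_lang' and 'user_unique' are hypothetical column names:

    from pydruid.utils.aggregators import (
        build_aggregators, filtered, hyperunique, longsum)
    from pydruid.utils.filters import Dimension

    aggs = {
        'total': longsum('count'),
        'english_total': filtered(Dimension('user_lang') == 'en', longsum('count')),
        'unique_users': hyperunique('user_unique'),
    }
    print(build_aggregators(aggs))

build_aggregators() attaches each dict key as the output name of the corresponding aggregator; for a filtered aggregator the name is set on the wrapped inner aggregator.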

+ 199 - 0
desktop/core/ext-py/pydruid/utils/dimensions.py

@@ -0,0 +1,199 @@
+def build_dimension(dim):
+    if isinstance(dim, DimensionSpec):
+        dim = dim.build()
+
+    return dim
+
+
+class DimensionSpec(object):
+
+    def __init__(self, dimension, output_name,
+                 extraction_function=None, filter_spec=None):
+        self._dimension = dimension
+        self._output_name = output_name
+        self._extraction_function = extraction_function
+        self._filter_spec = filter_spec
+
+    def build(self):
+        dimension_spec = {
+            'type': 'default',
+            'dimension': self._dimension,
+            'outputName': self._output_name
+        }
+
+        if self._extraction_function is not None:
+            dimension_spec['type'] = 'extraction'
+            dimension_spec['extractionFn'] = self._extraction_function.build()
+
+        if self._filter_spec is not None:
+            dimension_spec = self._filter_spec.build(dimension_spec)
+
+        return dimension_spec
+
+
+class FilteredSpec(object):
+
+    filter_type = None
+
+    def build(self, delegate):
+        dimension_spec = {
+            'type': self.filter_type,
+            'delegate': delegate,
+        }
+        return dimension_spec
+
+
+class ListFilteredSpec(FilteredSpec):
+
+    filter_type = 'listFiltered'
+
+    def __init__(self, values, is_whitelist=True):
+        self._values = values
+        self._is_whitelist = is_whitelist
+
+    def build(self, dimension_spec):
+        filtered_dimension_spec = super(ListFilteredSpec, self).build(dimension_spec)
+        filtered_dimension_spec['values'] = self._values
+
+        if not self._is_whitelist:
+            filtered_dimension_spec['isWhitelist'] = False
+
+        return filtered_dimension_spec
+
+
+class RegexFilteredSpec(FilteredSpec):
+
+    filter_type = 'regexFiltered'
+
+    def __init__(self, pattern):
+        self._pattern = pattern
+
+    def build(self, dimension_spec):
+        filtered_dimension_spec = super(RegexFilteredSpec, self).build(dimension_spec)
+        filtered_dimension_spec['pattern'] = self._pattern
+
+        return filtered_dimension_spec
+
+
+class ExtractionFunction(object):
+
+    extraction_type = None
+
+    def build(self):
+        return {'type': self.extraction_type}
+
+
+class BaseRegexExtraction(ExtractionFunction):
+
+    def __init__(self, expr):
+        super(BaseRegexExtraction, self).__init__()
+        self._expr = expr
+
+    def build(self):
+        extractor = super(BaseRegexExtraction, self).build()
+        extractor['expr'] = self._expr
+
+        return extractor
+
+
+class RegexExtraction(BaseRegexExtraction):
+
+    extraction_type = 'regex'
+
+
+class PartialExtraction(BaseRegexExtraction):
+
+    extraction_type = 'partial'
+
+
+class JavascriptExtraction(ExtractionFunction):
+
+    extraction_type = 'javascript'
+
+    def __init__(self, func, injective=False):
+        super(JavascriptExtraction, self).__init__()
+        self._func = func
+        self._injective = injective
+
+    def build(self):
+        extractor = super(JavascriptExtraction, self).build()
+        extractor['function'] = self._func
+        extractor['injective'] = self._injective
+
+        return extractor
+
+
+class TimeFormatExtraction(ExtractionFunction):
+
+    extraction_type = 'timeFormat'
+
+    def __init__(self, format, locale=None, time_zone=None):
+        super(TimeFormatExtraction, self).__init__()
+        self._format = format
+        self._locale = locale
+        self._time_zone = time_zone
+
+    def build(self):
+        extractor = super(TimeFormatExtraction, self).build()
+        extractor['format'] = self._format
+        if self._locale:
+            extractor['locale'] = self._locale
+        if self._time_zone:
+            extractor['timeZone'] = self._time_zone
+
+        return extractor
+
+
+class LookupExtraction(ExtractionFunction):
+
+    extraction_type = 'lookup'
+    lookup_type = None
+
+    def __init__(self, retain_missing_values=False,
+                 replace_missing_values=None, injective=False):
+        super(LookupExtraction, self).__init__()
+        self._retain_missing_values = retain_missing_values
+        self._replace_missing_values = replace_missing_values
+        self._injective = injective
+
+    def build(self):
+        extractor = super(LookupExtraction, self).build()
+        extractor['lookup'] = self.build_lookup()
+        extractor['retainMissingValue'] = self._retain_missing_values
+        extractor['replaceMissingValueWith'] = self._replace_missing_values
+        extractor['injective'] = self._injective
+
+        return extractor
+
+    def build_lookup(self):
+        return {'type': self.lookup_type}
+
+
+class MapLookupExtraction(LookupExtraction):
+
+    lookup_type = 'map'
+
+    def __init__(self, mapping, **kwargs):
+        super(MapLookupExtraction, self).__init__(**kwargs)
+        self._mapping = mapping
+
+    def build_lookup(self):
+        lookup = super(MapLookupExtraction, self).build_lookup()
+        lookup['map'] = self._mapping
+
+        return lookup
+
+
+class NamespaceLookupExtraction(LookupExtraction):
+
+    lookup_type = 'namespace'
+
+    def __init__(self, namespace, **kwargs):
+        super(NamespaceLookupExtraction, self).__init__(**kwargs)
+        self._namespace = namespace
+
+    def build_lookup(self):
+        lookup = super(NamespaceLookupExtraction, self).build_lookup()
+        lookup['namespace'] = self._namespace
+
+        return lookup
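A sketch of how DimensionSpec and the extraction functions above combine; the dimension names and lookup values are hypothetical:

    from pydruid.utils.dimensions import (
        DimensionSpec, MapLookupExtraction, RegexExtraction)

    # plain rename: 'user_name' is exposed as 'user'
    print(DimensionSpec('user_name', 'user').build())

    # keep only the domain part of a hypothetical 'email' dimension
    print(DimensionSpec('email', 'domain',
                        extraction_function=RegexExtraction(r'@(.*)')).build())

    # map country codes to readable names with an inline lookup
    lookup = MapLookupExtraction({'UA': 'Ukraine', 'US': 'United States'})
    print(DimensionSpec('country_code', 'country',
                        extraction_function=lookup).build())

Passing an extraction function switches the spec's type from 'default' to 'extraction'; a ListFilteredSpec or RegexFilteredSpec wraps the built spec in a 'listFiltered'/'regexFiltered' delegate.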

+ 187 - 0
desktop/core/ext-py/pydruid/utils/filters.py

@@ -0,0 +1,187 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+from .dimensions import build_dimension
+
+
+class Filter:
+
+    # filter types supporting extraction function
+    _FILTERS_WITH_EXTR_FN = ('selector', 'regex', 'javascript', 'in', 'bound',
+                             'interval', 'extraction')
+
+    def __init__(self, extraction_function=None, **args):
+
+        type_ = args.get('type', 'selector')
+
+        if extraction_function is not None:
+            if type_ not in self._FILTERS_WITH_EXTR_FN:
+                raise ValueError('Filter of type {0} doesn\'t support '
+                                 'extraction function'.format(type_))
+        elif type_ == 'extraction':
+            raise ValueError('Filter of type extraction requires extraction '
+                             'function')
+
+        self.extraction_function = extraction_function
+
+        self.filter = {"filter": {"type": type_}}
+
+        if type_ == "selector":
+            self.filter["filter"].update({"dimension": args["dimension"],
+                                          "value": args["value"]})
+        elif type_ == "javascript":
+            self.filter["filter"].update({"dimension": args["dimension"],
+                                          "function": args["function"]})
+        elif type_ == "and":
+            self.filter["filter"].update({"fields": args['fields']})
+        elif type_ == "or":
+            self.filter["filter"].update({"fields": args["fields"]})
+        elif type_ == "not":
+            self.filter["filter"].update({"field": args["field"]})
+        elif type_ == "in":
+            self.filter["filter"].update({"dimension": args["dimension"],
+                                          "values": args["values"]})
+        elif type_ == "regex":
+            self.filter['filter'].update({"dimension": args["dimension"],
+                                          "pattern": args["pattern"]})
+        elif type_ == "bound":
+            self.filter["filter"].update({
+                "dimension": args["dimension"],
+                "lower": args["lower"],
+                "lowerStrict": args["lowerStrict"],
+                "upper": args["upper"],
+                "upperStrict": args["upperStrict"],
+                "alphaNumeric": args["alphaNumeric"]
+            })
+        elif type_ == "columnComparison":
+            self.filter['filter'].update({'dimensions': args['dimensions']})
+        elif type_ == "interval":
+            self.filter['filter'].update({'dimension': args['dimension'],
+                                          'intervals': args['intervals']})
+        elif type_ == "extraction":
+            self.filter["filter"].update({"dimension": args["dimension"],
+                                          "value": args["value"]})
+        else:
+            raise NotImplementedError(
+                'Filter type: {0} does not exist'.format(type_))
+
+    def show(self):
+        print(json.dumps(self.filter, indent=4))
+
+    def __and__(self, x):
+        if self.filter['filter']['type'] == 'and':
+            # if `self` is already `and`, don't create a new filter
+            # but just append `x` to the filter fields.
+            self.filter['filter']['fields'].append(x)
+            return self
+        return Filter(type="and", fields=[self, x])
+
+    def __or__(self, x):
+        if self.filter['filter']['type'] == 'or':
+            # if `self` is already `or`, don't create a new filter
+            # but just append `x` to the filter fields.
+            self.filter['filter']['fields'].append(x)
+            return self
+        return Filter(type="or", fields=[self, x])
+
+    def __invert__(self):
+        return Filter(type="not", field=self)
+
+    @staticmethod
+    def build_filter(filter_obj):
+        filter = filter_obj.filter['filter']
+        if filter['type'] in ['and', 'or']:
+            filter = filter.copy()  # make a copy so we don't overwrite `fields`
+            filter['fields'] = [Filter.build_filter(f) for f in filter['fields']]
+        elif filter['type'] in ['not']:
+            filter = filter.copy()
+            filter['field'] = Filter.build_filter(filter['field'])
+        elif filter['type'] in ['columnComparison']:
+            filter = filter.copy()
+            filter['dimensions'] = [build_dimension(d) for d in filter['dimensions']]
+
+        if filter_obj.extraction_function is not None:
+            if filter is filter_obj.filter['filter']:  # copy if not yet copied
+                filter = filter.copy()
+            filter['extractionFn'] = filter_obj.extraction_function.build()
+
+        return filter
+
+
+class Dimension:
+    def __init__(self, dim):
+        self.dimension = dim
+
+    def __eq__(self, other):
+        return Filter(dimension=self.dimension, value=other)
+
+    def __ne__(self, other):
+        return ~Filter(dimension=self.dimension, value=other)
+
+
+class JavaScript:
+    def __init__(self, dim):
+        self.dimension = dim
+
+    def __eq__(self, func):
+        return Filter(type='javascript', dimension=self.dimension, function=func)
+
+
+class Bound(Filter):
+    """
+    Bound filter can be used to filter by comparing dimension values to an
+    upper bound and/or a lower bound.
+
+    :ivar str dimension: Dimension to filter on.
+    :ivar str lower: Lower bound.
+    :ivar str upper: Upper bound.
+    :ivar bool lowerStrict: Make the lower bound exclusive (strict comparison). Initial value: False
+    :ivar bool upperStrict: Make the upper bound exclusive (strict comparison). Initial value: False
+    :ivar bool alphaNumeric: Use alphanumeric (numeric-aware) comparison instead of lexicographic. Initial value: False
+    :ivar ExtractionFunction extraction_function: extraction function to use,
+                                                  if not None
+    """
+    def __init__(
+            self, dimension, lower, upper, lowerStrict=False,
+            upperStrict=False, alphaNumeric=False, extraction_function=None):
+        Filter.__init__(
+            self,
+            type='bound', dimension=dimension,
+            lower=lower, upper=upper,
+            lowerStrict=lowerStrict, upperStrict=upperStrict,
+            alphaNumeric=alphaNumeric, extraction_function=extraction_function)
+
+
+class Interval(Filter):
+    """
+    Interval filter can be used to filter by comparing dimension (__time)
+    values to a list of ISO-8601 intervals.
+
+    :ivar str dimension: Dimension to filter on.
+    :ivar list intervals: List of ISO-8601 intervals to match the dimension values against.
+    :ivar ExtractionFunction extraction_function: extraction function to use,
+                                                  if not None
+    """
+    def __init__(self, dimension, intervals, extraction_function=None):
+
+        Filter.__init__(
+            self,
+            type='interval', dimension=dimension,
+            intervals=intervals, extraction_function=extraction_function)
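A sketch of how Filter objects compose through the overloaded operators; the dimension names and bounds are hypothetical:

    from pydruid.utils.filters import Bound, Dimension, Filter

    english = Dimension('user_lang') == 'en'
    not_bot = ~(Dimension('user_name') == 'bot')
    popular = Bound('followers', '1000', '1000000', alphaNumeric=True)

    combined = (english & not_bot) | popular
    print(Filter.build_filter(combined))

build_filter() is what the query builder calls to turn the nested Filter objects into the plain dict structure Druid expects.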

+ 85 - 0
desktop/core/ext-py/pydruid/utils/having.py

@@ -0,0 +1,85 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+
+class Having:
+    def __init__(self, **args):
+
+        if args['type'] in ('equalTo', 'lessThan', 'greaterThan'):
+            self.having = {'having': {'type': args['type'],
+                                      'aggregation': args['aggregation'],
+                                      'value': args['value']}}
+
+        elif args['type'] == 'and':
+            self.having = {'having': {'type': 'and',
+                                      'havingSpecs': args['havingSpecs']}}
+
+        elif args['type'] == 'or':
+            self.having = {'having': {'type': 'or',
+                                      'havingSpecs': args['havingSpecs']}}
+
+        elif args['type'] == 'not':
+            self.having = {'having': {'type': 'not',
+                                      'havingSpec': args['havingSpec']}}
+        else:
+            raise NotImplementedError(
+                'Having type: {0} does not exist'.format(args['type']))
+
+    def show(self):
+        print(json.dumps(self.having, indent=4))
+
+    def _combine(self, typ, x):
+        # collapse nested and/ors
+        if self.having['having']['type'] == typ:
+            havingSpecs = self.having['having']['havingSpecs'] + [x.having['having']]
+            return Having(type=typ, havingSpecs=havingSpecs)
+        elif x.having['having']['type'] == typ:
+            havingSpecs = [self.having['having']] + x.having['having']['havingSpecs']
+            return Having(type=typ, havingSpecs=havingSpecs)
+        else:
+            return Having(type=typ,
+                          havingSpecs=[self.having['having'], x.having['having']])
+
+    def __and__(self, x):
+        return self._combine('and', x)
+
+    def __or__(self, x):
+        return self._combine('or', x)
+
+    def __invert__(self):
+        return Having(type='not', havingSpec=self.having['having'])
+
+    @staticmethod
+    def build_having(having_obj):
+        return having_obj.having['having']
+
+
+class Aggregation:
+    def __init__(self, agg):
+        self.aggregation = agg
+
+    def __eq__(self, other):
+        return Having(type='equalTo', aggregation=self.aggregation, value=other)
+
+    def __lt__(self, other):
+        return Having(type='lessThan', aggregation=self.aggregation, value=other)
+
+    def __gt__(self, other):
+        return Having(type='greaterThan', aggregation=self.aggregation, value=other)
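A sketch of the Having DSL above; 'tweet_count' and 'unique_users' are hypothetical aggregation names:

    from pydruid.utils.having import Aggregation, Having

    # keep groups with more than 100 tweets from more than 10 unique users
    active = (Aggregation('tweet_count') > 100) & (Aggregation('unique_users') > 10)
    print(Having.build_having(active))

Chained & / | specs of the same type are collapsed into a single 'and'/'or' havingSpecs list by _combine().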

+ 212 - 0
desktop/core/ext-py/pydruid/utils/postaggregator.py

@@ -0,0 +1,212 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import division
+
+import six
+
+
+class Postaggregator:
+    def __init__(self, fn, fields, name):
+        self.post_aggregator = {'type': 'arithmetic',
+                                'name': name,
+                                'fn': fn,
+                                'fields': fields}
+        self.name = name
+
+    def __mul__(self, other):
+        return Postaggregator('*', self.fields(other),
+                              self.name + 'mul' + other.name)
+
+    def __sub__(self, other):
+        return Postaggregator('-', self.fields(other),
+                              self.name + 'sub' + other.name)
+
+    def __add__(self, other):
+        return Postaggregator('+', self.fields(other),
+                              self.name + 'add' + other.name)
+
+    def __div__(self, other):
+        return Postaggregator('/', self.fields(other),
+                              self.name + 'div' + other.name)
+
+    def __truediv__(self, other):
+        return self.__div__(other)
+
+    def fields(self, other):
+        return [self.post_aggregator, other.post_aggregator]
+
+    @staticmethod
+    def build_post_aggregators(postaggs):
+        def rename_postagg(new_name, post_aggregator):
+            post_aggregator['name'] = new_name
+            return post_aggregator
+
+        return [rename_postagg(new_name, postagg.post_aggregator)
+                for (new_name, postagg) in six.iteritems(postaggs)]
+
+
+class Quantile(Postaggregator):
+    def __init__(self, name, probability):
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'quantile', 'fieldName': name, 'probability': probability}
+
+
+class Quantiles(Postaggregator):
+    def __init__(self, name, probabilities):
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'quantiles', 'fieldName': name,
+            'probabilities': probabilities}
+
+
+class Field(Postaggregator):
+    def __init__(self, name):
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'fieldAccess', 'fieldName': name}
+
+
+class Const(Postaggregator):
+    def __init__(self, value, output_name=None):
+
+        if output_name is None:
+            name = 'const'
+        else:
+            name = output_name
+
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'constant', 'name': name, 'value': value}
+
+
+class HyperUniqueCardinality(Postaggregator):
+    def __init__(self, name):
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'hyperUniqueCardinality', 'fieldName': name}
+
+
+class DoubleGreatest(Postaggregator):
+    def __init__(self, fields, output_name=None):
+
+        if output_name is None:
+            name = 'doubleGreatest'
+        else:
+            name = output_name
+
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+                'type': 'doubleGreatest',
+                'name': name,
+                'fields': [f.post_aggregator for f in fields]}
+
+
+class DoubleLeast(Postaggregator):
+    def __init__(self, fields, output_name=None):
+
+        if output_name is None:
+            name = 'doubleLeast'
+        else:
+            name = output_name
+
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+                'type': 'doubleLeast',
+                'name': name,
+                'fields': [f.post_aggregator for f in fields]}
+
+
+class LongGreatest(Postaggregator):
+    def __init__(self, fields, output_name=None):
+
+        if output_name is None:
+            name = 'longGreatest'
+        else:
+            name = output_name
+
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+                'type': 'longGreatest',
+                'name': name,
+                'fields': [f.post_aggregator for f in fields]}
+
+
+class LongLeast(Postaggregator):
+    def __init__(self, fields, output_name=None):
+
+        if output_name is None:
+            name = 'longLeast'
+        else:
+            name = output_name
+
+        Postaggregator.__init__(self, None, None, name)
+        self.post_aggregator = {
+                'type': 'longLeast',
+                'name': name,
+                'fields': [f.post_aggregator for f in fields]}
+
+
+class ThetaSketchOp(object):
+    def __init__(self, fn, fields, name):
+        self.post_aggregator = {'type': 'thetaSketchSetOp',
+                                'name': name,
+                                'func': fn,
+                                'fields': fields}
+        self.name = name
+
+    def __or__(self, other):
+        return ThetaSketchOp('UNION', self.fields(other),
+                             self.name + '_OR_' + other.name)
+
+    def __and__(self, other):
+        return ThetaSketchOp('INTERSECT', self.fields(other),
+                             self.name + '_AND_' + other.name)
+
+    def __ne__(self, other):
+        return ThetaSketchOp('NOT', self.fields(other),
+                             self.name + '_NOT_' + other.name)
+
+    def fields(self, other):
+        return [self.post_aggregator, other.post_aggregator]
+
+    @staticmethod
+    def build_post_aggregators(thetasketchops):
+        def rename_thetasketchop(new_name, thetasketchop):
+            thetasketchop['name'] = new_name
+            return thetasketchop
+
+        return [rename_thetasketchop(new_name, thetasketchop.post_aggregator)
+                for (new_name, thetasketchop) in six.iteritems(thetasketchops)]
+
+
+class ThetaSketch(ThetaSketchOp):
+    def __init__(self, name):
+        ThetaSketchOp.__init__(self, None, None, name)
+        self.post_aggregator = {
+            'type': 'fieldAccess', 'fieldName': name}
+
+
+class ThetaSketchEstimate(Postaggregator):
+    def __init__(self, fields):
+        field = fields.post_aggregator \
+            if type(fields) in [ThetaSketch, ThetaSketchOp] else fields
+        self.post_aggregator = {
+            'type': 'thetaSketchEstimate',
+            'name': 'thetasketchestimate',
+            'field': field,
+        }
+        self.name = 'thetasketchestimate'
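A sketch of the post-aggregator arithmetic and theta-sketch helpers above; 'clicks', 'impressions' and the *_theta fields are hypothetical:

    from pydruid.utils.postaggregator import (
        Field, Postaggregator, ThetaSketch, ThetaSketchEstimate)

    # click-through rate as an arithmetic post-aggregation
    ctr = Field('clicks') / Field('impressions')
    print(Postaggregator.build_post_aggregators({'ctr': ctr}))

    # intersection of two theta sketches, then an estimate of its cardinality
    overlap = ThetaSketchEstimate(ThetaSketch('users_theta') & ThetaSketch('buyers_theta'))
    print(overlap.post_aggregator)

In a query these objects are passed under the post_aggregations key and renamed to their dict keys by build_post_aggregators().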

+ 46 - 0
desktop/core/ext-py/pydruid/utils/query_utils.py

@@ -0,0 +1,46 @@
+#
+# Copyright 2013 Metamarkets Group Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import csv
+import codecs
+import six
+# A special CSV writer which will write rows to a TSV file "f", encoded in utf-8.
+# This is necessary because the values in Druid are not all ASCII.
+
+
+class UnicodeWriter(object):
+
+    # The 'excel-tab' dialect writes tab-delimited rows.
+    def __init__(self, f, dialect="excel-tab", encoding="utf-8", **kwds):
+        self.stream = f
+        self.writer = csv.writer(self.stream, dialect=dialect, **kwds)
+        self.encoder = codecs.getincrementalencoder(encoding)()
+
+    def __encode(self, data):
+        data = str(data) if isinstance(data, six.integer_types) else data
+        if not six.PY3:
+            data = data.encode('utf-8') \
+                if isinstance(data, unicode) else data  # noqa
+            data = data.decode('utf-8')
+            return self.encoder.encode(data)
+        return data
+
+    def writerow(self, row):
+        row = [self.__encode(s) for s in row]
+        self.writer.writerow(row)
+
+    def writerows(self, rows):
+        for row in rows:
+            self.writerow(row)
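A sketch of UnicodeWriter on Python 3, where csv.writer can write straight to a text stream; the rows are hypothetical:

    import io

    from pydruid.utils.query_utils import UnicodeWriter

    buf = io.StringIO()
    writer = UnicodeWriter(buf)            # 'excel-tab' dialect, i.e. TSV
    writer.writerow(['user_name', 'count'])
    writer.writerows([['café', 3], ['naïve', 7]])
    print(buf.getvalue())

Query.export_tsv() uses the same class, opening the destination file in text mode with newline='' on Python 3 and in binary mode on Python 2.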