
#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
from __future__ import absolute_import

import json

from pydruid.client import BaseDruidClient

try:
    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient, HTTPError
except ImportError:
    print('Warning: unable to import Tornado. The asynchronous client will not work.')


class AsyncPyDruid(BaseDruidClient):
    """
    Asynchronous PyDruid client which mirrors the functionality of the
    synchronous PyDruid client, but executes queries asynchronously (using
    the asynchronous HTTP client from the Tornado framework).

    Returns Query objects that can be used for exporting query results into
    TSV files or pandas.DataFrame objects for subsequent analysis.

    :param str url: URL of Broker node in the Druid cluster
    :param str endpoint: Endpoint that Broker listens for queries on

    Example

    .. code-block:: python
        :linenos:

            >>> from pydruid.async_client import *

            >>> query = AsyncPyDruid('http://localhost:8083', 'druid/v2/')

            >>> top = yield query.topn(
                    datasource='twitterstream',
                    granularity='all',
                    intervals='2013-10-04/pt1h',
                    aggregations={"count": doublesum("count")},
                    dimension='user_name',
                    filter=Dimension('user_lang') == 'en',
                    metric='count',
                    threshold=2)

            >>> print json.dumps(top.query_dict, indent=2)
            >>> {
                    "metric": "count",
                    "aggregations": [
                        {
                            "type": "doubleSum",
                            "fieldName": "count",
                            "name": "count"
                        }
                    ],
                    "dimension": "user_name",
                    "filter": {
                        "type": "selector",
                        "dimension": "user_lang",
                        "value": "en"
                    },
                    "intervals": "2013-10-04/pt1h",
                    "dataSource": "twitterstream",
                    "granularity": "all",
                    "threshold": 2,
                    "queryType": "topN"
                }

            >>> print top.result
            >>> [{'timestamp': '2013-10-04T00:00:00.000Z',
                 'result': [{'count': 7.0, 'user_name': 'user_1'},
                            {'count': 6.0, 'user_name': 'user_2'}]}]

            >>> df = top.export_pandas()
            >>> print df
            >>>    count                 timestamp user_name
                0      7  2013-10-04T00:00:00.000Z    user_1
                1      6  2013-10-04T00:00:00.000Z    user_2

    (A runnable sketch showing how to drive these coroutines on a Tornado
    IOLoop appears at the end of this module.)
    """
    def __init__(self, url, endpoint):
        super(AsyncPyDruid, self).__init__(url, endpoint)

    @gen.coroutine
    def _post(self, query):
        http_client = AsyncHTTPClient()
        try:
            headers, querystr, url = self._prepare_url_headers_and_body(query)
            # Issue the query as an asynchronous POST and wait for the
            # Broker's response.
            response = yield http_client.fetch(
                url, method='POST', headers=headers, body=querystr)
        except HTTPError as e:
            self.__handle_http_error(e, query)
        else:
            # Parse the JSON response body into the Query object and return it.
            query.parse(response.body.decode("utf-8"))
            raise gen.Return(query)
    @staticmethod
    def __handle_http_error(e, query):
        err = None
        if e.code == 500:
            # has Druid returned an error?
            try:
                err = json.loads(e.response.body.decode("utf-8"))
            except ValueError:
                pass
            else:
                err = err.get('error', None)
        raise IOError('{0} \n Druid Error: {1} \n Query is: {2}'.format(
            e, err, json.dumps(query.query_dict, indent=4)))
    @gen.coroutine
    def topn(self, **kwargs):
        query = self.query_builder.topn(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)

    @gen.coroutine
    def timeseries(self, **kwargs):
        query = self.query_builder.timeseries(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)

    @gen.coroutine
    def groupby(self, **kwargs):
        query = self.query_builder.groupby(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)

    @gen.coroutine
    def segment_metadata(self, **kwargs):
        query = self.query_builder.segment_metadata(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)

    @gen.coroutine
    def time_boundary(self, **kwargs):
        query = self.query_builder.time_boundary(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)

    @gen.coroutine
    def select(self, **kwargs):
        query = self.query_builder.select(kwargs)
        result = yield self._post(query)
        raise gen.Return(result)
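

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library API).
#
# The class docstring above shows the coroutine-style calls; this sketch shows
# one way to actually drive them: wrap the calls in a gen.coroutine and run it
# to completion on a Tornado IOLoop. The Broker URL, datasource, and the
# pydruid.utils import paths below are assumptions carried over from the
# docstring example -- adjust them to your own cluster.
if __name__ == "__main__":
    from tornado.ioloop import IOLoop
    from pydruid.utils.aggregators import doublesum
    from pydruid.utils.filters import Dimension

    @gen.coroutine
    def _example():
        client = AsyncPyDruid('http://localhost:8083', 'druid/v2/')
        # Each query method returns a Future; yielding it inside a coroutine
        # produces the finished Query object.
        top = yield client.topn(
            datasource='twitterstream',
            granularity='all',
            intervals='2013-10-04/pt1h',
            aggregations={"count": doublesum("count")},
            dimension='user_name',
            filter=Dimension('user_lang') == 'en',
            metric='count',
            threshold=2)
        print(top.result)

    # run_sync starts the IOLoop, runs the coroutine to completion, then stops.
    IOLoop.current().run_sync(_example)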