#
# Copyright 2013 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
from __future__ import absolute_import

import json
import re
from base64 import b64encode

from six.moves import urllib

from pydruid.query import QueryBuilder

# extract error from the <PRE> tag inside the HTML response
HTML_ERROR = re.compile(r'<pre>\s*(.*?)\s*</pre>', re.IGNORECASE)
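# For example (hypothetical error page), the pattern pulls the message out of
# a Jetty-style HTML body:
#   HTML_ERROR.search('<html><body><pre> Unknown exception </pre></body></html>')
#   => match.group(1) == 'Unknown exception'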


class BaseDruidClient(object):
    def __init__(self, url, endpoint):
        self.url = url
        self.endpoint = endpoint
        self.query_builder = QueryBuilder()
        self.username = None
        self.password = None

    def set_basic_auth_credentials(self, username, password):
        self.username = username
        self.password = password

    def _prepare_url_headers_and_body(self, query):
        querystr = json.dumps(query.query_dict).encode('utf-8')
        if self.url.endswith('/'):
            url = self.url + self.endpoint
        else:
            url = self.url + '/' + self.endpoint
        headers = {'Content-Type': 'application/json'}
        if (self.username is not None) and (self.password is not None):
            authstring = '{}:{}'.format(self.username, self.password)
            b64string = b64encode(authstring.encode()).decode()
            headers['Authorization'] = 'Basic {}'.format(b64string)
        return headers, querystr, url
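
    # A sketch of what _prepare_url_headers_and_body() returns, with
    # hypothetical values: for url='http://localhost:8082',
    # endpoint='druid/v2', and credentials ('user', 'pass') set via
    # set_basic_auth_credentials():
    #   url      -> 'http://localhost:8082/druid/v2'
    #   headers  -> {'Content-Type': 'application/json',
    #                'Authorization': 'Basic dXNlcjpwYXNz'}  # b64('user:pass')
    #   querystr -> the UTF-8 encoded JSON body of query.query_dict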

    def _post(self, query):
        """
        Fills Query object with results.

        :param Query query: query to execute
        :return: Query filled with results
        :rtype: Query
        """
        raise NotImplementedError("Subclasses must implement this method")

    # --------- Query implementations ---------

    def topn(self, **kwargs):
        """
        A TopN query returns a set of the values in a given dimension,
        sorted by a specified metric. Conceptually, a topN can be
        thought of as an approximate GroupByQuery over a single
        dimension with an Ordering spec. TopNs are
        faster and more resource efficient than GroupBy for this use case.

        Required key/value pairs:

        :param str datasource: Data source to query
        :param str granularity: Aggregate data by hour, day, minute, etc.
        :param intervals: ISO-8601 intervals of data to query
        :type intervals: str or list
        :param dict aggregations: A map from aggregator name to one of
          the pydruid.utils.aggregators e.g., doublesum
        :param str dimension: Dimension to run the query against
        :param str metric: Metric by which to sort the specified dimension
        :param int threshold: How many of the top items to return

        :return: The query result
        :rtype: Query

        Optional key/value pairs:

        :param pydruid.utils.filters.Filter filter: Indicates which rows
          of data to include in the query
        :param post_aggregations: A dict with string key = 'post_aggregator_name',
          and value pydruid.utils.PostAggregator
        :param dict context: A dict of query context options

        Example:

        .. code-block:: python
            :linenos:

            >>> top = client.topn(
                    datasource='twitterstream',
                    granularity='all',
                    intervals='2013-06-14/pt1h',
                    aggregations={"count": doublesum("count")},
                    dimension='user_name',
                    metric='count',
                    filter=Dimension('user_lang') == 'en',
                    threshold=1,
                    context={"timeout": 1000}
                )
            >>> print(top)
            >>> [{'timestamp': '2013-06-14T00:00:00.000Z',
                  'result': [{'count': 22.0, 'user_name': 'cool_user'}]}]
        """
        query = self.query_builder.topn(kwargs)
        return self._post(query)

    def timeseries(self, **kwargs):
        """
        A timeseries query returns the values of the requested metrics
        (in aggregate) for each timestamp.

        Required key/value pairs:

        :param str datasource: Data source to query
        :param str granularity: Time bucket to aggregate data by: hour, day, minute, etc.
        :param intervals: ISO-8601 intervals over which to run the query
        :type intervals: str or list
        :param dict aggregations: A map from aggregator name to one of the
          ``pydruid.utils.aggregators`` e.g., ``doublesum``

        :return: The query result
        :rtype: Query

        Optional key/value pairs:

        :param pydruid.utils.filters.Filter filter: Indicates which rows of
          data to include in the query
        :param post_aggregations: A dict with string key =
          'post_aggregator_name', and value pydruid.utils.PostAggregator
        :param dict context: A dict of query context options

        Example:

        .. code-block:: python
            :linenos:

            >>> counts = client.timeseries(
                    datasource='twitterstream',
                    granularity='hour',
                    intervals='2013-06-14/pt1h',
                    aggregations={"count": doublesum("count"),
                                  "rows": count("rows")},
                    post_aggregations={
                        'percent': (Field('count') / Field('rows')) * Const(100)},
                    context={"timeout": 1000}
                )
            >>> print(counts)
            >>> [{'timestamp': '2013-06-14T00:00:00.000Z',
                  'result': {'count': 9619.0, 'rows': 8007,
                             'percent': 120.13238416385663}}]
        """
        query = self.query_builder.timeseries(kwargs)
        return self._post(query)

    def groupby(self, **kwargs):
        """
        A group-by query groups a result set (the requested aggregate
        metrics) by the specified dimension(s).

        Required key/value pairs:

        :param str datasource: Data source to query
        :param str granularity: Time bucket to aggregate data by: hour, day, minute, etc.
        :param intervals: ISO-8601 intervals over which to run the query
        :type intervals: str or list
        :param dict aggregations: A map from aggregator name to one of the
          ``pydruid.utils.aggregators`` e.g., ``doublesum``
        :param list dimensions: The dimensions to group by

        :return: The query result
        :rtype: Query

        Optional key/value pairs:

        :param pydruid.utils.filters.Filter filter: Indicates which rows of
          data to include in the query
        :param pydruid.utils.having.Having having: Indicates which groups
          in the result set of the query to keep
        :param post_aggregations: A dict with string key = 'post_aggregator_name',
          and value pydruid.utils.PostAggregator
        :param dict context: A dict of query context options
        :param dict limit_spec: A dict of parameters defining how to limit
          the rows returned, as specified in the Druid api documentation

        Example:

        .. code-block:: python
            :linenos:

            >>> group = client.groupby(
                    datasource='twitterstream',
                    granularity='hour',
                    intervals='2013-10-04/pt1h',
                    dimensions=["user_name", "reply_to_name"],
                    filter=~(Dimension("reply_to_name") == "Not A Reply"),
                    aggregations={"count": doublesum("count")},
                    context={"timeout": 1000},
                    limit_spec={
                        "type": "default",
                        "limit": 50,
                        "columns": ["count"]
                    }
                )
            >>> for k in range(2):
            ...     print(group[k])
            >>> {
                    'timestamp': '2013-10-04T00:00:00.000Z',
                    'version': 'v1',
                    'event': {
                        'count': 1.0,
                        'user_name': 'user_1',
                        'reply_to_name': 'user_2',
                    }
                }
            >>> {
                    'timestamp': '2013-10-04T00:00:00.000Z',
                    'version': 'v1',
                    'event': {
                        'count': 1.0,
                        'user_name': 'user_2',
                        'reply_to_name': 'user_3',
                    }
                }
        """
        query = self.query_builder.groupby(kwargs)
        return self._post(query)

    def segment_metadata(self, **kwargs):
        """
        A segment meta-data query returns per segment information about:

        * Cardinality of all the columns present
        * Column type
        * Estimated size in bytes
        * Estimated size in bytes of each column
        * Interval the segment covers
        * Segment ID

        Required key/value pairs:

        :param str datasource: Data source to query
        :param intervals: ISO-8601 intervals over which to run the query
        :type intervals: str or list

        Optional key/value pairs:

        :param dict context: A dict of query context options

        :return: The query result
        :rtype: Query

        Example:

        .. code-block:: python
            :linenos:

            >>> meta = client.segment_metadata(
                    datasource='twitterstream', intervals='2013-10-04/pt1h')
            >>> print(meta[0].keys())
            >>> ['intervals', 'id', 'columns', 'size']
            >>> print(meta[0]['columns']['tweet_length'])
            >>> {
                    'errorMessage': None,
                    'cardinality': None,
                    'type': 'FLOAT',
                    'size': 30908008,
                }
        """
        query = self.query_builder.segment_metadata(kwargs)
        return self._post(query)

    def time_boundary(self, **kwargs):
        """
        A time boundary query returns the min and max timestamps present
        in a data source.

        Required key/value pairs:

        :param str datasource: Data source to query

        Optional key/value pairs:

        :param dict context: A dict of query context options

        :return: The query result
        :rtype: Query

        Example:

        .. code-block:: python
            :linenos:

            >>> bound = client.time_boundary(datasource='twitterstream')
            >>> print(bound)
            >>> [{
                    'timestamp': '2011-09-14T15:00:00.000Z',
                    'result': {
                        'minTime': '2011-09-14T15:00:00.000Z',
                        'maxTime': '2014-03-04T23:44:00.000Z',
                    }
                }]
        """
        query = self.query_builder.time_boundary(kwargs)
        return self._post(query)

    def select(self, **kwargs):
        """
        A select query returns raw Druid rows and supports pagination.

        Required key/value pairs:

        :param str datasource: Data source to query
        :param str granularity: Time bucket to aggregate data by: hour, day, minute, etc.
        :param dict paging_spec: Indicates offsets into different scanned segments
        :param intervals: ISO-8601 intervals over which to run the query
        :type intervals: str or list

        Optional key/value pairs:

        :param pydruid.utils.filters.Filter filter: Indicates which rows of
          data to include in the query
        :param list dimensions: The list of dimensions to select. If left
          empty, all dimensions are returned
        :param list metrics: The list of metrics to select. If left empty,
          all metrics are returned
        :param dict context: A dict of query context options

        :return: The query result
        :rtype: Query

        Example:

        .. code-block:: python
            :linenos:

            >>> raw_data = client.select(
                    datasource='twitterstream',
                    granularity='all',
                    intervals='2013-06-14/pt1h',
                    paging_spec={'pagingIdentifiers': {}, 'threshold': 1},
                    context={"timeout": 1000}
                )
            >>> print(raw_data)
            >>> [{
                    'timestamp': '2013-06-14T00:00:00.000Z',
                    'result': {
                        'pagingIdentifiers': {
                            'twitterstream_...08:00:00.000Z_v1': 1,
                        },
                        'events': [{
                            'segmentId': 'twitterstr...000Z_v1',
                            'offset': 0,
                            'event': {
                                'timestamp': '2013-06-14T00:00:00.000Z',
                                'dim': 'value',
                            }
                        }]
                    }
                }]
        """
        query = self.query_builder.select(kwargs)
        return self._post(query)

    def export_tsv(self, dest_path):
        """
        Export the current query result to a tsv file.

        .. deprecated::
            Use Query.export_tsv() method instead.
        """
        if self.query_builder.last_query is None:
            raise AttributeError(
                "There was no query executed by this client yet. Can't export!")
        else:
            return self.query_builder.last_query.export_tsv(dest_path)

    def export_pandas(self):
        """
        Export the current query result to a Pandas DataFrame object.

        .. deprecated::
            Use Query.export_pandas() method instead.
        """
        if self.query_builder.last_query is None:
            raise AttributeError(
                "There was no query executed by this client yet. Can't export!")
        else:
            return self.query_builder.last_query.export_pandas()


class PyDruid(BaseDruidClient):
    """
    PyDruid contains the functions for creating and executing Druid queries.
    Returns Query objects that can be used for exporting query results
    into TSV files or pandas.DataFrame objects for subsequent analysis.

    :param str url: URL of Broker node in the Druid cluster
    :param str endpoint: Endpoint that Broker listens for queries on

    Example:

    .. code-block:: python
        :linenos:

        >>> from pydruid.client import *
        >>> query = PyDruid('http://localhost:8083', 'druid/v2/')
        >>> top = query.topn(
                datasource='twitterstream',
                granularity='all',
                intervals='2013-10-04/pt1h',
                aggregations={"count": doublesum("count")},
                dimension='user_name',
                filter=Dimension('user_lang') == 'en',
                metric='count',
                threshold=2
            )
        >>> print(json.dumps(top.query_dict, indent=2))
        >>> {
                "metric": "count",
                "aggregations": [
                    {
                        "type": "doubleSum",
                        "fieldName": "count",
                        "name": "count"
                    }
                ],
                "dimension": "user_name",
                "filter": {
                    "type": "selector",
                    "dimension": "user_lang",
                    "value": "en"
                },
                "intervals": "2013-10-04/pt1h",
                "dataSource": "twitterstream",
                "granularity": "all",
                "threshold": 2,
                "queryType": "topN"
            }
        >>> print(top.result)
        >>> [{
                'timestamp': '2013-10-04T00:00:00.000Z',
                'result': [
                    {
                        'count': 7.0,
                        'user_name': 'user_1',
                    },
                    {
                        'count': 6.0,
                        'user_name': 'user_2',
                    },
                ]}]
        >>> df = top.export_pandas()
        >>> print(df)
        >>>    count                 timestamp user_name
            0      7  2013-10-04T00:00:00.000Z    user_1
            1      6  2013-10-04T00:00:00.000Z    user_2
    """

    def __init__(self, url, endpoint):
        super(PyDruid, self).__init__(url, endpoint)

    def _post(self, query):
        try:
            headers, querystr, url = self._prepare_url_headers_and_body(query)
            req = urllib.request.Request(url, querystr, headers)
            res = urllib.request.urlopen(req)
            data = res.read().decode("utf-8")
            res.close()
        except urllib.error.HTTPError as e:
            err = e.reason
            if e.code == 500:
                # has Druid returned an error? The body may be JSON, or an
                # HTML error page with the message inside a <pre> tag.
                try:
                    err = json.loads(err)
                except ValueError:
                    match = HTML_ERROR.search(err)
                    if match:
                        err = match.group(1)
                except (AttributeError, KeyError):
                    pass
            raise IOError('{0} \n Druid Error: {1} \n Query is: {2}'.format(
                e, err, json.dumps(
                    query.query_dict,
                    indent=4,
                    sort_keys=True,
                    separators=(',', ': '))))
        else:
            query.parse(data)
            return query
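
    # A hypothetical sketch of handling a failed query: Druid errors surface
    # as IOError, carrying the HTTP error, the extracted Druid message, and
    # the offending query, e.g.
    #
    #   try:
    #       client.topn(...)
    #   except IOError as e:
    #       print(e)  # 'HTTP Error 500: ... Druid Error: ... Query is: {...}'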

    def scan(self, **kwargs):
        """
        A scan query returns raw Druid rows.

        Required key/value pairs:

        :param str datasource: Data source to query
        :param str granularity: Time bucket to aggregate data by: hour, day, minute, etc.
        :param int limit: The maximum number of rows to return
        :param intervals: ISO-8601 intervals over which to run the query
        :type intervals: str or list

        Optional key/value pairs:

        :param pydruid.utils.filters.Filter filter: Indicates which rows of
          data to include in the query
        :param list dimensions: The list of dimensions to select. If left
          empty, all dimensions are returned
        :param list metrics: The list of metrics to select. If left empty,
          all metrics are returned
        :param dict context: A dict of query context options

        :return: The query result
        :rtype: Query

        Example:

        .. code-block:: python
            :linenos:

            >>> raw_data = client.scan(
                    datasource='twitterstream',
                    granularity='all',
                    intervals='2013-06-14/pt1h',
                    limit=1,
                    context={"timeout": 1000}
                )
            >>> print(raw_data)
            >>> [{
                    'segmentId': 'zzzz',
                    'columns': ['__time', 'status', 'region'],
                    'events': [{
                        'status': 'ok', 'region': 'SF', '__time': 1509494400000,
                    }]
                }]
        """
        query = self.query_builder.scan(kwargs)
        return self._post(query)
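

# A minimal end-to-end usage sketch. It assumes a Druid Broker reachable at
# http://localhost:8082 and a 'twitterstream' datasource with 'count' and
# 'user_lang' columns; adjust both for your cluster.
if __name__ == '__main__':
    from pydruid.utils.aggregators import doublesum
    from pydruid.utils.filters import Dimension

    client = PyDruid('http://localhost:8082', 'druid/v2')
    ts = client.timeseries(
        datasource='twitterstream',
        granularity='hour',
        intervals='2013-06-14/pt1h',
        aggregations={'count': doublesum('count')},
        filter=Dimension('user_lang') == 'en',
    )
    # Each element of ts.result pairs a timestamp with its aggregated metrics.
    print(ts.result)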