#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import six
import json
import collections

from pydruid.utils.aggregators import build_aggregators
from pydruid.utils.filters import Filter
from pydruid.utils.having import Having
from pydruid.utils.dimensions import build_dimension
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.query_utils import UnicodeWriter


class Query(collections.MutableSequence):
    """
    Query objects are produced by PyDruid clients and can be used for
    exporting query results into TSV files or pandas.DataFrame objects
    for subsequent analysis. They also hold information about the issued
    query.

    Query acts as a wrapper over the raw result list of dictionaries.

    :ivar str result_json: JSON object representing a query result. Initial value: None
    :ivar list result: Query result parsed into a list of dicts. Initial value: None
    :ivar str query_type: Name of the most recently run query, e.g., topN. Initial value: None
    :ivar dict query_dict: JSON object representing the query. Initial value: None
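
    Illustrative example of the sequence interface (``top`` here is a
    hypothetical Query returned by a PyDruid client; the output shown is
    made up):

    .. code-block:: python
        :linenos:

            >>> len(top)    # number of raw result rows
            2
            >>> top[0]      # rows are plain dicts
            {'timestamp': '2013-10-04T00:00:00.000Z', 'result': [...]}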
  36. """

    def __init__(self, query_dict, query_type):
        super(Query, self).__init__()
        self.query_dict = query_dict
        self.query_type = query_type
        self.result = None
        self.result_json = None

    def parse(self, data):
        if data:
            self.result_json = data
            res = json.loads(self.result_json)
            self.result = res
        else:
            raise IOError('Error parsing result: {0} for {1} query'.format(
                self.result_json, self.query_type))

    def export_tsv(self, dest_path):
        """
        Export the current query result to a tsv file.

        :param str dest_path: file to write query results to

        :raise NotImplementedError:

        Example

        .. code-block:: python
            :linenos:

                >>> top = client.topn(
                        datasource='twitterstream',
                        granularity='all',
                        intervals='2013-10-04/pt1h',
                        aggregations={"count": doublesum("count")},
                        dimension='user_name',
                        filter=Dimension('user_lang') == 'en',
                        metric='count',
                        threshold=2
                    )

                >>> top.export_tsv('top.tsv')
                >>> !cat top.tsv
                count    user_name    timestamp
                7.0    user_1    2013-10-04T00:00:00.000Z
                6.0    user_2    2013-10-04T00:00:00.000Z
        """
        if six.PY3:
            f = open(dest_path, 'w', newline='', encoding='utf-8')
        else:
            f = open(dest_path, 'wb')

        w = UnicodeWriter(f)

        if self.query_type == "timeseries":
            header = list(self.result[0]['result'].keys())
            header.append('timestamp')
        elif self.query_type == 'topN':
            header = list(self.result[0]['result'][0].keys())
            header.append('timestamp')
        elif self.query_type == "groupBy":
            header = list(self.result[0]['event'].keys())
            header.append('timestamp')
            header.append('version')
        else:
            raise NotImplementedError(
                'TSV export not implemented for query type: {0}'.format(self.query_type))

        w.writerow(header)

        if self.result:
            if self.query_type == "topN" or self.query_type == "timeseries":
                for item in self.result:
                    timestamp = item['timestamp']
                    result = item['result']
                    if type(result) is list:  # topN
                        for line in result:
                            w.writerow(list(line.values()) + [timestamp])
                    else:  # timeseries
                        w.writerow(list(result.values()) + [timestamp])
            elif self.query_type == "groupBy":
                for item in self.result:
                    timestamp = item['timestamp']
                    version = item['version']
                    w.writerow(
                        list(item['event'].values()) + [timestamp] + [version])

        f.close()

    def export_pandas(self):
        """
        Export the current query result to a Pandas DataFrame object.

        :return: The DataFrame representing the query result
        :rtype: DataFrame

        :raise NotImplementedError:

        Example

        .. code-block:: python
            :linenos:

                >>> top = client.topn(
                        datasource='twitterstream',
                        granularity='all',
                        intervals='2013-10-04/pt1h',
                        aggregations={"count": doublesum("count")},
                        dimension='user_name',
                        filter=Dimension('user_lang') == 'en',
                        metric='count',
                        threshold=2
                    )

                >>> df = top.export_pandas()
                >>> print(df)
                   count                 timestamp user_name
                0      7  2013-10-04T00:00:00.000Z    user_1
                1      6  2013-10-04T00:00:00.000Z    user_2
        """
        import pandas

        if self.result:
            if self.query_type == "timeseries":
                nres = [list(v['result'].items()) + [('timestamp', v['timestamp'])]
                        for v in self.result]
                nres = [dict(v) for v in nres]
            elif self.query_type == "topN":
                nres = []
                for item in self.result:
                    timestamp = item['timestamp']
                    results = item['result']
                    tres = [dict(list(res.items()) + [('timestamp', timestamp)])
                            for res in results]
                    nres += tres
            elif self.query_type == "groupBy":
                nres = [list(v['event'].items()) + [('timestamp', v['timestamp'])]
                        for v in self.result]
                nres = [dict(v) for v in nres]
            elif self.query_type == "select":
                nres = []
                for item in self.result:
                    nres += [e.get('event') for e in item['result'].get('events')]
            elif self.query_type == "scan":
                nres = []
                for item in self.result:
                    nres += [e for e in item.get('events')]
            else:
                raise NotImplementedError(
                    'Pandas export not implemented for query '
                    'type: {0}'.format(self.query_type))

            df = pandas.DataFrame(nres)
            return df

    def __str__(self):
        return str(self.result)

    def __len__(self):
        return len(self.result)

    def __delitem__(self, index):
        del self.result[index]

    def insert(self, index, value):
        self.result.insert(index, value)

    def __setitem__(self, index, value):
        self.result[index] = value

    def __getitem__(self, index):
        return self.result[index]


class QueryBuilder(object):
    def __init__(self):
        self.last_query = None

    @staticmethod
    def parse_datasource(datasource, query_type):
        """
        Parse an input datasource object into a valid dictionary.

        Input can be a string, in which case it is simply returned, or a
        list, in which case it is turned into a UNION datasource.

        :param datasource: datasource parameter
        :param string query_type: query type
        :raise ValueError: if input is not a string or a list of strings
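
        Illustrative example (hypothetical datasource names):

        .. code-block:: python
            :linenos:

                >>> QueryBuilder.parse_datasource('twitterstream', 'topN')
                'twitterstream'
                >>> QueryBuilder.parse_datasource(['ds1', 'ds2'], 'timeseries')
                {'type': 'union', 'dataSources': ['ds1', 'ds2']}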
  192. """
  193. if not (
  194. isinstance(datasource, six.string_types) or
  195. (
  196. isinstance(datasource, list) and
  197. all([isinstance(x, six.string_types) for x in datasource])
  198. )
  199. ):
  200. raise ValueError(
  201. 'Datasource definition not valid. Must be string or list of strings')
  202. if isinstance(datasource, six.string_types):
  203. return datasource
  204. else:
  205. return {'type': 'union', 'dataSources': datasource}

    @staticmethod
    def validate_query(query_type, valid_parts, args):
        """
        Validate the query parts so that only allowed objects are sent.

        Each query type can have an optional 'context' object attached,
        which is used to set certain query context settings, e.g. timeout
        or priority. As every query can carry this object, callers need
        not list it among the valid parts; it is appended here.

        :param string query_type: a type of query
        :param list valid_parts: a list of valid object names
        :param dict args: the dict of args to be sent
        :raise ValueError: if an invalid object is given
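
        Illustrative example (hypothetical arguments; note that 'context'
        is always accepted):

        .. code-block:: python
            :linenos:

                >>> QueryBuilder.validate_query(
                        'timeBoundary', ['datasource'],
                        {'datasource': 'ds', 'context': {'timeout': 1000}})  # ok
                >>> QueryBuilder.validate_query(
                        'timeBoundary', ['datasource'],
                        {'granularity': 'day'})  # raises ValueError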
  218. """
  219. valid_parts = valid_parts[:] + ['context']
  220. for key, val in six.iteritems(args):
  221. if key not in valid_parts:
  222. raise ValueError(
  223. 'Query component: {0} is not valid for query type: {1}.'
  224. .format(key, query_type) +
  225. 'The list of valid components is: \n {0}'
  226. .format(valid_parts))

    def build_query(self, query_type, args):
        """
        Build a query based on the given query type and arguments.

        :param string query_type: a type of query
        :param dict args: the dict of args to be sent
        :return: the resulting query
        :rtype: Query
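
        Illustrative example (hypothetical arguments; note how the pythonic
        'datasource' key is emitted as Druid's camelCase 'dataSource'):

        .. code-block:: python
            :linenos:

                >>> builder = QueryBuilder()
                >>> query = builder.build_query(
                        'timeBoundary', {'datasource': 'twitterstream'})
                >>> query.query_dict
                {'queryType': 'timeBoundary', 'dataSource': 'twitterstream'}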
  234. """
  235. query_dict = {'queryType': query_type}
  236. for key, val in six.iteritems(args):
  237. if key == 'aggregations':
  238. query_dict[key] = build_aggregators(val)
  239. elif key == 'post_aggregations':
  240. query_dict['postAggregations'] = \
  241. Postaggregator.build_post_aggregators(val)
  242. elif key == 'context':
  243. query_dict['context'] = val
  244. elif key == 'datasource':
  245. query_dict['dataSource'] = self.parse_datasource(val, query_type)
  246. elif key == 'paging_spec':
  247. query_dict['pagingSpec'] = val
  248. elif key == 'limit_spec':
  249. query_dict['limitSpec'] = val
  250. elif key == "filter" and val is not None:
  251. query_dict[key] = Filter.build_filter(val)
  252. elif key == "having" and val is not None:
  253. query_dict[key] = Having.build_having(val)
  254. elif key == 'dimension' and val is not None:
  255. query_dict[key] = build_dimension(val)
  256. elif key == 'dimensions':
  257. query_dict[key] = [build_dimension(v) for v in val]
  258. else:
  259. query_dict[key] = val
  260. self.last_query = Query(query_dict, query_type)
  261. return self.last_query

    def topn(self, args):
        """
        A TopN query returns a set of the values in a given dimension,
        sorted by a specified metric. Conceptually, a topN can be thought
        of as an approximate GroupBy query over a single dimension with an
        ordering spec. TopNs are faster and more resource-efficient than
        GroupBys for this use case (see the ``export_tsv`` and
        ``export_pandas`` docstrings above for a topN usage example).

        :param dict args: dict of arguments
        :return: topn query
        :rtype: Query
        """
        query_type = 'topN'
        valid_parts = [
            'datasource', 'granularity', 'filter', 'aggregations',
            'post_aggregations', 'intervals', 'dimension', 'threshold',
            'metric'
        ]
        self.validate_query(query_type, valid_parts, args)
        return self.build_query(query_type, args)

    def timeseries(self, args):
        """
        A timeseries query returns the values of the requested metrics
        (in aggregate) for each timestamp.

        :param dict args: dict of args
        :return: timeseries query
        :rtype: Query
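
        Illustrative example (assumes a connected PyDruid ``client`` and
        ``doublesum`` from ``pydruid.utils.aggregators``; values are
        placeholders):

        .. code-block:: python
            :linenos:

                >>> ts = client.timeseries(
                        datasource='twitterstream',
                        granularity='hour',
                        intervals='2013-10-04/pt12h',
                        aggregations={'count': doublesum('count')}
                    )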
  288. """
  289. query_type = 'timeseries'
  290. valid_parts = [
  291. 'datasource', 'granularity', 'filter', 'aggregations', 'descending',
  292. 'post_aggregations', 'intervals'
  293. ]
  294. self.validate_query(query_type, valid_parts, args)
  295. return self.build_query(query_type, args)

    def groupby(self, args):
        """
        A group-by query groups a result set (the requested aggregate
        metrics) by the specified dimension(s).

        :param dict args: dict of args
        :return: group by query
        :rtype: Query
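
        Illustrative example (assumes a connected PyDruid ``client``;
        values are placeholders):

        .. code-block:: python
            :linenos:

                >>> group = client.groupby(
                        datasource='twitterstream',
                        granularity='hour',
                        intervals='2013-10-04/pt12h',
                        dimensions=['user_name', 'user_lang'],
                        aggregations={'count': doublesum('count')},
                        filter=Dimension('user_lang') == 'en'
                    )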
  303. """
  304. query_type = 'groupBy'
  305. valid_parts = [
  306. 'datasource', 'granularity', 'filter', 'aggregations',
  307. 'having', 'post_aggregations', 'intervals', 'dimensions',
  308. 'limit_spec',
  309. ]
  310. self.validate_query(query_type, valid_parts, args)
  311. return self.build_query(query_type, args)

    def segment_metadata(self, args):
        """
        A segment metadata query returns per-segment information about:

        * Column type
        * Estimated size in bytes
        * Estimated size in bytes of each column
        * Interval the segment covers
        * Segment ID

        :param dict args: dict of args
        :return: segment metadata query
        :rtype: Query
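
        Illustrative example (assumes a connected PyDruid ``client``;
        values are placeholders):

        .. code-block:: python
            :linenos:

                >>> meta = client.segment_metadata(
                        datasource='twitterstream',
                        intervals='2013-10-04/pt1h'
                    )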
  322. """
  323. query_type = 'segmentMetadata'
  324. valid_parts = ['datasource', 'intervals', 'analysisTypes', 'merge']
  325. self.validate_query(query_type, valid_parts, args)
  326. return self.build_query(query_type, args)

    def time_boundary(self, args):
        """
        A time boundary query returns the min and max timestamps present
        in a data source.

        :param dict args: dict of args
        :return: time boundary query
        :rtype: Query
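
        Illustrative example (assumes a connected PyDruid ``client``):

        .. code-block:: python
            :linenos:

                >>> bound = client.time_boundary(datasource='twitterstream')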
  333. """
  334. query_type = 'timeBoundary'
  335. valid_parts = ['datasource']
  336. self.validate_query(query_type, valid_parts, args)
  337. return self.build_query(query_type, args)

    def select(self, args):
        """
        A select query returns raw Druid rows and supports pagination.

        :param dict args: dict of args
        :return: select query
        :rtype: Query
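
        Illustrative example (assumes a connected PyDruid ``client``;
        values are placeholders):

        .. code-block:: python
            :linenos:

                >>> sel = client.select(
                        datasource='twitterstream',
                        granularity='all',
                        intervals='2013-10-04/pt1h',
                        paging_spec={'pagingIdentifiers': {}, 'threshold': 5}
                    )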
  344. """
  345. query_type = 'select'
  346. valid_parts = [
  347. 'datasource', 'granularity', 'filter', 'dimensions', 'metrics',
  348. 'paging_spec', 'intervals'
  349. ]
  350. self.validate_query(query_type, valid_parts, args)
  351. return self.build_query(query_type, args)

    def search(self, args):
        """
        A search query returns dimension values that match the search
        specification.

        :param dict args: dict of args
        :return: search query
        :rtype: Query
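
        Illustrative example (assumes a connected PyDruid ``client``;
        the query spec and values are placeholders):

        .. code-block:: python
            :linenos:

                >>> hits = client.search(
                        datasource='twitterstream',
                        granularity='all',
                        intervals='2013-10-04/pt1h',
                        searchDimensions=['user_name'],
                        query={'type': 'insensitive_contains', 'value': 'user'}
                    )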
  358. """
  359. query_type = 'search'
  360. valid_parts = [
  361. 'datasource', 'granularity', 'filter', 'searchDimensions', 'query',
  362. 'limit', 'intervals', 'sort'
  363. ]
  364. self.validate_query(query_type, valid_parts, args)
  365. return self.build_query(query_type, args)

    def scan(self, args):
        """
        A scan query returns raw Druid rows.

        :param dict args: dict of args
        :return: scan query
        :rtype: Query
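
        Illustrative example (assumes a connected PyDruid ``client``;
        values are placeholders):

        .. code-block:: python
            :linenos:

                >>> rows = client.scan(
                        datasource='twitterstream',
                        granularity='all',
                        intervals='2013-10-04/pt1h',
                        limit=10
                    )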
  372. """
  373. query_type = 'scan'
  374. valid_parts = [
  375. 'datasource', 'granularity', 'filter', 'dimensions', 'metrics',
  376. 'intervals', 'limit',
  377. ]
  378. self.validate_query(query_type, valid_parts, args)
  379. return self.build_query(query_type, args)