#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division

import collections
import datetime
import dateutil
import itertools
import json
import logging
import numbers
import re

from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _

from desktop.lib.i18n import smart_unicode, smart_str, force_unicode
from desktop.models import get_data_link, Document2
from notebook.connectors.base import Notebook, _get_snippet_name

from dashboard.dashboard_api import get_engine


LOG = logging.getLogger(__name__)

NESTED_FACET_FORM = {
  'field': '',
  'mincount': 0,
  'limit': 5,
  'sort': 'desc',
  'canRange': False,
  'isDate': False,
  'aggregate': {'function': 'unique', 'formula': '', 'plain_formula': '', 'percentile': 50}
}
COMPARE_FACET = {'is_enabled': False, 'gap': '7DAYS', 'use_percentage': False, 'show_both': False, 'cohort_number': 1}
QUERY_FACET = {'is_enabled': False, 'query': ''}
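

# Collection2 wraps a dashboard definition: a JSON document describing the
# collection/index being searched, its result template and the list of facet
# widgets. It is typically loaded from a saved Document2, but can also be
# built from raw JSON or generated with defaults via get_default().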
class Collection2(object):

  def __init__(self, user, name='Default', data=None, document=None, engine='solr', source='data'):
    self.document = document

    if document is not None:
      self.data = json.loads(document.data)
    elif data is not None:
      self.data = json.loads(data)
    else:
      self.data = {
        'collection': self.get_default(user, name, engine, source=source),
        'layout': []
      }

  def get_json(self, user):
    return json.dumps(self.get_props(user))
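
  # get_props() returns the dashboard definition with any settings introduced
  # by newer versions of the app filled in with defaults, so dashboards saved
  # by older versions keep loading without errors.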
  def get_props(self, user):
    props = self.data

    if self.document is not None:
      props['collection']['id'] = self.document.id
      props['collection']['label'] = self.document.name
      props['collection']['description'] = self.document.description

    # For backward compatibility
    if 'gridItems' not in props:
      props['gridItems'] = []

    if 'rows' not in props['collection']['template']:
      props['collection']['template']['rows'] = 25
    if 'showGrid' not in props['collection']['template']:
      props['collection']['template']['showGrid'] = True
    if 'showChart' not in props['collection']['template']:
      props['collection']['template']['showChart'] = False
    if 'chartSettings' not in props['collection']['template']:
      props['collection']['template']['chartSettings'] = {
        'chartType': 'bars',
        'chartSorting': 'none',
        'chartScatterGroup': None,
        'chartScatterSize': None,
        'chartScope': 'world',
        'chartX': None,
        'chartYSingle': None,
        'chartYMulti': [],
        'chartData': [],
        'chartMapLabel': None,
      }
    if 'enabled' not in props['collection']:
      props['collection']['enabled'] = True
    if 'engine' not in props['collection']:
      props['collection']['engine'] = 'solr'
    if 'source' not in props['collection']:
      props['collection']['source'] = 'data'
    if 'leafletmap' not in props['collection']['template']:
      props['collection']['template']['leafletmap'] = {'latitudeField': None, 'longitudeField': None, 'labelField': None}
    if 'moreLikeThis' not in props['collection']['template']:
      props['collection']['template']['moreLikeThis'] = False
    if 'timeFilter' not in props['collection']:
      props['collection']['timeFilter'] = {
        'field': '',
        'type': 'rolling',
        'value': 'all',
        'from': '',
        'to': '',
        'truncate': True
      }
    if 'suggest' not in props['collection']:
      props['collection']['suggest'] = {'enabled': False, 'dictionary': ''}

    for field in props['collection']['template']['fieldsAttributes']:
      if 'type' not in field:
        field['type'] = 'string'

    if 'nested' not in props['collection']:
      props['collection']['nested'] = {
        'enabled': False,
        'schema': []
      }

    for facet in props['collection']['facets']:
      properties = facet['properties']
      if 'gap' in properties and not 'initial_gap' in properties:
        properties['initial_gap'] = properties['gap']
      if 'start' in properties and not 'initial_start' in properties:
        properties['initial_start'] = properties['start']
      if 'end' in properties and not 'initial_end' in properties:
        properties['initial_end'] = properties['end']
      if 'domain' not in properties:
        properties['domain'] = {'blockParent': [], 'blockChildren': []}
      if 'missing' not in properties:
        properties['missing'] = False
      if 'slot' not in properties:
        properties['slot'] = 0

      if properties.get('facets'):
        for facet_facet in properties['facets']:
          if 'fieldLabel' not in facet_facet:
            facet_facet['fieldLabel'] = facet_facet['field']
          if 'multiselect' not in facet_facet:
            facet_facet['multiselect'] = True

      if facet['widgetType'] == 'histogram-widget':
        if 'timelineChartType' not in properties:
          properties['timelineChartType'] = 'bar'
        if 'enableSelection' not in properties:
          properties['enableSelection'] = True
        if 'extraSeries' not in properties:
          properties['extraSeries'] = []

      if facet['widgetType'] == 'map-widget' and facet['type'] == 'field':
        facet['type'] = 'pivot'
        properties['facets'] = []
        properties['facets_form'] = {'field': '', 'mincount': 0, 'limit': 5}

      if 'compare' not in properties:
        properties['compare'] = COMPARE_FACET
      if 'filter' not in properties:
        properties['filter'] = QUERY_FACET

    if 'qdefinitions' not in props['collection']:
      props['collection']['qdefinitions'] = []

    return props
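
  # get_default() builds a brand new dashboard definition for the given
  # collection or saved query: it introspects the fields, picks the id field
  # if one exists, and returns the default template, chart settings and an
  # empty facet list.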
  def get_default(self, user, name, engine='solr', source='data'):
    fields = self.fields_data(user, name, engine, source=source)
    id_field = [field['name'] for field in fields if field.get('isId')]

    if id_field:
      id_field = id_field[0]
    else:
      id_field = ''  # Schemaless might not have an id

    if source == 'query':
      nb_doc = Document2.objects.document(user=user, doc_id=name)
      notebook = Notebook(document=nb_doc).get_data()
      label = _get_snippet_name(notebook, unique=True)
    else:
      label = name

    TEMPLATE = {
      "extracode": escape("<style type=\"text/css\">\nem {\n font-weight: bold;\n background-color: yellow;\n}</style>\n\n<script>\n</script>"),
      "highlighting": [""],
      "properties": {"highlighting_enabled": True},
      "template": """
      <div class="row-fluid">
        <div class="row-fluid">
          <div class="span12">%s</div>
        </div>
        <br/>
      </div>""" % ' '.join(['{{%s}}' % field['name'] for field in fields]),
      "isGridLayout": True,
      "showFieldList": True,
      "showGrid": True,
      "showChart": False,
      "chartSettings" : {
        'chartType': 'bars',
        'chartSorting': 'none',
        'chartScatterGroup': None,
        'chartScatterSize': None,
        'chartScope': 'world',
        'chartX': None,
        'chartYSingle': None,
        'chartYMulti': [],
        'chartData': [],
        'chartMapLabel': None,
      },
      "fieldsAttributes": [self._make_gridlayout_header_field(field) for field in fields],
      "fieldsSelected": [],
      "leafletmap": {'latitudeField': None, 'longitudeField': None, 'labelField': None},
      "rows": 25,
    }

    FACETS = []

    return {
      'id': None,
      'name': name,
      'engine': engine,
      'source': source,
      'label': label,
      'enabled': False,
      'template': TEMPLATE,
      'facets': FACETS,
      'fields': fields,
      'idField': id_field,
    }

  @classmethod
  def _make_field(cls, field, attributes):
    return {
      'name': str(escape(field)),
      'type': str(attributes.get('type', '')),
      'isId': attributes.get('required') and attributes.get('uniqueKey'),
      'isDynamic': 'dynamicBase' in attributes
    }

  @classmethod
  def _make_gridlayout_header_field(cls, field, isDynamic=False):
    return {'name': field['name'], 'type': field['type'], 'sort': {'direction': None}, 'isDynamic': isDynamic}
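
  # _make_luke_from_schema_fields() converts a plain field listing from the
  # schema API into the dict shape returned by Solr's /luke handler, so the
  # same field-parsing path can be reused when /luke is unavailable.
  # Illustrative output for a made-up indexed and stored 'id' field:
  #   {'id': {'type': 'string', 'required': True, 'uniqueKey': True,
  #           'flags': u'I-S-----OF-----l', 'copySources': [], u'copyDests': []}}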
  @classmethod
  def _make_luke_from_schema_fields(cls, schema_fields):
    return dict([
      (f['name'], {
        'copySources': [],
        'type': f['type'],
        'required': True,
        'uniqueKey': f.get('uniqueKey'),
        'flags': u'%s-%s-----OF-----l' % ('I' if f['indexed'] else '-', 'S' if f['stored'] else '-'),
        u'copyDests': []
      })
      for f in schema_fields['fields']
    ])

  def get_absolute_url(self):
    return reverse('search:index') + '?collection=%s' % self.id

  def fields(self, user):
    return sorted([str(field.get('name', '')) for field in self.fields_data(user)])

  def fields_data(self, user, name, engine='solr', source='data'):
    api = get_engine(user, engine, source=source)

    try:
      schema_fields = api.fields(name)
      schema_fields = schema_fields['schema']['fields']
    except Exception, e:
      LOG.warn('/luke call did not succeed: %s' % e)
      try:
        fields = api.schema_fields(name)
        schema_fields = Collection2._make_luke_from_schema_fields(fields)
      except Exception, e:
        LOG.error('Could not access collection: %s' % e)
        return []

    return sorted([self._make_field(field, attributes) for field, attributes in schema_fields.iteritems()])

  def update_data(self, post_data):
    data_dict = self.data

    data_dict.update(post_data)

    self.data = data_dict

  @property
  def autocomplete(self):
    return self.data['autocomplete']

  @autocomplete.setter
  def autocomplete(self, autocomplete):
    properties_ = self.data
    properties_['autocomplete'] = autocomplete
    self.data = json.dumps(properties_)

  @classmethod
  def get_field_list(cls, collection):
    if collection['template']['fieldsSelected'] and collection['template']['isGridLayout']:
      fields = set(collection['template']['fieldsSelected'] + ([collection['idField']] if collection['idField'] else []))
      # Add field if needed
      if collection['template']['leafletmap'].get('latitudeField'):
        fields.add(collection['template']['leafletmap']['latitudeField'])
      if collection['template']['leafletmap'].get('longitudeField'):
        fields.add(collection['template']['leafletmap']['longitudeField'])
      if collection['template']['leafletmap'].get('labelField'):
        fields.add(collection['template']['leafletmap']['labelField'])
      return list(fields)
    else:
      return ['*']
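

# Facets are identified by '<field>-<id>' in the classic facet response, while
# 'nested' and 'function' facets are keyed by their id alone. get_facet_field()
# looks the matching facet definition up in the dashboard's facet list.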
def get_facet_field(category, field, facets):
  if category in ('nested', 'function'):
    id_pattern = '%(id)s'
  else:
    id_pattern = '%(field)s-%(id)s'

  facets = filter(lambda facet: facet['type'] == category and id_pattern % facet == field, facets)

  if facets:
    return facets[0]
  else:
    return None
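

# Solr returns field facet counts as a flat list alternating value and count.
# pairwise2() turns that flat list into one dict per value. Illustrative input,
# with made-up data: ['apache', 1200, 'nginx', 340] becomes two entries carrying
# 'value' and 'count' plus the selected/exclude flags from the current filters.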
def pairwise2(field, fq_filter, iterable):
  pairs = []
  selected_values = [f['value'] for f in fq_filter]
  a, b = itertools.tee(iterable)

  for element in a:
    pairs.append({
      'cat': field,
      'value': element,
      'count': next(a),
      'selected': element in selected_values,
      'exclude': all([f['exclude'] for f in fq_filter if f['value'] == element])
    })

  return pairs
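

# range_pair() does the same pairing for range facets: each bucket gets a
# 'from'/'to' interval in addition to its count, plus a running 'total_counts'
# that is used by ascending 'range-up' facets.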
def range_pair(field, cat, fq_filter, iterable, end, collection_facet):
  # e.g. counts":["0",17430,"1000",1949,"2000",671,"3000",404,"4000",243,"5000",165],"gap":1000,"start":0,"end":6000}
  pairs = []
  selected_values = [f['value'] for f in fq_filter]
  is_single_unit_gap = re.match('^[\+\-]?1[A-Za-z]*$', str(collection_facet['properties']['gap'])) is not None
  is_up = collection_facet['properties']['sort'] == 'asc'

  if collection_facet['properties']['sort'] == 'asc' and (collection_facet['type'] == 'range-up' or collection_facet['properties'].get('type') == 'range-up'):
    prev = None
    n = []
    for e in iterable:
      if prev is not None:
        n.append(e)
        n.append(prev)
        prev = None
      else:
        prev = e
    iterable = n
    iterable.reverse()

  a, to = itertools.tee(iterable)
  next(to, None)
  counts = iterable[1::2]
  total_counts = counts.pop(0) if collection_facet['properties']['sort'] == 'asc' else 0
  isDate = collection_facet['properties']['isDate']

  for element in a:
    next(to, None)
    to_value = next(to, end)
    count = next(a)
    if collection_facet['properties']['sort'] == 'asc':
      from_value = to_value
      to_value = element
    else:
      from_value = element
    pairs.append({
      'field': field, 'from': from_value if isDate else int(element), 'value': count, 'to': to_value if isDate else int(to_value),
      'selected': element in selected_values,
      'exclude': all([f['exclude'] for f in fq_filter if f['value'] == element]),
      'is_single_unit_gap': is_single_unit_gap,
      'total_counts': total_counts,
      'is_up': is_up
    })
    total_counts += counts.pop(0) if counts else 0

  if collection_facet['properties']['sort'] == 'asc' and collection_facet['type'] != 'range-up' and collection_facet['properties'].get('type') != 'range-up':
    pairs.reverse()

  return pairs
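

# range_pair2() is the variant of range_pair() used for buckets coming from the
# JSON Facet API. When the facet is a 'range-up' on a timeline or bucket widget
# (sum_all), each bucket reports the running total instead of its own count.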
def range_pair2(facet_field, cat, fq_filter, iterable, end, facet, collection_facet=None):
  # e.g. counts":["0",17430,"1000",1949,"2000",671,"3000",404,"4000",243,"5000",165],"gap":1000,"start":0,"end":6000}
  pairs = []
  selected_values = [f['value'] for f in fq_filter]
  is_single_unit_gap = re.match('^[\+\-]?1[A-Za-z]*$', str(facet['gap'])) is not None
  is_up = facet['sort'] == 'asc'

  if facet['sort'] == 'asc' and facet['type'] == 'range-up':
    prev = None
    n = []
    for e in iterable:
      if prev is not None:
        n.append(e)
        n.append(prev)
        prev = None
      else:
        prev = e
    iterable = n
    iterable.reverse()

  a, to = itertools.tee(iterable)
  next(to, None)
  counts = iterable[1::2]
  total_counts = counts.pop(0) if facet['sort'] == 'asc' else 0
  sum_all = collection_facet and collection_facet['widgetType'] in ('timeline-widget', 'bucket-widget') and facet['type'] == 'range-up'

  for element in a:
    next(to, None)
    to_value = next(to, end)
    count = next(a)
    if sum_all:
      count = total_counts
    pairs.append({
      'field': facet_field, 'from': element, 'value': count, 'to': to_value, 'selected': element in selected_values,
      'exclude': all([f['exclude'] for f in fq_filter if f['value'] == element]),
      'is_single_unit_gap': is_single_unit_gap,
      'total_counts': total_counts,
      'is_up': is_up
    })
    total_counts += counts.pop(0) if counts else 0

  if facet['sort'] == 'asc' and facet['type'] != 'range-up':
    pairs.reverse()

  return pairs
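

# augment_solr_response() converts the raw Solr response into the structure the
# dashboard UI expects: facet counts are normalized into 'normalized_facets'
# entries (with selection/exclusion flags resolved against the current fq
# filters), the raw facet sections are dropped, and documents are escaped via
# augment_response().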
def augment_solr_response(response, collection, query):
  augmented = response
  augmented['normalized_facets'] = []
  NAME = '%(field)s-%(id)s'
  normalized_facets = []

  selected_values = dict([(fq['id'], fq['filter']) for fq in query['fqs']])

  if response and response.get('facet_counts'):
    for facet in collection['facets']:
      category = facet['type']

      if category == 'field' and response['facet_counts']['facet_fields']:
        name = NAME % facet
        collection_facet = get_facet_field(category, name, collection['facets'])
        counts = pairwise2(facet['field'], selected_values.get(facet['id'], []), response['facet_counts']['facet_fields'][name])
        if collection_facet['properties']['sort'] == 'asc':
          counts.reverse()
        facet = {
          'id': collection_facet['id'],
          'field': facet['field'],
          'type': category,
          'label': collection_facet['label'],
          'counts': counts,
        }
        normalized_facets.append(facet)
      elif (category == 'range' or category == 'range-up') and response['facet_counts']['facet_ranges']:
        name = NAME % facet
        collection_facet = get_facet_field(category, name, collection['facets'])
        counts = response['facet_counts']['facet_ranges'][name]['counts']
        end = response['facet_counts']['facet_ranges'][name]['end']
        counts = range_pair(facet['field'], name, selected_values.get(facet['id'], []), counts, end, collection_facet)
        facet = {
          'id': collection_facet['id'],
          'field': facet['field'],
          'type': category,
          'label': collection_facet['label'],
          'counts': counts,
          'extraSeries': []
        }
        normalized_facets.append(facet)
      elif category == 'query' and response['facet_counts']['facet_queries']:
        for name, value in response['facet_counts']['facet_queries'].iteritems():
          collection_facet = get_facet_field(category, name, collection['facets'])
          facet = {
            'id': collection_facet['id'],
            'query': name,
            'type': category,
            'label': name,
            'counts': value,
          }
          normalized_facets.append(facet)
      elif category == 'pivot':
        name = NAME % facet
        if 'facet_pivot' in response['facet_counts'] and name in response['facet_counts']['facet_pivot']:
          if facet['properties']['scope'] == 'stack':
            count = _augment_pivot_2d(name, facet['id'], response['facet_counts']['facet_pivot'][name], selected_values)
          else:
            count = response['facet_counts']['facet_pivot'][name]
            _augment_pivot_nd(facet['id'], count, selected_values)
        else:
          count = []
        facet = {
          'id': facet['id'],
          'field': name,
          'type': category,
          'label': name,
          'counts': count,
        }
        normalized_facets.append(facet)
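
  # Facets computed with the JSON Facet API ('function' and 'nested' types)
  # come back under response['facets'] and are normalized here.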
  if response and response.get('facets'):
    for facet in collection['facets']:
      category = facet['type']
      name = facet['id']  # Nested facets can only have one name

      if category == 'function' and name in response['facets']:
        collection_facet = get_facet_field(category, name, collection['facets'])
        value = response['facets'][name]
        counts = {'value': value, 'percentage': 0}

        if collection_facet['properties']['filter']['is_enabled']:
          if collection_facet['properties']['compare']['is_enabled']:
            value = value[name]
          else:
            counts['value'] = value['count']

        if collection_facet['properties']['compare']['is_enabled']:
          orignal_number, final_number = value['buckets'][0].get(name, 0), value['buckets'][1].get(name, 0)
          if collection_facet['properties']['compare']['use_percentage']:
            if orignal_number != 0:
              counts['percentage'] = (final_number - orignal_number) / orignal_number * 100.0
          counts['value'] = final_number - orignal_number

        facet = {
          'id': collection_facet['id'],
          'query': name,
          'type': category,
          'label': name,
          'counts': counts,
        }
        normalized_facets.append(facet)
      elif category == 'nested' and name in response['facets']:
        value = response['facets'][name]
        collection_facet = get_facet_field(category, name, collection['facets'])
        extraSeries = []
        counts = response['facets'][name]['buckets']

        # Give humane names to the columns
        cols = ['%(field)s' % facet, 'count(%(field)s)' % facet]
        last_seen_dim_col_index = 0
        prev_last_seen_dim_col_index = 0
        for i, f in enumerate(facet['properties']['facets'][1:]):
          if f['aggregate']['function'] == 'count':
            cols.append(f['field'])
            prev_last_seen_dim_col_index = last_seen_dim_col_index
            last_seen_dim_col_index = i + 2
          else:
            from libsolr.api import SolrApi
            aggregate_name = SolrApi._get_aggregate_function(f)
            cols.append(aggregate_name + ('_%(field)s' % facet['properties']['facets'][last_seen_dim_col_index - 1] if aggregate_name in cols else ''))
        rows = []

        facet_one = collection_facet['properties']['facets'][0]

        if 'missing' in value:
          counts.append({'val': '', 'count': value['missing']['count']})

        # Number or Date range
        if facet_one['canRange'] and not facet_one['type'] == 'field':
          dimension = 3 if facet_one['isDate'] else 1

          # Single dimension or dimension 2 with analytics
          if len(collection_facet['properties']['facets']) == 1 or len(collection_facet['properties']['facets']) == 2 and collection_facet['properties']['facets'][1]['aggregate']['function'] != 'count':
            column = 'count'
            if len(collection_facet['properties']['facets']) == 2:
              agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_')]
              legend = agg_keys[0].split(':', 2)[1]
              column = agg_keys[0]
            else:
              legend = facet['field']  # 'count(%s)' % legend
              agg_keys = [column]

            _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)

            counts = [_v for _f in counts for _v in (_f['val'], _f[column])]
            counts = range_pair2(
                facet['field'],
                name,
                selected_values.get(facet['id'], []),
                counts,
                datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 0, dateutil.tz.tzoffset('Z', 0)) if facet['properties'].get('isDate') else 1,
                collection_facet['properties']['facets'][0],
                collection_facet=collection_facet)
          else:
            # Dimension 1 with counts and 2 with analytics
            agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')] if counts else []
            agg_keys.sort(key=lambda a: a[4:])

            if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
              agg_keys.insert(0, 'count')
            counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)

            _series = collections.defaultdict(list)

            for row in rows:
              for i, cell in enumerate(row):
                if i > last_seen_dim_col_index:
                  legend = cols[i]
                  if prev_last_seen_dim_col_index != last_seen_dim_col_index:
                    legend = '%s %s' % (cols[i], row[last_seen_dim_col_index])
                  _series[legend].append(row[prev_last_seen_dim_col_index])
                  _series[legend].append(cell)

            for _name, val in _series.iteritems():
              _c = range_pair2(
                  facet['field'],
                  _name,
                  selected_values.get(facet['id'], []),
                  val,
                  datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 0, dateutil.tz.tzoffset('Z', 0)) if facet['properties'].get('isDate') else 1,
                  collection_facet['properties']['facets'][0])
              extraSeries.append({'counts': _c, 'label': _name})

            counts = []
        elif collection_facet['properties'].get('isOldPivot'):
          facet_fields = [collection_facet['field']] + [f['field'] for f in collection_facet['properties'].get('facets', []) if f['aggregate']['function'] == 'count']

          column = 'count'
          agg_keys = [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
          agg_keys.sort(key=lambda a: a[4:])

          if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
            agg_keys.insert(0, 'count')
          counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
          #_convert_nested_to_augmented_pivot_nd(facet_fields, facet['id'], count, selected_values, dimension=2)
          dimension = len(facet_fields)
        elif len(collection_facet['properties']['facets']) == 1 or (len(collection_facet['properties']['facets']) == 2 and collection_facet['properties']['facets'][1]['aggregate']['function'] != 'count'):
          # Dimension 1 with 1 count or agg
          dimension = 1

          column = 'count'
          agg_keys = counts and [key for key, value in counts[0].items() if key.lower().startswith('agg_')]
          if len(collection_facet['properties']['facets']) == 2 and agg_keys:
            column = agg_keys[0]
          else:
            agg_keys = [column]
          legend = collection_facet['properties']['facets'][0]['field']

          _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)

          counts = [_v for _f in counts for _v in (_f['val'], _f[column])]
          counts = pairwise2(legend, selected_values.get(facet['id'], []), counts)
        else:
          # Dimension 2 with analytics or 1 with N aggregates
          dimension = 2
          agg_keys = counts and [key for key, value in counts[0].items() if key.lower().startswith('agg_') or key.lower().startswith('dim_')]
          agg_keys.sort(key=lambda a: a[4:])

          if len(agg_keys) == 1 and agg_keys[0].lower().startswith('dim_'):
            agg_keys.insert(0, 'count')
          counts = _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows)
          actual_dimension = sum([_f['aggregate']['function'] == 'count' for _f in collection_facet['properties']['facets']])

          counts = filter(lambda a: len(a['fq_fields']) == actual_dimension, counts)

        num_bucket = response['facets'][name]['numBuckets'] if 'numBuckets' in response['facets'][name] else len(response['facets'][name])
        facet = {
          'id': collection_facet['id'],
          'field': facet['field'],
          'type': category,
          'label': collection_facet['label'],
          'counts': counts,
          'extraSeries': extraSeries,
          'dimension': dimension,
          'response': {'response': {'start': 0, 'numFound': num_bucket}},  # Todo * nested buckets + offsets
          'docs': [dict(zip(cols, row)) for row in rows],
          'fieldsAttributes': [Collection2._make_gridlayout_header_field({'name': col, 'type': 'aggr' if '(' in col else 'string'}) for col in cols],
          'multiselect': collection_facet['properties']['facets'][0].get('multiselect', True)
        }

        normalized_facets.append(facet)

  # Remove unnecessary facet data
  if response:
    response.pop('facet_counts')
    response.pop('facets')

  augment_response(collection, query, response)

  if normalized_facets:
    augmented['normalized_facets'].extend(normalized_facets)

  return augmented
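

# augment_response() post-processes the documents of a search result: values
# are HTML-escaped (unless the result is being downloaded), external links are
# resolved from 'link'/'link-meta' fields, a 'hueId' is set from the id field,
# and Solr highlighting snippets are merged back into the escaped documents.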
def augment_response(collection, query, response):
  # HTML escaping
  if not query.get('download'):
    id_field = collection.get('idField', '')

    for doc in response['response']['docs']:
      link = None
      if 'link-meta' in doc:
        meta = json.loads(doc['link-meta'])
        link = get_data_link(meta)
      elif 'link' in doc:
        meta = {'type': 'link', 'link': doc['link']}
        link = get_data_link(meta)

      for field, value in doc.iteritems():
        if isinstance(value, numbers.Number):
          escaped_value = value
        elif field == '_childDocuments_':  # Nested documents
          escaped_value = value
        elif isinstance(value, list):  # Multivalue field
          escaped_value = [smart_unicode(escape(val), errors='replace') for val in value]
        else:
          value = smart_unicode(value, errors='replace')
          escaped_value = escape(value)
        doc[field] = escaped_value

      doc['externalLink'] = link
      doc['details'] = []
      doc['hueId'] = smart_unicode(doc.get(id_field, ''))
      if 'moreLikeThis' in response and response['moreLikeThis'][doc['hueId']].get('numFound'):
        _doc = response['moreLikeThis'][doc['hueId']]
        doc['_childDocuments_'] = _doc['docs']
        doc['numFound'] = _doc['numFound']
        del response['moreLikeThis'][doc['hueId']]

  highlighted_fields = response.get('highlighting', {}).keys()
  if highlighted_fields and not query.get('download'):
    id_field = collection.get('idField')
    if id_field:
      for doc in response['response']['docs']:
        if id_field in doc and smart_unicode(doc[id_field]) in highlighted_fields:
          highlighting = response['highlighting'][smart_unicode(doc[id_field])]

          if highlighting:
            escaped_highlighting = {}
            for field, hls in highlighting.iteritems():
              _hls = [escape(smart_unicode(hl, errors='replace')).replace('&lt;em&gt;', '<em>').replace('&lt;/em&gt;', '</em>') for hl in hls]
              escaped_highlighting[field] = _hls[0] if len(_hls) == 1 else _hls

            doc.update(escaped_highlighting)
    else:
      response['warning'] = _("The Solr schema requires an id field for performing the result highlighting")
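

# _augment_pivot_2d() flattens a 2-level Solr pivot facet (used when the pivot
# scope is 'stack') into a dense grid: every (category, value) cell gets an
# entry, with a count of 0 when the combination is missing.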
def _augment_pivot_2d(name, facet_id, counts, selected_values):
  values = set()

  for dimension in counts:
    for pivot in dimension['pivot']:
      values.add(pivot['value'])

  values = sorted(list(values))
  augmented = []

  for dimension in counts:
    count = {}
    pivot_field = ''
    for pivot in dimension['pivot']:
      count[pivot['value']] = pivot['count']
      pivot_field = pivot['field']
    for val in values:
      fq_values = [dimension['value'], val]
      fq_fields = [dimension['field'], pivot_field]
      fq_filter = selected_values.get(facet_id, [])
      _selected_values = [f['value'] for f in fq_filter]

      augmented.append({
        "count": count.get(val, 0),
        "value": val,
        "cat": dimension['value'],
        'selected': fq_values in _selected_values,
        'exclude': all([f['exclude'] for f in fq_filter if f['value'] == val]),
        'fq_fields': fq_fields,
        'fq_values': fq_values,
      })

  return augmented
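

# _augment_stats_2d() and the recursive __augment_stats_2d() walk the nested
# buckets of a JSON facet response. They produce two things: a flat list of
# augmented count entries (for the facet widget) and, via 'rows', a table with
# one row per bucket combination (dimension values followed by aggregates)
# that backs the grid and chart views.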
def _augment_stats_2d(name, facet, counts, selected_values, agg_keys, rows):
  fq_fields = []
  fq_values = []
  fq_filter = []
  _selected_values = [f['value'] for f in selected_values.get(facet['id'], [])]
  label = facet['properties']['facets'][0]['field']
  _dim_fields = [_f['field'] for _f in facet['properties']['facets'] if _f['aggregate']['function'] == 'count']

  return __augment_stats_2d(counts, label, fq_fields, fq_values, fq_filter, _selected_values, _dim_fields, agg_keys, rows)


# Clear one dimension
def __augment_stats_2d(counts, label, fq_fields, fq_values, fq_filter, _selected_values, _fields, agg_keys, rows):
  augmented = []

  for bucket in counts:  # For each dimension, go through each bucket and pick up the counts or aggregates, then go recursively in the next dimension
    val = bucket['val']
    count = bucket['count']
    dim_row = [val]

    _fq_fields = fq_fields + _fields[0:1]  # Pick dim field if there is one
    _fq_values = fq_values + [val]

    for agg_key in agg_keys:
      if agg_key == 'count':
        dim_row.append(count)
        augmented.append(_get_augmented(count, val, label, _fq_values, _fq_fields, fq_filter, _selected_values))
      elif agg_key.startswith('agg_'):
        label = fq_values[0] if len(_fq_fields) >= 2 else agg_key.split(':', 2)[1]
        if agg_keys.index(agg_key) == 0:  # One count by dimension
          dim_row.append(count)
        if not agg_key in bucket:  # No key if value is 0
          bucket[agg_key] = 0
        dim_row.append(bucket[agg_key])
        augmented.append(_get_augmented(bucket[agg_key], val, label, _fq_values, _fq_fields, fq_filter, _selected_values))
      else:
        augmented.append(_get_augmented(count, val, label, _fq_values, _fq_fields, fq_filter, _selected_values))  # Needed?

        # List nested fields
        _agg_keys = []
        if agg_key in bucket and bucket[agg_key]['buckets']:  # Protect against empty buckets
          for key, value in bucket[agg_key]['buckets'][0].items():
            if key.lower().startswith('agg_') or key.lower().startswith('dim_'):
              _agg_keys.append(key)
        _agg_keys.sort(key=lambda a: a[4:])

        # Go rec
        if not _agg_keys or len(_agg_keys) == 1 and _agg_keys[0].lower().startswith('dim_'):
          _agg_keys.insert(0, 'count')

        next_dim = []
        new_rows = []
        if agg_key in bucket:
          augmented += __augment_stats_2d(bucket[agg_key]['buckets'], val, _fq_fields, _fq_values, fq_filter, _selected_values, _fields[1:], _agg_keys, next_dim)
        for row in next_dim:
          new_rows.append(dim_row + row)
        dim_row = new_rows

    if dim_row and type(dim_row[0]) == list:
      rows.extend(dim_row)
    else:
      rows.append(dim_row)

  return augmented


def _get_augmented(count, val, label, fq_values, fq_fields, fq_filter, _selected_values):
  return {
    "count": count,
    "value": val,
    "cat": label,
    'selected': fq_values in _selected_values,
    'exclude': all([f['exclude'] for f in fq_filter if f['value'] == val]),
    'fq_fields': fq_fields,
    'fq_values': fq_values
  }
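

# _augment_pivot_nd() walks a classic pivot facet of arbitrary depth and
# annotates every node in place with its 'fq_fields'/'fq_values' path and a
# 'selected' flag, so clicks on the widget can be mapped back to fq filters.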
def _augment_pivot_nd(facet_id, counts, selected_values, fields='', values=''):
  for c in counts:
    fq_fields = (fields if fields else []) + [c['field']]
    fq_values = (values if values else []) + [smart_str(c['value'])]

    if 'pivot' in c:
      _augment_pivot_nd(facet_id, c['pivot'], selected_values, fq_fields, fq_values)

    fq_filter = selected_values.get(facet_id, [])
    _selected_values = [f['value'] for f in fq_filter]
    c['selected'] = fq_values in _selected_values
    c['exclude'] = False
    c['fq_fields'] = fq_fields
    c['fq_values'] = fq_values


def _convert_nested_to_augmented_pivot_nd(facet_fields, facet_id, counts, selected_values, fields='', values='', dimension=2):
  for c in counts['buckets']:
    c['field'] = facet_fields[0]
    fq_fields = (fields if fields else []) + [c['field']]
    fq_values = (values if values else []) + [smart_str(c['val'])]
    c['value'] = c.pop('val')
    bucket = 'd%s' % dimension

    if bucket in c:
      next_dimension = facet_fields[1:]
      if next_dimension:
        _convert_nested_to_augmented_pivot_nd(next_dimension, facet_id, c[bucket], selected_values, fq_fields, fq_values, dimension=dimension+1)
        c['pivot'] = c.pop(bucket)['buckets']
      else:
        c['count'] = c.pop(bucket)

    fq_filter = selected_values.get(facet_id, [])
    _selected_values = [f['value'] for f in fq_filter]
    c['selected'] = fq_values in _selected_values
    c['exclude'] = False
    c['fq_fields'] = fq_fields
    c['fq_values'] = fq_values
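

# augment_solr_exception() turns an error into an empty but well-formed search
# response (empty docs, empty facet counts, one empty entry per configured
# facet) so the dashboard can still render something instead of failing on a
# missing key.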
def augment_solr_exception(response, collection):
  response.update(
    {
      "facet_counts": {
      },
      "highlighting": {
      },
      "normalized_facets": [
        {
          "field": facet['field'],
          "counts": [],
          "type": facet['type'],
          "label": facet['label']
        }
        for facet in collection['facets']
      ],
      "responseHeader": {
        "status": -1,
        "QTime": 0,
        "params": {
        }
      },
      "response": {
        "start": 0,
        "numFound": 0,
        "docs": [
        ]
      }
    })
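

# extract_solr_exception_message() tries to pull the human-readable 'msg' (or
# the trace) out of a JSON error payload, and falls back to the raw exception
# text when the payload cannot be parsed.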
def extract_solr_exception_message(e):
  response = {}

  try:
    message = json.loads(e.message)
    msg = message['error'].get('msg')
    response['error'] = msg if msg else message['error']['trace']
  except Exception, e2:
    LOG.exception('Failed to extract json message: %s' % force_unicode(e2))
    LOG.exception('Failed to parse json response: %s' % force_unicode(e))
    response['error'] = force_unicode(e)

  return response