#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from future import standard_library
standard_library.install_aliases()

from builtins import str
from builtins import object

import datetime
import json
import logging
import math
import numbers
import sys
import uuid

from datetime import timedelta

from django.contrib.sessions.models import Session
from django.db.models import Count
from django.db.models.functions import Trunc
from django.utils.html import escape
from django.utils.translation import ugettext as _

from desktop.conf import has_connectors, TASK_SERVER
from desktop.lib.i18n import smart_unicode
from desktop.lib.paths import SAFE_CHARACTERS_URI
from desktop.models import Document2
from useradmin.models import User

from notebook.connectors.base import Notebook, get_interpreter

if sys.version_info[0] > 2:
  import urllib.request, urllib.error
  from urllib.parse import quote as urllib_quote
else:
  from urllib import quote as urllib_quote

LOG = logging.getLogger(__name__)


# Materialize and HTML escape results
def escape_rows(rows, nulls_only=False, encoding=None):
  data = []

  for row in rows:
    escaped_row = []
    for field in row:
      if isinstance(field, numbers.Number):
        if math.isnan(field) or math.isinf(field):
          escaped_field = json.dumps(field)
        else:
          escaped_field = field
      elif field is None:
        escaped_field = 'NULL'
      else:
        escaped_field = smart_unicode(field, errors='replace', encoding=encoding)  # Prevent error when getting back non-utf8 charsets like iso-8859-1
        if not nulls_only:
          escaped_field = escape(escaped_field).replace(' ', '&nbsp;')
      escaped_row.append(escaped_field)
    data.append(escaped_row)

  return data
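

# Illustrative example (not part of the original module): NaN/Inf become their
# JSON string forms, None becomes 'NULL', and strings are HTML-escaped with
# spaces turned into '&nbsp;'.
#
#   escape_rows([[1, None, float('nan'), '<b>x</b>']])
#   # -> [[1, 'NULL', 'NaN', '&lt;b&gt;x&lt;/b&gt;']]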


def make_notebook(
    name='Browse', description='', editor_type='hive', statement='', status='ready',
    files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
    on_success_url=None, skip_historify=False, is_task=False, last_executed=-1, is_notebook=False, pub_sub_url=None,
    result_properties={}, namespace=None, compute=None):
  '''
  skip_historify: do not add the task to the query history, e.g. SQL Dashboard queries.
  is_task / isManaged: true when the operation is managed by Hue (include_managed=True in document), e.g. exporting a query result or dropping some tables.
  '''
  from notebook.connectors.hiveserver2 import HS2Api

  if has_connectors():
    interpreter = get_interpreter(connector_type=editor_type)
    editor_connector = editor_type
    editor_type = interpreter['dialect']
  else:
    editor_connector = editor_type

  editor = Notebook()

  if snippet_properties is None:
    snippet_properties = {}

  if editor_type == 'hive':
    sessions_properties = HS2Api.get_properties(editor_type)
    if files is not None:
      _update_property_value(sessions_properties, 'files', files)

    if functions is not None:
      _update_property_value(sessions_properties, 'functions', functions)

    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'impala':
    sessions_properties = HS2Api.get_properties(editor_type)
    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'java':
    sessions_properties = []  # Java options
  else:
    sessions_properties = []

  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
        'type': editor_connector,
        'properties': sessions_properties,
        'id': None
      }
    ],
    'selectedSnippet': editor_connector,  # TODO: might need update in notebook.ko.js
    'type': 'notebook' if is_notebook else 'query-%s' % editor_type,
    'showHistory': True,
    'isSaved': is_saved,
    'onSuccessUrl': urllib_quote(on_success_url.encode('utf-8'), safe=SAFE_CHARACTERS_URI) if on_success_url else None,
    'pubSubUrl': pub_sub_url,
    'skipHistorify': skip_historify,
    'isManaged': is_task,
    'snippets': [
      {
        'status': status,
        'id': str(uuid.uuid4()),
        'statement_raw': statement,
        'statement': statement,
        'type': editor_connector,
        'wasBatchExecuted': batch_submit,
        'lastExecuted': last_executed,
        'properties': {
          'files': [] if files is None else files,
          'functions': [] if functions is None else functions,
          'settings': [] if settings is None else settings
        },
        'name': name,
        'database': database,
        'namespace': namespace if namespace else {},
        'compute': compute if compute else {},
        'result': {'handle': {}},
        'variables': []
      }
    ] if not is_notebook else []
  }

  if snippet_properties:
    data['snippets'][0]['properties'].update(snippet_properties)

  if result_properties:
    data['snippets'][0]['result'].update(result_properties)

  editor.data = json.dumps(data)

  return editor
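

# A minimal usage sketch (illustrative, not part of the original module):
# make_notebook() is typically paired with Notebook.execute() from
# notebook.connectors.base to run a managed statement on behalf of a request:
#
#   notebook = make_notebook(
#       name='Drop table',
#       editor_type='hive',
#       statement='DROP TABLE IF EXISTS tmp_export',
#       status='ready',
#       is_task=True,
#       on_success_url='/metastore/tables'
#   )
#   notebook.execute(request, batch=False)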


def make_notebook2(name='Browse', description='', is_saved=False, snippets=None):
  from notebook.connectors.hiveserver2 import HS2Api

  editor = Notebook()

  _snippets = []

  for snippet in snippets:
    default_properties = {
      'files': [],
      'functions': [],
      'settings': []
    }

    default_properties.update(snippet['properties'])
    snippet['properties'] = default_properties

    _snippets.append(snippet)

  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'type': 'notebook',
    'description': description,
    'sessions': [
      {
        'type': _snippet['type'],
        'properties': HS2Api.get_properties(_snippet['type']),
        'id': None
      } for _snippet in _snippets  # Non-unique types currently
    ],
    'selectedSnippet': _snippets[0]['type'],
    'showHistory': False,
    'isSaved': is_saved,
    'snippets': [
      {
        'status': _snippet.get('status', 'ready'),
        'id': str(uuid.uuid4()),
        'statement_raw': _snippet.get('statement', ''),
        'statement': _snippet.get('statement', ''),
        'type': _snippet.get('type'),
        'properties': _snippet['properties'],
        'name': name,
        'database': _snippet.get('database'),
        'result': {'handle': {}},
        'variables': []
      } for _snippet in _snippets
    ]
  }

  editor.data = json.dumps(data)

  return editor
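

# Sketch of the snippets payload make_notebook2() expects, inferred from how
# the fields are read above (illustrative only):
#
#   editor = make_notebook2(
#       name='My notebook',
#       snippets=[{
#           'type': 'hive',
#           'statement': 'SELECT 1',
#           'status': 'ready',
#           'database': 'default',
#           'properties': {}
#       }]
#   )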


class MockedDjangoRequest(object):

  def __init__(self, user, get=None, post=None, method='POST'):
    self.user = user
    self.jt = None
    self.GET = get if get is not None else {'format': 'json'}
    self.POST = post if post is not None else {}
    self.REQUEST = {}
    self.method = method


def import_saved_beeswax_query(bquery):
  design = bquery.get_design()

  return make_notebook(
    name=bquery.name,
    description=bquery.desc,
    editor_type=_convert_type(bquery.type, bquery.data),
    statement=design.hql_query,
    status='ready',
    files=design.file_resources,
    functions=design.functions,
    settings=design.settings,
    is_saved=True,
    database=design.database
  )


def import_saved_pig_script(pig_script):
  snippet_properties = {}

  snippet_properties['hadoopProperties'] = []
  if pig_script.dict.get('hadoopProperties'):
    for prop in pig_script.dict.get('hadoopProperties'):
      snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))

  snippet_properties['parameters'] = []
  if pig_script.dict.get('parameters'):
    for param in pig_script.dict.get('parameters'):
      snippet_properties['parameters'].append("%s=%s" % (param.get('name'), param.get('value')))

  snippet_properties['resources'] = []
  if pig_script.dict.get('resources'):
    for resource in pig_script.dict.get('resources'):
      snippet_properties['resources'].append(resource.get('value'))

  notebook = make_notebook(
    name=pig_script.dict.get('name'),
    editor_type='pig',
    statement=pig_script.dict.get('script'),
    status='ready',
    snippet_properties=snippet_properties,
    is_saved=True
  )

  # Remove files, functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('files')
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')

  notebook.data = json.dumps(data)
  return notebook


def import_saved_mapreduce_job(wf):
  snippet_properties = {}
  node = wf.start.get_child('to')

  snippet_properties['files'] = []  # Initialize before appending parsed file paths
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError:
    LOG.warning('Failed to parse files for mapreduce job design "%s".' % wf.name)

  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for filepath in archives:
      snippet_properties['archives'].append(filepath)
  except ValueError:
    LOG.warning('Failed to parse archives for mapreduce job design "%s".' % wf.name)

  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError:
    LOG.warning('Failed to parse job properties for mapreduce job design "%s".' % wf.name)

  snippet_properties['app_jar'] = node.jar_path

  notebook = make_notebook(
    name=wf.name,
    description=wf.description,
    editor_type='mapreduce',
    statement='',
    status='ready',
    snippet_properties=snippet_properties,
    is_saved=True
  )

  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')

  notebook.data = json.dumps(data)
  return notebook


def import_saved_shell_job(wf):
  snippet_properties = {}
  node = wf.start.get_child('to')

  snippet_properties['command_path'] = node.command

  snippet_properties['arguments'] = []
  snippet_properties['env_var'] = []
  try:
    params = json.loads(node.params)
    if params:
      for param in params:
        if param['type'] == 'argument':
          snippet_properties['arguments'].append(param['value'])
        else:
          snippet_properties['env_var'].append(param['value'])
  except ValueError:
    LOG.warning('Failed to parse parameters for shell job design "%s".' % wf.name)

  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError:
    LOG.warning('Failed to parse job properties for shell job design "%s".' % wf.name)

  snippet_properties['files'] = []
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError:
    LOG.warning('Failed to parse files for shell job design "%s".' % wf.name)

  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for archive in archives:
      snippet_properties['archives'].append(archive['name'])
  except ValueError:
    LOG.warning('Failed to parse archives for shell job design "%s".' % wf.name)

  snippet_properties['capture_output'] = node.capture_output

  notebook = make_notebook(
    name=wf.name,
    description=wf.description,
    editor_type='shell',
    statement='',
    status='ready',
    snippet_properties=snippet_properties,
    is_saved=True
  )

  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')

  notebook.data = json.dumps(data)
  return notebook


def import_saved_java_job(wf):
  snippet_properties = {}
  node = wf.start.get_child('to')

  snippet_properties['app_jar'] = node.jar_path
  snippet_properties['class'] = node.main_class
  snippet_properties['args'] = node.args if node.args else ''
  snippet_properties['java_opts'] = node.java_opts if node.java_opts else ''

  snippet_properties['hadoopProperties'] = []
  try:
    properties = json.loads(node.job_properties)
    if properties:
      for prop in properties:
        snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value')))
  except ValueError:
    LOG.warning('Failed to parse job properties for Java job design "%s".' % wf.name)

  snippet_properties['files'] = []
  try:
    files = json.loads(node.files)
    for filepath in files:
      snippet_properties['files'].append({'type': 'file', 'path': filepath})
  except ValueError:
    LOG.warning('Failed to parse files for Java job design "%s".' % wf.name)

  snippet_properties['archives'] = []
  try:
    archives = json.loads(node.archives)
    for archive in archives:
      snippet_properties['archives'].append(archive['name'])
  except ValueError:
    LOG.warning('Failed to parse archives for Java job design "%s".' % wf.name)

  snippet_properties['capture_output'] = node.capture_output

  notebook = make_notebook(
    name=wf.name,
    description=wf.description,
    editor_type='java',
    statement='',
    status='ready',
    snippet_properties=snippet_properties,
    is_saved=True
  )

  # Remove functions, settings from snippet properties
  data = notebook.get_data()
  data['snippets'][0]['properties'].pop('functions')
  data['snippets'][0]['properties'].pop('settings')

  notebook.data = json.dumps(data)
  return notebook


def _convert_type(btype, bdata):
  from beeswax.models import HQL, IMPALA, RDBMS, SPARK

  if btype == HQL:
    return 'hive'
  elif btype == IMPALA:
    return 'impala'
  elif btype == RDBMS:
    data = json.loads(bdata)
    return data['query']['server']
  elif btype == SPARK:  # We should not import
    return 'spark'
  else:
    return 'hive'


def _update_property_value(properties, key, value):
  """
  In a list of property dicts, find the entry whose "key" equals `key` and set its "value" to `value`.
  """
  for prop in properties:
    if prop['key'] == key:
      prop.update({'value': value})
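

# Example (illustrative, not part of the original module): given HS2-style
# session properties, the matching entry is updated in place.
#
#   props = [{'key': 'files', 'value': []}, {'key': 'settings', 'value': []}]
#   _update_property_value(props, 'settings', [{'key': 'hive.exec.parallel', 'value': 'true'}])
#   # props[1] is now {'key': 'settings', 'value': [{'key': 'hive.exec.parallel', 'value': 'true'}]}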


def _get_editor_type(editor_id):
  document = Document2.objects.get(id=editor_id)
  return document.type.rsplit('-', 1)[-1]  # e.g. 'query-hive' -> 'hive'


class Analytics(object):

  @classmethod
  def admin_stats(cls):
    stats = []

    one_day = datetime.date.today() - timedelta(days=1)
    one_week = datetime.date.today() - timedelta(weeks=1)
    one_month = datetime.date.today() - timedelta(days=30)
    three_months = datetime.date.today() - timedelta(days=90)

    stats.append(('Last modified', '1 day'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_day).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_day).count()))
    stats.append(('Executed queries', Document2.objects.filter(last_modified__gte=one_day, is_history=True, type__startswith='query-').count()))

    stats.append(('\nLast modified', '1 week'))
    stats.append(('Users', User.objects.filter(last_login__gte=one_week).count()))
    stats.append(('Sessions', Session.objects.filter(expire_date__gte=one_week).count()))
    stats.append(('Executed queries', Document2.objects.filter(last_modified__gte=one_week, is_history=True, type__startswith='query-').count()))
    stats.append(('Saved queries', Document2.objects.filter(last_modified__gte=one_week, is_history=False, type__startswith='query-').count()))

    stats.append(('\nAll', ''))
    stats.append(('Active users 30 days', User.objects.filter(last_login__gte=one_month).count()))
    stats.append(('Sessions 30 days', Session.objects.filter(expire_date__gte=one_month).count()))
    stats.append(('Executed queries 30 days', Document2.objects.filter(last_modified__gte=one_month, is_history=True, type__startswith='query-').count()))
    stats.append(('Active users 90 days', User.objects.filter(last_login__gte=three_months).count()))

    return stats

  @classmethod
  def user_stats(cls, user_id=None, user=None):
    stats = []

    one_month = datetime.date.today() - timedelta(days=30)

    user = User.objects.get(id=user_id) if user is None else user
    queries = Document2.objects.filter(owner=user, type__startswith='query-', is_trashed=False, is_managed=False)

    stats.append({
      'name': 'user',
      'value': '%s - %s' % (user.id, user.username),
      'description': _('User info')
    })

    query_executions = queries.filter(is_history=True, type__startswith='query-')
    stats.append({
      'name': 'query_executions',
      'value': query_executions.count(),
      'description': _('Query executions count')
    })

    stats.append({
      'name': 'saved_queries_count',
      'value': queries.filter(is_history=False, type__startswith='query-').count(),
      'description': _('Saved queries count')
    })

    stats.append({
      'name': 'query_executions_30_days_count',
      'value': query_executions.filter(last_modified__gte=one_month).count(),
      'description': _('Query executions 30 days total')
    })

    last_month_daily = (
      query_executions.filter(last_modified__gte=one_month)
        .annotate(day=Trunc('last_modified', 'day'))
        .values('day')
        .annotate(c=Count('day'))
        .values('day', 'c')
        .order_by('day')
    )
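    # Shape of last_month_daily (illustrative): one dict per day, e.g.
    #   [{'day': datetime.datetime(2020, 4, 1, 0, 0), 'c': 4}, {'day': datetime.datetime(2020, 4, 2, 0, 0), 'c': 7}]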
    stats.append({
      'name': 'query_executions_30_days_histogram',
      'value': last_month_daily,
      'description': _('Daily executions 30 days')
    })

    return stats

  @classmethod
  def query_stats(cls, query_id=None, query=None):
    stats = []

    one_month = datetime.date.today() - timedelta(days=30)

    query = Document2.objects.get(id=query_id) if query is None else query

    stats.append({
      'name': 'query',
      'value': '%s - %s' % (query.id, query.name),
      'description': _('Query info')
    })

    executions = query.dependents.filter(is_history=True, type__startswith='query-')
    stats.append({
      'name': 'execution_count',
      'value': executions.count(),
      'description': _('How many times executed')
    })

    stats.append({
      'name': 'execution_count_shared',
      'value': executions.exclude(owner=query.owner).count(),
      'description': _('Executions by others')
    })

    last_month_daily = (
      executions.filter(last_modified__gte=one_month)
        .annotate(day=Trunc('last_modified', 'day'))
        .values('day')
        .annotate(c=Count('day'))
        .values('day', 'c')
        .order_by('day')
    )
    stats.append({
      'name': 'executions_30_days_histogram',
      'value': last_month_daily,
      'description': _('Daily executions 30 days')
    })

    # Could count the number of "forks" (but would need to start tracking the parent of a Saved As query, cf. saveAsNotebook)

    return stats
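

# Illustrative usage of Analytics (not part of the original module):
# admin_stats() returns (label, value) tuples, while user_stats() and
# query_stats() return dicts with 'name', 'value' and 'description' keys.
#
#   for name, value in Analytics.admin_stats():
#     print(name, value)
#
#   for stat in Analytics.user_stats(user_id=request.user.id):
#     print(stat['name'], stat.get('value'), stat['description'])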