#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import StringIO
import tempfile
import zipfile

from datetime import datetime

from django.contrib.auth.models import Group, User
from django.core import management
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST

from metadata.conf import has_navigator
from metadata.catalog_api import search_entities as metadata_search_entities, _highlight
from metadata.catalog_api import search_entities_interactive as metadata_search_entities_interactive
from notebook.connectors.altus import SdxApi, AnalyticDbApi
from notebook.connectors.base import Notebook
from notebook.views import upgrade_session_properties

from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.export_csvxls import make_response
from desktop.lib.i18n import smart_str, force_unicode
from desktop.models import Document2, Document, Directory, FilesystemException, uuid_default, \
  UserPreferences, get_user_preferences, set_user_preferences, get_cluster_config
from desktop.conf import get_clusters


LOG = logging.getLogger(__name__)


def api_error_handler(func):
  def decorator(*args, **kwargs):
    response = {}

    try:
      return func(*args, **kwargs)
    except Exception, e:
      LOG.exception('Error running %s' % func)
      response['status'] = -1
      response['message'] = force_unicode(str(e))
    finally:
      if response:
        return JsonResponse(response)

  return decorator


@api_error_handler
def get_config(request):
  config = get_cluster_config(request.user)
  config['status'] = 0
  return JsonResponse(config)


@api_error_handler
def get_context_namespaces(request, interface):
  response = {}
  namespaces = []

  clusters = get_clusters(request.user).values()

  if interface == 'hive':
    namespaces.extend([{
        'id': cluster['id'],
        'name': cluster['name']
      } for cluster in clusters if cluster.get('type') == 'direct'
    ])

    # From Altus SDX
    if [cluster for cluster in clusters if cluster['type'] == 'altus']:
      namespaces.extend([{
          'id': namespace.get('crn', 'None'),
          'name': namespace.get('namespaceName', 'Unknown'),
          'status': namespace.get('status'),
          # creationDate
        } for namespace in SdxApi(request.user).list_namespaces()]
      )

  response[interface] = namespaces
  response['status'] = 0

  return JsonResponse(response)


@api_error_handler
def get_context_computes(request, interface):
  response = {}
  computes = []

  clusters = get_clusters(request.user).values()

  if interface == 'hive':
    computes.extend([{
        'id': cluster['id'],
        'name': cluster['name'],
        'namespace': cluster['id']  # Dummy
      } for cluster in clusters
    ])

    # From Altus
    if [cluster for cluster in clusters if cluster['type'] == 'altus']:
      computes.extend([{
          'id': cluster.get('crn', 'None'),
          'name': cluster.get('clusterName', 'Unknown'),
          'status': cluster.get('status'),
          # namespaceCrn
          # environmentType
          # secured
          # cdhVersion
        } for cluster in AnalyticDbApi(request.user).list_clusters()]
      )

  response[interface] = computes
  response['status'] = 0

  return JsonResponse(response)


@api_error_handler
def search_documents(request):
  """
  Returns the directories and documents, based on the given params, that are accessible to the current user.

  Optional params:
    perms=<mode>           - Controls whether to retrieve owned, shared, or both. Defaults to both.
    include_history=<bool> - Controls whether to retrieve history docs. Defaults to false.
    include_trashed=<bool> - Controls whether to retrieve docs in the trash. Defaults to true.
    include_managed=<bool> - Controls whether to retrieve docs generated by Hue. Defaults to false.
    flatten=<bool>         - Controls whether to return documents in a flat list, or roll up documents to a common
                             directory if possible. Defaults to true.
    page=<n>               - Controls pagination. Defaults to 1.
    limit=<n>              - Controls limit per page. Defaults to all.
    type=<type>            - Show documents of given type(s) (directory, query-hive, query-impala, query-mysql, etc).
                             Defaults to all. Can appear multiple times.
    sort=<key>             - Sort by the attribute <key>, which is one of: "name", "type", "owner", "last_modified".
                             Accepts the form "-last_modified", which sorts in descending order.
                             Defaults to "-last_modified".
    text=<frag>            - Search for fragment <frag> in names and descriptions.
  """
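  # Example request (endpoint path and values are illustrative, not defined in this file):
  #   GET /desktop/api2/docs/?perms=owned&type=query-hive&text=sample&sort=-last_modified&page=2&limit=10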
  response = {
    'documents': []
  }

  perms = request.GET.get('perms', 'both').lower()
  include_history = json.loads(request.GET.get('include_history', 'false'))
  include_trashed = json.loads(request.GET.get('include_trashed', 'true'))
  include_managed = json.loads(request.GET.get('include_managed', 'false'))
  flatten = json.loads(request.GET.get('flatten', 'true'))

  if perms not in ['owned', 'shared', 'both']:
    raise PopupException(_('Invalid value for perms, acceptable values are: owned, shared, both.'))

  documents = Document2.objects.documents(
    user=request.user,
    perms=perms,
    include_history=include_history,
    include_trashed=include_trashed,
    include_managed=include_managed
  )

  # Refine results
  response.update(_filter_documents(request, queryset=documents, flatten=flatten))

  # Paginate
  response.update(_paginate(request, queryset=response['documents']))

  # Serialize results
  response['documents'] = [doc.to_dict() for doc in response.get('documents', [])]

  return JsonResponse(response)


def _search(user, perms='both', include_history=False, include_trashed=False, include_managed=False, search_text=None, limit=25):
  response = {
    'documents': []
  }

  documents = Document2.objects.documents(
    user=user,
    perms=perms,
    include_history=include_history,
    include_trashed=include_trashed,
    include_managed=include_managed
  )

  type_filters = None
  sort = '-last_modified'
  flatten = True
  page = 1

  # Refine results
  response.update(__filter_documents(type_filters, sort, search_text, queryset=documents, flatten=flatten))

  # Paginate
  response.update(__paginate(page, limit, queryset=response['documents']))

  return response


@api_error_handler
def get_document(request):
  """
  Returns the document or directory found for the given uuid or path and current user.
  If a directory is found, return any children documents too.

  Optional params:
    page=<n>                  - Controls pagination. Defaults to 1.
    limit=<n>                 - Controls limit per page. Defaults to all.
    type=<type>               - Show documents of given type(s) (directory, query-hive, query-impala, query-mysql, etc).
                                Defaults to all.
    sort=<key>                - Sort by the attribute <key>, which is one of: "name", "type", "owner", "last_modified".
                                Accepts the form "-last_modified", which sorts in descending order.
                                Defaults to "-last_modified".
    text=<frag>               - Search for fragment <frag> in names and descriptions.
    data=<false|true>         - Return all the data of the document. Defaults to false.
    dependencies=<false|true> - Return all the dependencies and dependents of the document. Defaults to false.
  """
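  # Example requests (endpoint paths and values are illustrative, not defined in this file):
  #   GET /desktop/api2/doc/?path=/my_queries
  #   GET /desktop/api2/doc/?uuid=some-uuid&data=true&dependencies=true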
  path = request.GET.get('path', '/')
  uuid = request.GET.get('uuid')
  uuids = request.GET.get('uuids')
  with_data = request.GET.get('data', 'false').lower() == 'true'
  with_dependencies = request.GET.get('dependencies', 'false').lower() == 'true'

  if uuids:
    response = {
      'data_list': [_get_document_helper(request, uuid, with_data, with_dependencies, path) for uuid in uuids.split(',')],
      'status': 0
    }
  else:
    response = _get_document_helper(request, uuid, with_data, with_dependencies, path)

  return JsonResponse(response)


def _get_document_helper(request, uuid, with_data, with_dependencies, path):
  if uuid:
    if uuid.isdigit():
      document = Document2.objects.document(user=request.user, doc_id=uuid)
    else:
      document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)
  else:  # Find by path
    document = Document2.objects.get_by_path(user=request.user, path=path)

  response = {
    'document': document.to_dict(),
    'parent': document.parent_directory.to_dict() if document.parent_directory else None,
    'children': [],
    'dependencies': [],
    'dependents': [],
    'data': '',
    'status': 0
  }

  response['user_perms'] = {
    'can_read': document.can_read(request.user),
    'can_write': document.can_write(request.user)
  }

  if with_data:
    data = json.loads(document.data)

    # Upgrade session properties for Hive and Impala
    if document.type.startswith('query'):
      notebook = Notebook(document=document)
      notebook = upgrade_session_properties(request, notebook)
      data = json.loads(notebook.data)

    if document.type == 'query-pig':  # Import correctly from before Hue 4.0
      properties = data['snippets'][0]['properties']
      if 'hadoopProperties' not in properties:
        properties['hadoopProperties'] = []
      if 'parameters' not in properties:
        properties['parameters'] = []
      if 'resources' not in properties:
        properties['resources'] = []

    if data.get('uuid') != document.uuid:  # Old format < 3.11
      data['uuid'] = document.uuid

    response['data'] = data

  if with_dependencies:
    response['dependencies'] = [dependency.to_dict() for dependency in document.dependencies.all()]
    response['dependents'] = [dependent.to_dict() for dependent in document.dependents.exclude(is_history=True).all()]

  # Get children documents if this is a directory
  if document.is_directory:
    directory = Directory.objects.get(id=document.id)

    # If this is the user's home directory, fetch shared docs too
    if document.is_home_directory:
      children = directory.get_children_and_shared_documents(user=request.user)
      response.update(_filter_documents(request, queryset=children, flatten=True))
    else:
      children = directory.get_children_documents()
      response.update(_filter_documents(request, queryset=children, flatten=False))

    # Paginate and serialize results
    if 'documents' in response:
      response.update(_paginate(request, queryset=response['documents']))
      # Rename documents to children
      response['children'] = response.pop('documents')
      response['children'] = [doc.to_dict() for doc in response['children']]

  return response


@api_error_handler
def open_document(request):
  doc_id = request.GET.get('id')

  if doc_id.isdigit():
    document = Document2.objects.document(user=request.user, doc_id=doc_id)
  else:
    document = Document2.objects.get_by_uuid(user=request.user, uuid=doc_id)

  return redirect(document.get_absolute_url())


@api_error_handler
@require_POST
def move_document(request):
  source_doc_uuid = json.loads(request.POST.get('source_doc_uuid'))
  destination_doc_uuid = json.loads(request.POST.get('destination_doc_uuid'))

  if not source_doc_uuid or not destination_doc_uuid:
    raise PopupException(_('move_document requires source_doc_uuid and destination_doc_uuid'))

  source = Document2.objects.get_by_uuid(user=request.user, uuid=source_doc_uuid, perm_type='write')
  destination = Directory.objects.get_by_uuid(user=request.user, uuid=destination_doc_uuid, perm_type='write')

  doc = source.move(destination, request.user)

  return JsonResponse({
    'status': 0,
    'document': doc.to_dict()
  })


@api_error_handler
@require_POST
def create_directory(request):
  parent_uuid = json.loads(request.POST.get('parent_uuid'))
  name = json.loads(request.POST.get('name'))

  if not parent_uuid or not name:
    raise PopupException(_('create_directory requires parent_uuid and name'))

  parent_dir = Directory.objects.get_by_uuid(user=request.user, uuid=parent_uuid, perm_type='write')

  directory = Directory.objects.create(name=name, owner=request.user, parent_directory=parent_dir)

  return JsonResponse({
    'status': 0,
    'directory': directory.to_dict()
  })


@api_error_handler
@require_POST
def update_document(request):
  uuid = json.loads(request.POST.get('uuid'))

  if not uuid:
    raise PopupException(_('update_document requires uuid'))

  document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')

  whitelisted_attrs = ['name', 'description']

  for attr in whitelisted_attrs:
    if request.POST.get(attr):
      setattr(document, attr, request.POST.get(attr))

  document.save(update_fields=whitelisted_attrs)

  return JsonResponse({
    'status': 0,
    'document': document.to_dict()
  })


@api_error_handler
@require_POST
def delete_document(request):
  """
  Accepts a uuid and optional skip_trash parameter

  (Default) skip_trash=false, flags a document as trashed
  skip_trash=true, deletes it permanently along with any history dependencies

  If directory and skip_trash=false, all dependencies will also be flagged as trash
  If directory and skip_trash=true, directory must be empty (no dependencies)
  """
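  # Example payload (illustrative; note that both fields arrive as JSON-encoded form values):
  #   POST uuid='"some-uuid"', skip_trash='true'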
  uuid = json.loads(request.POST.get('uuid'))
  skip_trash = json.loads(request.POST.get('skip_trash', 'false'))

  if not uuid:
    raise PopupException(_('delete_document requires uuid'))

  document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')

  if skip_trash:
    document.delete()
  else:
    document.trash()

  return JsonResponse({
    'status': 0,
  })


@api_error_handler
@require_POST
def copy_document(request):
  uuid = json.loads(request.POST.get('uuid', '""'))

  if not uuid:
    raise PopupException(_('copy_document requires uuid'))

  # Document2 and Document model objects are linked and both are saved when saving
  document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)
  # Document model object
  document1 = document.doc.get()

  if document.type == 'directory':
    raise PopupException(_('Directory copy is not supported'))

  name = document.name + '-copy'

  # Make the copy of the Document2 model object
  copy_document = document.copy(name=name, owner=request.user)
  # Make the copy of the Document model object too
  document1.copy(content_object=copy_document, name=name, owner=request.user)

  # Import workspace for all Oozie jobs
  if document.type in ('oozie-workflow2', 'oozie-bundle2', 'oozie-coordinator2'):
    from oozie.models2 import Workflow, Coordinator, Bundle, _import_workspace

    # Update the name field in the json 'data' field
    if document.type == 'oozie-workflow2':
      workflow = Workflow(document=document)
      workflow.update_name(name)
      workflow.update_uuid(copy_document.uuid)
      _import_workspace(request.fs, request.user, workflow)
      copy_document.update_data({'workflow': workflow.get_data()['workflow']})
      copy_document.save()

    if document.type in ('oozie-bundle2', 'oozie-coordinator2'):
      if document.type == 'oozie-bundle2':
        bundle_or_coordinator = Bundle(document=document)
      else:
        bundle_or_coordinator = Coordinator(document=document)
      json_data = bundle_or_coordinator.get_data_for_json()
      json_data['name'] = name
      json_data['uuid'] = copy_document.uuid
      copy_document.update_data(json_data)
      copy_document.save()
      _import_workspace(request.fs, request.user, bundle_or_coordinator)
  elif document.type == 'search-dashboard':
    from dashboard.models import Collection2

    collection = Collection2(request.user, document=document)
    collection.data['collection']['label'] = name
    collection.data['collection']['uuid'] = copy_document.uuid
    copy_document.update_data({'collection': collection.data['collection']})
    copy_document.save()
  else:
    # Keep the document and data in sync
    copy_data = copy_document.data_dict
    if 'name' in copy_data:
      copy_data['name'] = name
    if 'uuid' in copy_data:
      copy_data['uuid'] = copy_document.uuid
    copy_document.update_data(copy_data)
    copy_document.save()

  return JsonResponse({
    'status': 0,
    'document': copy_document.to_dict()
  })


@api_error_handler
@require_POST
def restore_document(request):
  """
  Accepts a comma-separated list of document uuids
  Restores the documents to /home
  """
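  # Example payload (illustrative): POST uuids='"uuid-1,uuid-2"'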
  uuids = json.loads(request.POST.get('uuids'))

  if not uuids:
    raise PopupException(_('restore_document requires comma separated uuids'))

  for uuid in uuids.split(','):
    document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')
    document.restore()

  return JsonResponse({
    'status': 0,
  })


@api_error_handler
@require_POST
def share_document(request):
  """
  Set which other users or groups can interact with the document.

  Example of input: {'read': {'user_ids': [1, 2, 3], 'group_ids': [1, 2, 3]}}
  """
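  # Example payload (illustrative; both fields arrive as JSON-encoded form values):
  #   POST uuid='"some-uuid"', data='{"read": {"user_ids": [1, 2], "group_ids": []}}'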
  perms_dict = request.POST.get('data')
  uuid = request.POST.get('uuid')

  if not uuid or not perms_dict:
    raise PopupException(_('share_document requires uuid and perms_dict'))
  else:
    perms_dict = json.loads(perms_dict)
    uuid = json.loads(uuid)

  doc = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)

  for name, perm in perms_dict.iteritems():
    users = groups = None
    if perm.get('user_ids'):
      users = User.objects.in_bulk(perm.get('user_ids'))
    else:
      users = []

    if perm.get('group_ids'):
      groups = Group.objects.in_bulk(perm.get('group_ids'))
    else:
      groups = []

    doc = doc.share(request.user, name=name, users=users, groups=groups)

  return JsonResponse({
    'status': 0,
    'document': doc.to_dict()
  })


@ensure_csrf_cookie
def export_documents(request):
  if request.GET.get('documents'):
    selection = json.loads(request.GET.get('documents'))
  else:
    selection = json.loads(request.POST.get('documents'))

  # Only export documents the user has permissions to read
  docs = Document2.objects.documents(user=request.user, perms='both', include_history=True, include_trashed=True).\
    filter(id__in=selection).order_by('-id')

  # Add any dependencies to the set of exported documents
  export_doc_set = _get_dependencies(docs)

  # For directories, add any children docs to the set of exported documents
  export_doc_set.update(_get_dependencies(docs, deps_mode=False))

  # Get PKs of documents to export
  doc_ids = [doc.pk for doc in export_doc_set]
  num_docs = len(doc_ids)

  if len(selection) == 1 and num_docs >= len(selection) and docs[0].name:
    filename = docs[0].name
  else:
    filename = 'hue-documents-%s-(%s)' % (datetime.today().strftime('%Y-%m-%d'), num_docs)

  f = StringIO.StringIO()

  if doc_ids:
    doc_ids = ','.join(map(str, doc_ids))
    management.call_command('dumpdata', 'desktop.Document2', primary_keys=doc_ids, indent=2, use_natural_foreign_keys=True, verbosity=2, stdout=f)

  if request.GET.get('format') == 'json':
    return JsonResponse(f.getvalue(), safe=False)
  elif request.GET.get('format') == 'zip':
    zfile = zipfile.ZipFile(f, 'w')
    zfile.writestr("hue.json", f.getvalue())
    for doc in docs:
      if doc.type == 'notebook':
        try:
          from spark.models import Notebook
          zfile.writestr("notebook-%s-%s.txt" % (doc.name, doc.id), smart_str(Notebook(document=doc).get_str()))
        except Exception, e:
          LOG.exception(e)
    zfile.close()
    response = HttpResponse(content_type="application/zip")
    response["Content-Length"] = len(f.getvalue())
    response['Content-Disposition'] = 'attachment; filename="%s.zip"' % filename
    response.write(f.getvalue())
    return response
  else:
    return make_response(f.getvalue(), 'json', filename)


@ensure_csrf_cookie
def import_documents(request):
  def is_reserved_directory(doc):
    return doc['fields']['type'] == 'directory' and doc['fields']['name'] in (Document2.HOME_DIR, Document2.TRASH_DIR)

  try:
    if request.FILES.get('documents'):
      documents = request.FILES['documents'].read()
    else:
      documents = json.loads(request.POST.get('documents'))

    documents = json.loads(documents)
  except ValueError, e:
    raise PopupException(_('Failed to import documents, the file does not contain valid JSON.'))

  # Validate documents
  if not _is_import_valid(documents):
    raise PopupException(_('Failed to import documents, the file does not contain the expected JSON schema for Hue documents.'))

  docs = []

  uuids_map = dict((doc['fields']['uuid'], None) for doc in documents if not is_reserved_directory(doc))

  for doc in documents:
    # Filter docs to import, ignoring reserved directories (home and Trash) and history docs
    if not is_reserved_directory(doc):
      # Remove any deprecated fields
      if 'tags' in doc['fields']:
        doc['fields'].pop('tags')

      # If doc is not owned by current user, make a copy of the document with current user as owner
      if doc['fields']['owner'][0] != request.user.username:
        doc = _copy_document_with_owner(doc, request.user, uuids_map)
      else:  # Update existing doc or create new
        doc = _create_or_update_document_with_owner(doc, request.user, uuids_map)

      # For oozie docs replace dependent uuids with the newly created ones
      if doc['fields']['type'].startswith('oozie-'):
        doc = _update_imported_oozie_document(doc, uuids_map)

      # If the doc contains any history dependencies, ignore them
      # NOTE: this assumes that each dependency is exported as an array using the natural PK [uuid, version, is_history]
      deps_minus_history = [dep for dep in doc['fields'].get('dependencies', []) if len(dep) >= 3 and not dep[2]]
      doc['fields']['dependencies'] = deps_minus_history

      # Replace illegal characters
      if '/' in doc['fields']['name']:
        new_name = doc['fields']['name'].replace('/', '-')
        LOG.warn("Found illegal slash in document named: %s, renaming to: %s." % (doc['fields']['name'], new_name))
        doc['fields']['name'] = new_name

      # Set last modified date to now
      doc['fields']['last_modified'] = datetime.now().replace(microsecond=0).isoformat()

      docs.append(doc)

  f = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
  f.write(json.dumps(docs))
  f.flush()

  stdout = StringIO.StringIO()
  try:
    management.call_command('loaddata', f.name, verbosity=2, traceback=True, stdout=stdout)
    Document.objects.sync()

    if request.POST.get('redirect'):
      return redirect(request.POST.get('redirect'))
    else:
      return JsonResponse({
        'status': 0,
        'message': stdout.getvalue(),
        'count': len(documents),
        'created_count': len([doc for doc in documents if doc['pk'] is None]),
        'updated_count': len([doc for doc in documents if doc['pk'] is not None]),
        'username': request.user.username,
        'documents': [
          dict([
            ('name', doc['fields']['name']),
            ('uuid', doc['fields']['uuid']),
            ('type', doc['fields']['type']),
            ('owner', doc['fields']['owner'][0])
          ]) for doc in docs]
      })
  except Exception, e:
    LOG.error('Failed to run loaddata command in import_documents:\n %s' % stdout.getvalue())
    return JsonResponse({'status': -1, 'message': smart_str(e)})
  finally:
    stdout.close()


def _update_imported_oozie_document(doc, uuids_map):
  for key, value in uuids_map.iteritems():
    if value:
      doc['fields']['data'] = doc['fields']['data'].replace(key, value)

  return doc


def user_preferences(request, key=None):
  response = {'status': 0, 'data': {}}

  if request.method != "POST":
    response['data'] = get_user_preferences(request.user, key)
  else:
    if "set" in request.POST:
      x = set_user_preferences(request.user, key, request.POST["set"])
      response['data'] = {key: x.value}
    elif "delete" in request.POST:
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
        x.delete()
      except UserPreferences.DoesNotExist:
        pass

  return JsonResponse(response)


def search_entities(request):
  sources = json.loads(request.POST.get('sources')) or []

  if 'documents' in sources:
    search_text = json.loads(request.POST.get('query_s', ''))
    entities = _search(user=request.user, search_text=search_text)
    response = {
      'entities': [{
          'hue_name': _highlight(search_text, escape(e.name)),
          'hue_description': _highlight(search_text, escape(e.description)),
          'type': 'HUE',
          'doc_type': escape(e.type),
          'originalName': escape(e.name),
          'link': e.get_absolute_url()
        } for e in entities['documents']
      ],
      'count': len(entities['documents']),
      'status': 0
    }
    return JsonResponse(response)
  else:
    if has_navigator(request.user):
      return metadata_search_entities(request)
    else:
      return JsonResponse({'status': 1, 'message': _('Navigator not enabled')})


def search_entities_interactive(request):
  sources = json.loads(request.POST.get('sources')) or []

  if 'documents' in sources:
    search_text = json.loads(request.POST.get('query_s', ''))
    limit = int(request.POST.get('limit', 25))
    entities = _search(user=request.user, search_text=search_text, limit=limit)
    response = {
      'results': [{
          'hue_name': _highlight(search_text, escape(e.name)),
          'hue_description': _highlight(search_text, escape(e.description)),
          'link': e.get_absolute_url(),
          'doc_type': escape(e.type),
          'type': 'HUE',
          'uuid': e.uuid,
          'parentUuid': e.parent_directory.uuid,
          'originalName': escape(e.name)
        } for e in entities['documents']
      ],
      'count': len(entities['documents']),
      'status': 0
    }
    return JsonResponse(response)
  else:
    if has_navigator(request.user):
      return metadata_search_entities_interactive(request)
    else:
      return JsonResponse({'status': 1, 'message': _('Navigator not enabled')})


def _is_import_valid(documents):
  """
  Validates the JSON file to be imported for schema correctness
  :param documents: object loaded from JSON file
  :return: True if schema seems valid, False otherwise
  """
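  # A minimal list that passes this check looks like (values are illustrative):
  #   [{'pk': None, 'model': 'desktop.document2', 'fields': {'uuid': 'some-uuid', 'owner': ['admin']}}]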
  return isinstance(documents, list) and \
    all(isinstance(d, dict) for d in documents) and \
    all(all(k in d for k in ('pk', 'model', 'fields')) for d in documents) and \
    all(all(k in d['fields'] for k in ('uuid', 'owner')) for d in documents)


def _get_dependencies(documents, deps_mode=True):
  """
  Given a list of Document2 objects, perform a depth-first search and return a set of documents with all
  dependencies (excluding history docs) included
  :param documents: list of Document2 objects to include
  :param deps_mode: traverse the dependencies relationship if True, otherwise traverse the children relationship
  """
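  # For example, _get_dependencies(docs) walks the dependencies graph, while
  # _get_dependencies(docs, deps_mode=False) walks directory children instead.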
  doc_set = set()

  for doc in documents:
    stack = [doc]
    while stack:
      curr_doc = stack.pop()
      if curr_doc not in doc_set and not curr_doc.is_history:
        doc_set.add(curr_doc)
        if deps_mode:
          deps_set = set(curr_doc.dependencies.all())
        else:
          deps_set = set(curr_doc.children.all())
        stack.extend(deps_set - doc_set)

  return doc_set


def _copy_document_with_owner(doc, owner, uuids_map):
  home_dir = Directory.objects.get_home_directory(owner)

  doc['fields']['owner'] = [owner.username]
  doc['pk'] = None
  doc['fields']['version'] = 1

  # Retrieve from uuids_map if it's already been reassigned, or assign a new UUID and map it
  old_uuid = doc['fields']['uuid']
  if uuids_map[old_uuid] is None:
    uuids_map[old_uuid] = uuid_default()
  doc['fields']['uuid'] = uuids_map[old_uuid]

  # Update UUID in data if needed
  if 'data' in doc['fields']:
    data = json.loads(doc['fields']['data'])
    if 'uuid' in data:
      data['uuid'] = uuids_map[old_uuid]
      doc['fields']['data'] = json.dumps(data)

  # Remap parent directory if needed
  parent_uuid = None
  if doc['fields'].get('parent_directory'):
    parent_uuid = doc['fields']['parent_directory'][0]

  if parent_uuid is not None and parent_uuid in uuids_map.keys():
    if uuids_map[parent_uuid] is None:
      uuids_map[parent_uuid] = uuid_default()
    doc['fields']['parent_directory'] = [uuids_map[parent_uuid], 1, False]
  else:
    if parent_uuid is not None:
      LOG.warn('Could not find parent directory with UUID: %s in JSON import, will set parent to home directory' %
               parent_uuid)
    doc['fields']['parent_directory'] = [home_dir.uuid, home_dir.version, home_dir.is_history]

  # Remap dependencies if needed
  for idx, (dep_uuid, dep_version, dep_is_history) in enumerate(doc['fields']['dependencies']):
    if dep_uuid not in uuids_map.keys():
      LOG.warn('Could not find dependency UUID: %s in JSON import, may cause integrity errors if not found.' % dep_uuid)
    else:
      if uuids_map[dep_uuid] is None:
        uuids_map[dep_uuid] = uuid_default()
      doc['fields']['dependencies'][idx][0] = uuids_map[dep_uuid]

  return doc


def _create_or_update_document_with_owner(doc, owner, uuids_map):
  home_dir = Directory.objects.get_home_directory(owner)
  create_new = False

  try:
    owned_docs = Document2.objects.filter(uuid=doc['fields']['uuid'], owner=owner).order_by('-last_modified')
    if owned_docs.exists():
      existing_doc = owned_docs[0]
      doc['pk'] = existing_doc.pk
    else:
      create_new = True
  except FilesystemException, e:
    create_new = True

  if create_new:
    LOG.warn('Could not find document with UUID: %s, will create a new document on import.', doc['fields']['uuid'])
    doc['pk'] = None
    doc['fields']['version'] = 1

  # Verify that parent exists, log warning and set parent to user's home directory if not found
  if doc['fields']['parent_directory']:
    uuid, version, is_history = doc['fields']['parent_directory']
    if uuid not in uuids_map.keys() and \
        not Document2.objects.filter(uuid=uuid, version=version, is_history=is_history).exists():
      LOG.warn('Could not find parent document with UUID: %s, will set parent to home directory' % uuid)
      doc['fields']['parent_directory'] = [home_dir.uuid, home_dir.version, home_dir.is_history]

  # Verify that dependencies exist, raise critical error if any dependency not found
  # Ignore history dependencies
  if doc['fields']['dependencies']:
    history_deps_list = []
    for index, (uuid, version, is_history) in enumerate(doc['fields']['dependencies']):
      if uuid not in uuids_map.keys() and not is_history and \
          not Document2.objects.filter(uuid=uuid, version=version).exists():
        raise PopupException(_('Cannot import document, dependency with UUID: %s not found.') % uuid)
      elif is_history:
        history_deps_list.insert(0, index)  # Insert in decreasing order to facilitate delete
        LOG.warn('History dependency with UUID: %s ignored while importing document %s' % (uuid, doc['fields']['name']))

    # Delete history dependencies not found in the DB
    for index in history_deps_list:
      del doc['fields']['dependencies'][index]

  return doc


def _filter_documents(request, queryset, flatten=True):
  """
  Given optional querystring params extracted from the request, filter the given queryset of documents and return a
  dictionary with the refined queryset and filter params
  :param request: request object with params
  :param queryset: Document2 queryset
  :param flatten: Return all results in a flat list if true, otherwise roll up to common directory
  """
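  # Example querystring (illustrative): ?type=query-hive&type=query-impala&sort=name&text=sample
  # keeps only those two types, sorts by name ascending, and matches "sample" in names and descriptions.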
  type_filters = request.GET.getlist('type', None)
  sort = request.GET.get('sort', '-last_modified')
  search_text = request.GET.get('text', None)

  return __filter_documents(type_filters, sort, search_text, queryset, flatten)


def __filter_documents(type_filters, sort, search_text, queryset, flatten=True):
  documents = queryset.search_documents(
    types=type_filters,
    search_text=search_text,
    order_by=sort)

  # Roll up documents to common directory
  if not flatten:
    documents = documents.exclude(parent_directory__in=documents)

  count = documents.count()

  return {
    'documents': documents,
    'count': count,
    'types': type_filters,
    'text': search_text,
    'sort': sort
  }


def _paginate(request, queryset):
  """
  Given optional querystring params extracted from the request, slice the given queryset of documents for the given
  page and limit, and return the updated queryset along with the pagination params used.
  :param request: request object with params
  :param queryset: queryset
  """
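  # For example, ?page=3&limit=10 slices the queryset to items [20:30];
  # limit=0 (the default) returns the queryset unsliced.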
  page = int(request.GET.get('page', 1))
  limit = int(request.GET.get('limit', 0))

  return __paginate(page, limit, queryset)


def __paginate(page, limit, queryset):
  if limit > 0:
    offset = (page - 1) * limit
    last = offset + limit
    queryset = queryset.all()[offset:last]

  return {
    'documents': queryset,
    'page': page,
    'limit': limit
  }