api3.py

#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from future import standard_library
standard_library.install_aliases()
from builtins import oct, zip
from past.builtins import basestring

import json
import logging
import urllib.request, urllib.error
import sys

from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST

LOG = logging.getLogger(__name__)

try:
  from simple_salesforce.api import Salesforce
  from simple_salesforce.exceptions import SalesforceRefusedRequest
except ImportError:
  LOG.warn('simple_salesforce module not found')

from desktop.lib import django_mako
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.lib.python_util import check_encoding
from desktop.models import Document2
from kafka.kafka_api import get_topics
from metadata.manager_client import ManagerApi
from notebook.connectors.base import get_api, Notebook
from notebook.decorators import api_error_handler
from notebook.models import make_notebook, MockedDjangoRequest, escape_rows

from indexer.controller import CollectionManagerController
from indexer.file_format import HiveFormat
from indexer.fields import Field
from indexer.indexers.envelope import EnvelopeIndexer
from indexer.indexers.base import get_api as get_index_api  # aliased so it does not shadow notebook.connectors.base.get_api above
from indexer.indexers.flink_sql import FlinkIndexer
from indexer.indexers.morphline import MorphlineIndexer
from indexer.indexers.rdbms import run_sqoop, _get_api
from indexer.indexers.sql import SQLIndexer, _create_database, _create_table  # _create_table is called from importer_submit()
from indexer.models import _save_pipeline
from indexer.solr_client import SolrClient, MAX_UPLOAD_SIZE
from indexer.indexers.flume import FlumeIndexer

if sys.version_info[0] > 2:
  from io import StringIO as string_io
  from urllib.parse import urlparse, unquote as urllib_unquote
else:
  from StringIO import StringIO as string_io
  from urllib import unquote as urllib_unquote
  from urlparse import urlparse

try:
  from beeswax.server import dbms
except ImportError as e:
  LOG.warn('Hive and HiveServer2 interfaces are not enabled')

try:
  from filebrowser.views import detect_parquet
except ImportError as e:
  LOG.warn('File Browser interface is not enabled')

try:
  from search.conf import SOLR_URL
except ImportError as e:
  LOG.warn('Solr Search interface is not enabled')


def _escape_white_space_characters(s, inverse=False):
  # Escapes literal whitespace characters ("\n" -> "\\n", etc.); with inverse=True the
  # replacement direction is reversed and previously escaped sequences are restored.
  MAPPINGS = {
    "\n": "\\n",
    "\t": "\\t",
    "\r": "\\r",
    " ": "\\s"
  }

  to = 1 if inverse else 0
  from_ = 0 if inverse else 1

  for pair in MAPPINGS.items():
    if sys.version_info[0] > 2:
      s = s.replace(pair[to], pair[from_])
    else:
      s = s.replace(pair[to], pair[from_]).encode('utf-8')

  return s


def _convert_format(format_dict, inverse=False):
  for field in format_dict:
    if isinstance(format_dict[field], basestring):
      format_dict[field] = _escape_white_space_characters(format_dict[field], inverse)


@api_error_handler
def guess_format(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'file':
    path = urllib_unquote(file_format["path"])
    indexer = MorphlineIndexer(request.user, request.fs)
    if not request.fs.isfile(path):
      raise PopupException(_('Path %(path)s is not a file') % file_format)

    stream = request.fs.open(path)
    format_ = indexer.guess_format({
      "file": {
        "stream": stream,
        "name": path
      }
    })
    _convert_format(format_)
  elif file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    try:
      table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    except Exception as e:
      raise PopupException(e.message if hasattr(e, 'message') and e.message else e)

    storage = {}
    for delim in table_metadata.storage_details:
      if delim['data_type']:
        if '=' in delim['data_type']:
          key, val = delim['data_type'].split('=', 1)
          storage[key] = val
        else:
          storage[delim['data_type']] = delim['comment']

    if table_metadata.details['properties']['format'] == 'text':
      format_ = {
        "quoteChar": "\"",
        "recordSeparator": '\\n',
        "type": "csv",
        "hasHeader": False,
        "fieldSeparator": storage.get('field.delim', ',')
      }
    elif table_metadata.details['properties']['format'] == 'parquet':
      format_ = {"type": "parquet", "hasHeader": False}
    else:
      raise PopupException('Hive table format %s is not supported.' % table_metadata.details['properties']['format'])
  elif file_format['inputFormat'] == 'query':
    format_ = {"quoteChar": "\"", "recordSeparator": "\\n", "type": "csv", "hasHeader": False, "fieldSeparator": "\u0001"}
  elif file_format['inputFormat'] == 'rdbms':
    format_ = {"type": "csv"}
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      format_ = {
        "type": "csv",
        "fieldSeparator": ",",
        "hasHeader": True,
        "quoteChar": "\"",
        "recordSeparator": "\\n",
        'topics': get_topics()
      }
    elif file_format['streamSelection'] == 'flume':
      format_ = {"type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n"}
  elif file_format['inputFormat'] == 'connector':
    if file_format['connectorSelection'] == 'sfdc':
      sf = Salesforce(
        username=file_format['streamUsername'],
        password=file_format['streamPassword'],
        security_token=file_format['streamToken']
      )
      format_ = {
        "type": "csv",
        "fieldSeparator": ",",
        "hasHeader": True,
        "quoteChar": "\"",
        "recordSeparator": "\\n",
        'objects': [sobject['name'] for sobject in sf.restful('sobjects/')['sobjects'] if sobject['queryable']]
      }
    else:
      raise PopupException(_('Input format %(inputFormat)s connector not recognized: %(connectorSelection)s') % file_format)
  else:
    raise PopupException(_('Input format not recognized: %(inputFormat)s') % file_format)

  format_['status'] = 0
  return JsonResponse(format_)
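

# Illustrative only (not part of the original module): guess_format() above reads a JSON
# 'fileFormat' payload from request.POST. For the 'file' branch it is assumed to look like
#   {"inputFormat": "file", "path": "/user/demo/data.csv"}
# and for the 'table' branch like
#   {"inputFormat": "table", "databaseName": "default", "tableName": "customers"}
# The values are made-up examples; only the key names come from the code above.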


def guess_field_types(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'file':
    indexer = MorphlineIndexer(request.user, request.fs)
    path = urllib_unquote(file_format["path"])
    stream = request.fs.open(path)
    encoding = check_encoding(stream.read(10000))
    stream.seek(0)
    _convert_format(file_format["format"], inverse=True)

    format_ = indexer.guess_field_types({
      "file": {
        "stream": stream,
        "name": path
      },
      "format": file_format['format']
    })

    # Note: Would also need to set charset to table (only supported in Hive)
    if 'sample' in format_ and format_['sample']:
      format_['sample'] = escape_rows(format_['sample'], nulls_only=True, encoding=encoding)
    for col in format_['columns']:
      col['name'] = smart_unicode(col['name'], errors='replace', encoding=encoding)
  elif file_format['inputFormat'] == 'table':
    sample = get_api(request, {'type': 'hive'}).get_sample_data(
      {'type': 'hive'}, database=file_format['databaseName'], table=file_format['tableName']
    )
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])

    format_ = {
      "sample": sample['rows'][:4],
      "columns": [
        Field(col.name, HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type, 'string')).to_dict()
        for col in table_metadata.cols
      ]
    }
  elif file_format['inputFormat'] == 'query':
    query_id = file_format['query']['id'] if file_format['query'].get('id') else file_format['query']
    notebook = Notebook(document=Document2.objects.document(user=request.user, doc_id=query_id)).get_data()
    snippet = notebook['snippets'][0]
    db = get_api(request, snippet)

    if file_format.get('sampleCols'):
      columns = file_format.get('sampleCols')
      sample = file_format.get('sample')
    else:
      snippet['query'] = snippet['statement']
      try:
        sample = db.fetch_result(notebook, snippet, 4, start_over=True)['rows'][:4]
      except Exception as e:
        LOG.warn('Skipping sample data as query handle might be expired: %s' % e)
        sample = [[], [], [], [], []]
      columns = db.autocomplete(snippet=snippet, database='', table='')
      columns = [
        Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
        for col in columns['extended_columns']
      ]

    format_ = {
      "sample": sample,
      "columns": columns,
    }
  elif file_format['inputFormat'] == 'rdbms':
    api = _get_api(request)
    sample = api.get_sample_data(None, database=file_format['rdbmsDatabaseName'], table=file_format['tableName'])

    format_ = {
      "sample": list(sample['rows'])[:4],
      "columns": [
        Field(col['name'], col['type']).to_dict()
        for col in sample['full_headers']
      ]
    }
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      if file_format.get('kafkaSelectedTopics') == 'user_behavior':
        kafkaFieldNames = [
          'user_id',
          'item_id',
          'category_id',
          'behavior',
          'ts'
        ]
        kafkaFieldTypes = ['BIGINT'] * len(kafkaFieldNames)
        kafkaFieldNames.append('proctime')
        kafkaFieldTypes.append('TIMESTAMP')
        kafkaFieldNames.append('WATERMARK')
        kafkaFieldTypes.append('WATERMARK')
      else:
        # Note: mocked here, should come from SFDC or Kafka API or sampling job
        kafkaFieldNames = file_format.get('kafkaFieldNames', '').split(',')
        kafkaFieldTypes = file_format.get('kafkaFieldTypes', '').split(',')

      data = """%(kafkaFieldNames)s
%(data)s""" % {
        'kafkaFieldNames': ','.join(kafkaFieldNames),
        'data': '\n'.join([','.join(['...'] * len(kafkaFieldTypes))] * 5)
      }
      stream = string_io()
      stream.write(data)

      _convert_format(file_format["format"], inverse=True)

      indexer = MorphlineIndexer(request.user, request.fs)
      format_ = indexer.guess_field_types({
        "file": {
          "stream": stream,
          "name": file_format['path']
        },
        "format": file_format['format']
      })
      type_mapping = dict(
        list(
          zip(kafkaFieldNames, kafkaFieldTypes)
        )
      )

      for col in format_['columns']:
        col['keyType'] = type_mapping[col['name']]
        col['type'] = type_mapping[col['name']]
    elif file_format['streamSelection'] == 'flume':
      if 'hue-httpd/access_log' in file_format['channelSourcePath']:
        columns = [
          {'name': 'id', 'type': 'string', 'unique': True},
          {'name': 'client_ip', 'type': 'string'},
          {'name': 'time', 'type': 'date'},
          {'name': 'request', 'type': 'string'},
          {'name': 'code', 'type': 'plong'},
          {'name': 'bytes', 'type': 'plong'},
          {'name': 'method', 'type': 'string'},
          {'name': 'url', 'type': 'string'},
          {'name': 'protocol', 'type': 'string'},
          {'name': 'app', 'type': 'string'},
          {'name': 'subapp', 'type': 'string'}
        ]
      else:
        columns = [{'name': 'message', 'type': 'string'}]

      format_ = {
        "sample": [['...'] * len(columns)] * 4,
        "columns": [
          Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string'), unique=col.get('unique')).to_dict()
          for col in columns
        ]
      }
  elif file_format['inputFormat'] == 'connector':
    if file_format['connectorSelection'] == 'sfdc':
      sf = Salesforce(
        username=file_format['streamUsername'],
        password=file_format['streamPassword'],
        security_token=file_format['streamToken']
      )
      table_metadata = [{
          'name': column['name'],
          'type': column['type']
        } for column in sf.restful('sobjects/%(streamObject)s/describe/' % file_format)['fields']
      ]
      query = 'SELECT %s FROM %s LIMIT 4' % (', '.join([col['name'] for col in table_metadata]), file_format['streamObject'])
      print(query)

      try:
        records = sf.query_all(query)
      except SalesforceRefusedRequest as e:
        raise PopupException(message=str(e))

      format_ = {
        "sample": [list(row.values())[1:] for row in records['records']],
        "columns": [
          Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
          for col in table_metadata
        ]
      }
    else:
      raise PopupException(_('Connector format not recognized: %(connectorSelection)s') % file_format)
  else:
    raise PopupException(_('Input format not recognized: %(inputFormat)s') % file_format)

  return JsonResponse(format_)
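

# Illustrative only: guess_field_types() above responds with JSON of the shape
#   {"sample": [[...], ...], "columns": [{"name": ..., "type": ..., ...}, ...]}
# where the column dicts are, in most branches, built via Field(...).to_dict(). The shape is
# read off the branches above, not from any additional documentation.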


@api_error_handler
def importer_submit(request):
  source = json.loads(request.POST.get('source', '{}'))
  outputFormat = json.loads(request.POST.get('destination', '{}'))['outputFormat']
  destination = json.loads(request.POST.get('destination', '{}'))
  destination['ouputFormat'] = outputFormat  # Workaround a very weird bug
  start_time = json.loads(request.POST.get('start_time', '-1'))

  if source['inputFormat'] == 'file':
    if source['path']:
      path = urllib_unquote(source['path'])
      source['path'] = request.fs.netnormpath(path)
  if destination['ouputFormat'] in ('database', 'table'):
    destination['nonDefaultLocation'] = request.fs.netnormpath(destination['nonDefaultLocation']) \
        if destination['nonDefaultLocation'] else destination['nonDefaultLocation']

  if destination['ouputFormat'] == 'index':
    source['columns'] = destination['columns']
    index_name = destination["name"]

    if destination['indexerRunJob'] or source['inputFormat'] == 'stream':
      _convert_format(source["format"], inverse=True)
      job_handle = _large_indexing(
        request,
        source,
        index_name,
        start_time=start_time,
        lib_path=destination['indexerJobLibPath'],
        destination=destination
      )
    else:
      client = SolrClient(request.user)
      job_handle = _small_indexing(
        request.user,
        request.fs,
        client,
        source,
        destination,
        index_name
      )
  elif source['inputFormat'] in ('stream', 'connector') or destination['ouputFormat'] == 'stream':
    args = {
      'source': source,
      'destination': destination,
      'start_time': start_time,
      'dry_run': request.POST.get('show_command')
    }
    api = FlinkIndexer(
      request.user,
      request.fs
    )

    job_handle = api.create_table_from_kafka(**args)

    if request.POST.get('show_command'):
      job_handle = {
        'status': 0,
        'commands': job_handle
      }
  elif source['inputFormat'] == 'altus':
    # BDR copy or DistCP + DDL + Sentry DDL copy
    pass
  elif source['inputFormat'] == 'rdbms':
    if destination['outputFormat'] in ('database', 'file', 'table', 'hbase'):
      job_handle = run_sqoop(
        request,
        source,
        destination,
        start_time
      )
  elif destination['ouputFormat'] == 'database':
    job_handle = _create_database(
      request,
      source,
      destination,
      start_time
    )
  else:
    job_handle = _create_table(
      request,
      source,
      destination,
      start_time
    )

  request.audit = {
    'operation': 'EXPORT',
    'operationText': 'User %(username)s exported %(inputFormat)s to %(ouputFormat)s: %(name)s' % {
      'username': request.user.username,
      'inputFormat': source['inputFormat'],
      'ouputFormat': destination['ouputFormat'],
      'name': destination['name'],
    },
    'allowed': True
  }

  return JsonResponse(job_handle)
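

# Illustrative only: importer_submit() above expects POST fields 'source', 'destination' and
# 'start_time' as JSON strings, plus an optional 'show_command' flag for a dry run, e.g.
#   source      = {"inputFormat": "file", "path": "/user/demo/data.csv", "format": {...}}
#   destination = {"outputFormat": "table", "name": "default.customers", ...}
# The example values are made up; only the key names appear in the code above.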


@require_POST
@api_error_handler
def index(request):
  '''
  Input: pasted data, CSV/json files, Kafka topic
  Output: tables
  '''
  source = json.loads(request.POST.get('source', '{}'))
  destination = json.loads(request.POST.get('destination', '{}'))
  options = json.loads(request.POST.get('options', '{}'))
  connector_id = request.POST.get('connector')

  api = get_index_api(request.user, connector_id)  # indexer.indexers.base.get_api (aliased import)

  if request.FILES.get('data'):
    source['file'] = request.FILES['data']

  result = api.index(source, destination, options)

  return JsonResponse({'result': result})


def _small_indexing(user, fs, client, source, destination, index_name):
  kwargs = {}
  errors = []

  if source['inputFormat'] not in ('manual', 'table', 'query_handle'):
    path = urllib_unquote(source["path"])
    stats = fs.stats(path)
    if stats.size > MAX_UPLOAD_SIZE:
      raise PopupException(_('File size is too large to handle!'))

  indexer = MorphlineIndexer(user, fs)

  fields = indexer.get_field_list(destination['columns'])
  _create_solr_collection(user, fs, client, destination, index_name, kwargs)

  if source['inputFormat'] == 'file':
    path = urllib_unquote(source["path"])
    data = fs.read(path, 0, MAX_UPLOAD_SIZE)

  if client.is_solr_six_or_more():
    kwargs['processor'] = 'tolerant'
    kwargs['map'] = 'NULL:'

  try:
    if source['inputFormat'] == 'query':
      query_id = source['query']['id'] if source['query'].get('id') else source['query']
      notebook = Notebook(document=Document2.objects.document(user=user, doc_id=query_id)).get_data()

      request = MockedDjangoRequest(user=user)
      snippet = notebook['snippets'][0]

      searcher = CollectionManagerController(user)
      columns = [field['name'] for field in fields if field['name'] != 'hue_id']
      # Assumes handle still live
      fetch_handle = lambda rows, start_over: get_api(
        request, snippet
      ).fetch_result(
        notebook,
        snippet,
        rows=rows,
        start_over=start_over
      )

      rows = searcher.update_data_from_hive(
        index_name,
        columns,
        fetch_handle=fetch_handle,
        indexing_options=kwargs
      )
      # TODO if rows == MAX_ROWS truncation warning
    elif source['inputFormat'] == 'manual':
      pass  # No need to do anything
    else:
      response = client.index(name=index_name, data=data, **kwargs)
      errors = [error.get('message', '') for error in response['responseHeader'].get('errors', [])]
  except Exception as e:
    try:
      client.delete_index(index_name, keep_config=False)
    except Exception as e2:
      LOG.warn('Error while cleaning-up config of failed collection creation %s: %s' % (index_name, e2))
    raise e

  return {
    'status': 0,
    'on_success_url': reverse('indexer:indexes', kwargs={'index': index_name}),
    'pub_sub_url': 'assist.collections.refresh',
    'errors': errors
  }
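

# _large_indexing() below dispatches on the source type (summary derived from the code itself,
# kept here as a reading aid):
#   - 'table'            -> batch morphline job over the table's HDFS location
#   - 'stream' + 'flume' -> FlumeIndexer config / start
#   - other 'stream'     -> _envelope_job()
#   - 'file'             -> morphline job over '${nameNode}<path>'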


def _large_indexing(request, file_format, collection_name, query=None, start_time=None, lib_path=None, destination=None):
  indexer = MorphlineIndexer(request.user, request.fs)

  unique_field = indexer.get_unique_field(file_format)
  is_unique_generated = indexer.is_unique_generated(file_format)

  schema_fields = indexer.get_kept_field_list(file_format['columns'])
  if is_unique_generated:
    schema_fields += [{"name": unique_field, "type": "string"}]

  client = SolrClient(user=request.user)

  if not client.exists(collection_name) and not request.POST.get('show_command'):  # if destination['isTargetExisting']:
    client.create_index(
      name=collection_name,
      fields=request.POST.get('fields', schema_fields),
      unique_key_field=unique_field
      # No df currently
    )
  else:
    # TODO: check if format matches
    pass

  if file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    input_path = table_metadata.path_location
  elif file_format['inputFormat'] == 'stream' and file_format['streamSelection'] == 'flume':
    indexer = FlumeIndexer(user=request.user)
    if request.POST.get('show_command'):
      configs = indexer.generate_config(file_format, destination)
      return {'status': 0, 'commands': configs[-1]}
    else:
      return indexer.start(collection_name, file_format, destination)
  elif file_format['inputFormat'] == 'stream':
    return _envelope_job(request, file_format, destination, start_time=start_time, lib_path=lib_path)
  elif file_format['inputFormat'] == 'file':
    input_path = '${nameNode}%s' % urllib_unquote(file_format["path"])
  else:
    input_path = None

  morphline = indexer.generate_morphline_config(collection_name, file_format, unique_field, lib_path=lib_path)

  return indexer.run_morphline(
    request,
    collection_name,
    morphline,
    input_path,
    query,
    start_time=start_time,
    lib_path=lib_path
  )


def _envelope_job(request, file_format, destination, start_time=None, lib_path=None):
  collection_name = destination['name']
  indexer = EnvelopeIndexer(request.user, request.fs)

  lib_path = None  # Todo optional input field
  input_path = None

  if file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    input_path = table_metadata.path_location
  elif file_format['inputFormat'] == 'file':
    input_path = file_format["path"]
    properties = {
      'input_path': input_path,
      'format': 'csv'
    }
  elif file_format['inputFormat'] == 'stream' and file_format['streamSelection'] == 'flume':
    pass
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      manager = ManagerApi()
      properties = {
        "brokers": manager.get_kafka_brokers(),
        "topics": file_format['kafkaSelectedTopics'],
        "kafkaFieldType": file_format['kafkaFieldType'],
        "kafkaFieldDelimiter": file_format['kafkaFieldDelimiter'],
      }

      if file_format.get('kafkaSelectedTopics') == 'NavigatorAuditEvents':
        schema_fields = MorphlineIndexer.get_kept_field_list(file_format['sampleCols'])
        properties.update({
          "kafkaFieldNames": ', '.join([_field['name'] for _field in schema_fields]),
          "kafkaFieldTypes": ', '.join([_field['type'] for _field in schema_fields])
        })
      else:
        properties.update({
          "kafkaFieldNames": file_format['kafkaFieldNames'],
          "kafkaFieldTypes": file_format['kafkaFieldTypes']
        })

      if True:
        properties['window'] = ''
      else:  # For "KafkaSQL"
        properties['window'] = '''
            window {
                enabled = true
                milliseconds = 60000
            }'''
  elif file_format['inputFormat'] == 'connector':
    if file_format['streamSelection'] == 'flume':
      properties = {
        'streamSelection': file_format['streamSelection'],
        'channelSourceHosts': file_format['channelSourceHosts'],
        'channelSourceSelectedHosts': file_format['channelSourceSelectedHosts'],
        'channelSourcePath': file_format['channelSourcePath'],
      }
    else:
      # sfdc
      properties = {
        'streamSelection': file_format['streamSelection'],
        'streamUsername': file_format['streamUsername'],
        'streamPassword': file_format['streamPassword'],
        'streamToken': file_format['streamToken'],
        'streamEndpointUrl': file_format['streamEndpointUrl'],
        'streamObject': file_format['streamObject'],
      }

  if destination['outputFormat'] == 'table':
    if destination['isTargetExisting']:  # Todo: check if format matches
      pass
    else:
      destination['importData'] = False  # Avoid LOAD DATA
      if destination['tableFormat'] == 'kudu':
        properties['kafkaFieldNames'] = properties['kafkaFieldNames'].lower()  # Kudu names should be all lowercase
      # Create table
      if not request.POST.get('show_command'):
        SQLIndexer(
          user=request.user,
          fs=request.fs
        ).create_table_from_a_file(
          file_format,
          destination
        ).execute(request)

    if destination['tableFormat'] == 'kudu':
      manager = ManagerApi()
      properties["output_table"] = "impala::%s" % collection_name
      properties["kudu_master"] = manager.get_kudu_master()
    else:
      properties['output_table'] = collection_name
  elif destination['outputFormat'] == 'stream':
    manager = ManagerApi()
    properties['brokers'] = manager.get_kafka_brokers()
    properties['topics'] = file_format['kafkaSelectedTopics']
    properties['kafkaFieldDelimiter'] = file_format['kafkaFieldDelimiter']
  elif destination['outputFormat'] == 'file':
    properties['path'] = file_format["path"]
    if file_format['inputFormat'] == 'stream':
      properties['format'] = 'csv'
    else:
      properties['format'] = file_format['tableFormat']  # or csv
  elif destination['outputFormat'] == 'index':
    properties['collectionName'] = collection_name
    properties['connection'] = SOLR_URL.get()

  properties["app_name"] = 'Data Ingest'
  properties["inputFormat"] = file_format['inputFormat']
  properties["ouputFormat"] = destination['ouputFormat']
  properties["streamSelection"] = file_format["streamSelection"]

  configs = indexer.generate_config(properties)

  if request.POST.get('show_command'):
    return {'status': 0, 'commands': configs['envelope.conf']}
  else:
    return indexer.run(request, collection_name, configs, input_path, start_time=start_time, lib_path=lib_path)


def _create_solr_collection(user, fs, client, destination, index_name, kwargs):
  unique_key_field = destination['indexerPrimaryKey'] and destination['indexerPrimaryKey'][0] or None
  df = destination['indexerDefaultField'] and destination['indexerDefaultField'][0] or None

  indexer = MorphlineIndexer(user, fs)
  fields = indexer.get_field_list(destination['columns'])
  skip_fields = [field['name'] for field in fields if not field['keep']]

  kwargs['fieldnames'] = ','.join([field['name'] for field in fields])
  for field in fields:
    for operation in field['operations']:
      if operation['type'] == 'split':
        field['multiValued'] = True  # Solr requires multiValued to be set when splitting
        kwargs['f.%(name)s.split' % field] = 'true'
        kwargs['f.%(name)s.separator' % field] = operation['settings']['splitChar'] or ','

  if skip_fields:
    kwargs['skip'] = ','.join(skip_fields)
    fields = [field for field in fields if field['name'] not in skip_fields]

  if not unique_key_field:
    unique_key_field = 'hue_id'
    fields += [{"name": unique_key_field, "type": "string"}]
    kwargs['rowid'] = unique_key_field

  if not destination['hasHeader']:
    kwargs['header'] = 'false'
  else:
    kwargs['skipLines'] = 1

  if not client.exists(index_name):
    client.create_index(
      name=index_name,
      config_name=destination.get('indexerConfigSet'),
      fields=fields,
      unique_key_field=unique_key_field,
      df=df,
      shards=destination['indexerNumShards'],
      replication=destination['indexerReplicationFactor']
    )
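

# Note (added, not in the original): _create_solr_collection() above mutates the `kwargs` dict
# passed in by _small_indexing(); after the call it typically carries Solr CSV update options
# such as 'fieldnames', 'skip', 'rowid', 'header'/'skipLines' and per-field
# 'f.<name>.split' / 'f.<name>.separator' entries, which _small_indexing() then forwards to
# client.index().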


@api_error_handler
@require_POST
# @check_document_modify_permission()
def save_pipeline(request):
  response = {'status': -1}

  notebook = json.loads(request.POST.get('notebook', '{}'))

  notebook_doc, save_as = _save_pipeline(notebook, request.user)

  response['status'] = 0
  response['save_as'] = save_as
  response.update(notebook_doc.to_dict())
  response['message'] = request.POST.get('editorMode') == 'true' and _('Query saved successfully') or _('Notebook saved successfully')

  return JsonResponse(response)