api3.py

#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _

from desktop.lib import django_mako
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document2

from indexer.controller import CollectionManagerController
from indexer.fields import Field
from indexer.file_format import HiveFormat
from indexer.smart_indexer import Indexer

from notebook.connectors.base import get_api, Notebook
from notebook.models import make_notebook


LOG = logging.getLogger(__name__)

try:
  from beeswax.server import dbms
except ImportError, e:
  LOG.warn('Hive and HiveServer2 interfaces are not enabled')
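

# Replace literal whitespace characters with printable escape sequences, or the
# reverse when inverse=True (presumably so separators like '\t' can be shown
# and edited as text in the import wizard).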
def _escape_white_space_characters(s, inverse=False):
  MAPPINGS = {
    "\n": "\\n",
    "\t": "\\t",
    "\r": "\\r",
    " ": "\\s"
  }

  to = 1 if inverse else 0
  from_ = 0 if inverse else 1

  for pair in MAPPINGS.iteritems():
    s = s.replace(pair[to], pair[from_]).encode('utf-8')

  return s
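

# Escape (or unescape, with inverse=True) every string value of a format dict,
# in place.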
def _convert_format(format_dict, inverse=False):
  for field in format_dict:
    if isinstance(format_dict[field], basestring):
      format_dict[field] = _escape_white_space_characters(format_dict[field], inverse)
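

# Guess the format (type, delimiters, header) of the source described by the
# POSTed 'fileFormat': a file on the filesystem, a Hive table, or a query.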
def guess_format(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'file':
    indexer = Indexer(request.user, request.fs)
    stream = request.fs.open(file_format["path"])
    format_ = indexer.guess_format({
      "file": {
        "stream": stream,
        "name": file_format['path']
      }
    })
    _convert_format(format_)
  elif file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    storage = dict([(delim['data_type'], delim['comment']) for delim in table_metadata.storage_details])

    if table_metadata.details['properties']['format'] == 'text':
      format_ = {"quoteChar": "\"", "recordSeparator": '\\n', "type": "csv", "hasHeader": False, "fieldSeparator": storage['serialization.format']}
    elif table_metadata.details['properties']['format'] == 'parquet':
      format_ = {"type": "parquet", "hasHeader": False}
    else:
      raise PopupException('Hive table format %s is not supported.' % table_metadata.details['properties']['format'])
  elif file_format['inputFormat'] == 'query':
    format_ = {"quoteChar": "\"", "recordSeparator": "\\n", "type": "csv", "hasHeader": False, "fieldSeparator": "\u0001"}

  return JsonResponse(format_)
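

# Sample the source and guess a name and type for each of its columns, using
# the file itself, the Hive table metadata, or the first rows of a query result.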
def guess_field_types(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'file':
    indexer = Indexer(request.user, request.fs)
    stream = request.fs.open(file_format["path"])
    _convert_format(file_format["format"], inverse=True)
    format_ = indexer.guess_field_types({
      "file": {
        "stream": stream,
        "name": file_format['path']
      },
      "format": file_format['format']
    })
  elif file_format['inputFormat'] == 'table':
    sample = get_api(request, {'type': 'hive'}).get_sample_data({'type': 'hive'}, database=file_format['databaseName'], table=file_format['tableName'])
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    format_ = {
      "sample": sample['rows'][:4],
      "columns": [
        Field(col.name, HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type, 'string')).to_dict()
        for col in table_metadata.cols
      ]
    }
  elif file_format['inputFormat'] == 'query':  # Only open queries from the query history are supported
    # TODO: get the schema from an EXPLAIN of the query, which is not currently possible
    notebook = Notebook(document=Document2.objects.get(id=file_format['query'])).get_data()
    snippet = notebook['snippets'][0]
    sample = get_api(request, snippet).fetch_result(notebook, snippet, 4, start_over=True)
    format_ = {
      "sample": sample['rows'][:4],
      "sample_cols": sample.meta,
      "columns": [
        Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
        for col in sample.meta
      ]
    }

  return JsonResponse(format_)
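

# Index a file into a Solr collection named after the submitted file format.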
def index_file(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))
  _convert_format(file_format["format"], inverse=True)
  collection_name = file_format["name"]

  job_handle = _index(request, file_format, collection_name)
  return JsonResponse(job_handle)
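

# Entry point of the import wizard: dispatch to Solr indexing, database
# creation or table creation depending on the chosen output format.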
def importer_submit(request):
  source = json.loads(request.POST.get('source', '{}'))
  destination = json.loads(request.POST.get('destination', '{}'))

  # Note: 'ouputFormat' (sic) is the key as sent by the client.
  if destination['ouputFormat'] == 'index':
    _convert_format(source["format"], inverse=True)
    collection_name = destination["name"]
    source['columns'] = destination['columns']
    job_handle = _index(request, source, collection_name)
  elif destination['ouputFormat'] == 'database':
    job_handle = create_database(request, source, destination)
  else:
    job_handle = _create_table(request, source, destination)

  return JsonResponse(job_handle)
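

# Render a CREATE DATABASE statement and execute it through a transient
# notebook, landing on the Metastore table list on success.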
def create_database(request, source, destination):
  database = destination['name']
  comment = destination['description']
  use_default_location = destination['useDefaultLocation']
  external_path = destination['nonDefaultLocation']

  sql = django_mako.render_to_string("gen/create_database_statement.mako", {
      'database': {
        'name': database,
        'comment': comment,
        'use_default_location': use_default_location,
        'external_location': external_path,
        'properties': [],
      }
    }
  )

  editor_type = 'hive'
  on_success_url = reverse('metastore:show_tables', kwargs={'database': database})

  try:
    notebook = make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready', on_success_url=on_success_url)
    return notebook.execute(request, batch=False)
  except Exception, e:
    raise PopupException(_('The database could not be created.'), detail=e.message)
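

# Build and execute the CREATE TABLE notebook for a source/destination pair.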
def _create_table(request, source, destination):
  try:
    notebook = _create_table_from_a_file(request, source, destination)
    return notebook.execute(request, batch=False)
  except Exception, e:
    raise PopupException(_('The table could not be created.'), detail=e.message)
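

# Assemble the SQL to import a file into a table: an optional temporary table
# for Parquet/Kudu targets, the CREATE TABLE statement itself, an optional
# LOAD DATA, and a final CTAS plus cleanup when converting to another format.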
def _create_table_from_a_file(request, source, destination):
  if '.' in destination['name']:
    database, table_name = destination['name'].split('.', 1)
  else:
    database = 'default'
    table_name = destination['name']
  final_table_name = table_name

  table_format = destination['tableFormat']
  columns = destination['columns']
  partition_columns = destination['partitionColumns']
  comment = destination['description']

  source_path = source['path']
  external = not destination['useDefaultLocation']
  external_path = destination['nonDefaultLocation']
  load_data = destination['importData']
  skip_header = destination['hasHeader']
  primary_keys = destination['primaryKeys']

  if destination['useCustomDelimiters']:
    field_delimiter = destination['customFieldDelimiter']
    collection_delimiter = destination['customCollectionDelimiter']
    map_delimiter = destination['customMapDelimiter']
    regexp_delimiter = destination['customRegexp']
  else:
    field_delimiter = ','
    collection_delimiter = r'\\002'
    map_delimiter = r'\\003'
    regexp_delimiter = '.*'

  file_format = 'TextFile'
  row_format = 'Delimited'
  serde_name = ''
  serde_properties = ''
  extra_create_properties = ''
  sql = ''

  if source['inputFormat'] == 'manual':
    load_data = False

  if table_format == 'json':
    row_format = 'serde'
    serde_name = 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
    serde_properties = '''"separatorChar" = "\\t",
"quoteChar" = "'",
"escapeChar" = "\\\\"
'''

  if load_data and table_format in ('parquet', 'kudu'):
    # Load into a temporary text table first; the CTAS below converts it.
    table_name, final_table_name = 'hue__tmp_%s' % table_name, table_name
    sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
      'database': database,
      'table_name': table_name
    }

  if external or (load_data and table_format in ('parquet', 'kudu')):
    if not request.fs.isdir(external_path):  # A file, not a directory, was selected
      external_path, external_file_name = request.fs.split(external_path)
      if len(request.fs.listdir(external_path)) > 1:
        # The directory holds more than just this file: create a dedicated
        # data directory and move the file into it.
        external_path = external_path + '/%s_table' % external_file_name
        request.fs.mkdir(external_path)
        request.fs.rename(source_path, external_path)

  sql += django_mako.render_to_string("gen/create_table_statement.mako", {
      'table': {
        'name': table_name,
        'comment': comment,
        'row_format': row_format,
        'field_terminator': field_delimiter,
        'collection_terminator': collection_delimiter,
        'map_key_terminator': map_delimiter,
        'serde_name': serde_name,
        'serde_properties': serde_properties,
        'file_format': file_format,
        'external': external or (load_data and table_format in ('parquet', 'kudu')),
        'path': external_path,
        'skip_header': skip_header,
        'primary_keys': primary_keys if table_format == 'kudu' and not load_data else []
      },
      'columns': columns,
      'partition_columns': partition_columns,
      'database': database
    }
  )

  if table_format == 'text' and not external and load_data:
    sql += "\n\nLOAD DATA INPATH '%s' INTO TABLE `%s`.`%s`;" % (source_path, database, table_name)

  if load_data and table_format in ('parquet', 'kudu'):  # The temporary table only exists when data was loaded
    file_format = table_format

    if table_format == 'kudu':
      columns_list = ['`%s`' % col for col in primary_keys + [col['name'] for col in destination['columns'] if col['name'] not in primary_keys]]
      extra_create_properties = """PRIMARY KEY (%(primary_keys)s)
DISTRIBUTE BY HASH INTO 16 BUCKETS
STORED AS %(file_format)s
TBLPROPERTIES(
'kudu.num_tablet_replicas' = '1'
)""" % {
        'file_format': file_format,
        'primary_keys': ', '.join(primary_keys)
      }
    else:
      columns_list = ['*']

    sql += '''\n\nCREATE TABLE `%(database)s`.`%(final_table_name)s`
%(extra_create_properties)s
AS SELECT %(columns_list)s
FROM `%(database)s`.`%(table_name)s`;''' % {
      'database': database,
      'final_table_name': final_table_name,
      'table_name': table_name,
      'extra_create_properties': extra_create_properties,
      'columns_list': ', '.join(columns_list),
    }
    sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
      'database': database,
      'table_name': table_name
    }

  editor_type = 'impala' if table_format == 'kudu' else 'hive'
  # Use the final table name: table_name can point at the dropped temporary table.
  on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': final_table_name})

  return make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready', database=database, on_success_url=on_success_url)
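

# Create the Solr collection if it does not exist yet, generate a Morphline
# configuration for the source, and hand the indexing job to the Indexer.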
def _index(request, file_format, collection_name, query=None):
  indexer = Indexer(request.user, request.fs)

  unique_field = indexer.get_unique_field(file_format)
  is_unique_generated = indexer.is_unique_generated(file_format)

  schema_fields = indexer.get_kept_field_list(file_format['columns'])
  if is_unique_generated:
    schema_fields += [{"name": unique_field, "type": "string"}]

  morphline = indexer.generate_morphline_config(collection_name, file_format, unique_field)

  collection_manager = CollectionManagerController(request.user)
  if not collection_manager.collection_exists(collection_name):
    collection_manager.create_collection(collection_name, schema_fields, unique_key_field=unique_field)

  if file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    input_path = table_metadata.path_location
  elif file_format['inputFormat'] == 'file':
    input_path = '${nameNode}%s' % file_format["path"]
  else:
    input_path = None

  return indexer.run_morphline(request, collection_name, morphline, input_path, query)