tests.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import urllib

from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false

from django.utils.encoding import smart_str
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse

from desktop.lib.django_test_util import make_logged_in_client, assert_equal_mod_whitespace
from desktop.lib.test_utils import add_permission, grant_access
from hadoop.pseudo_hdfs4 import is_live_cluster
from metastore import parser
from useradmin.models import HuePermission, GroupPermission, group_has_permission

from beeswax.conf import LIST_PARTITIONS_LIMIT
from beeswax.views import collapse_whitespace
from beeswax.test_base import make_query, wait_for_query_to_finish, verify_history, get_query_server_config, fetch_query_result_data
from beeswax.models import QueryHistory
from beeswax.server import dbms
from beeswax.test_base import BeeswaxSampleProvider


LOG = logging.getLogger(__name__)


def _make_query(client, query, submission_type="Execute",
                udfs=None, settings=None, resources=[],
                wait=False, name=None, desc=None, local=True,
                is_parameterized=True, max=30.0, database='default', email_notify=False, **kwargs):
  """Wrapper around the real make_query"""
  res = make_query(client, query, submission_type,
                   udfs, settings, resources,
                   wait, name, desc, local, is_parameterized, max, database, email_notify, **kwargs)

  # Should be in the history if it's submitted.
  if submission_type == 'Execute':
    fragment = collapse_whitespace(smart_str(query[:20]))
    verify_history(client, fragment=fragment)

  return res


class TestMetastoreWithHadoop(BeeswaxSampleProvider):
  requires_hadoop = True

  def setUp(self):
    user = User.objects.get(username='test')
    self.db = dbms.get(user, get_query_server_config())

    add_permission("test", "test", "write", "metastore")

  def test_basic_flow(self):
    # Default database should exist
    response = self.client.get("/metastore/databases")
    assert_true(self.db_name in response.context["databases"])

    # Table should have been created
    response = self.client.get("/metastore/tables/")
    assert_equal(200, response.status_code)

    # Switch databases
    response = self.client.get("/metastore/tables/%s?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_true('name' in data["tables"][0])
    assert_true("test" in data["table_names"])

    # Should default to "default" database
    response = self.client.get("/metastore/tables/not_there")
    assert_equal(200, response.status_code)

    # And have detail
    response = self.client.get("/metastore/table/%s/test?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_true("foo" in [col['name'] for col in data['cols']])
    assert_true("SerDe Library:" in [prop['col_name'] for prop in data['properties']], data)

    # Remember the number of history items. Use a generic fragment 'test' to pass verification.
    history_cnt = verify_history(self.client, fragment='test')

    # Show table data.
    response = self.client.get("/metastore/table/%s/test/read" % self.db_name, follow=True)
    response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
    response = wait_for_query_to_finish(self.client, response, max=30.0)
    # Note that it may not return all rows at once. But we expect at least 10.
    results = fetch_query_result_data(self.client, response)
    assert_true(len(results['results']) > 0)
    # This should NOT go into the query history.
    assert_equal(verify_history(self.client, fragment='test'), history_cnt, 'Implicit queries should not be saved in the history')

  def test_show_tables(self):
    hql = """
      CREATE TABLE test_show_tables_1 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_2 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_3 (a int) COMMENT 'Test for show_tables';
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Table should have been created
    response = self.client.get("/metastore/tables/%s?filter=show_tables&format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_equal(len(data['tables']), 3)
    assert_true('name' in data["tables"][0])
    assert_true('comment' in data["tables"][0])
    assert_true('type' in data["tables"][0])

    hql = """
      CREATE TABLE test_show_tables_4 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_5 (a int) COMMENT 'Test for show_tables';
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Table should have been created
    response = self.client.get("/metastore/tables/%s?filter=show_tables&format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_equal(len(data['tables']), 5)
    assert_true('name' in data["tables"][0])
    assert_true('comment' in data["tables"][0])
    assert_true('type' in data["tables"][0])

    hql = """
      CREATE INDEX test_index ON TABLE test_show_tables_1 (a) AS 'COMPACT' WITH DEFERRED REBUILD;
    """
    resp = _make_query(self.client, hql, wait=True, local=False, max=30.0, database=self.db_name)

    # By default, index table should not appear in show tables view
    response = self.client.get("/metastore/tables/%s?format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_false('test_index' in data['tables'])

  def test_describe_view(self):
    resp = self.client.get('/metastore/table/%s/myview?format=json' % self.db_name)
    assert_equal(200, resp.status_code, resp.content)
    data = json.loads(resp.content)
    assert_true(data['is_view'])
    assert_equal("myview", data['name'])

  def test_describe_partitions(self):
    response = self.client.get("/metastore/table/%s/test_partitions?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_equal(2, len(data['partition_keys']), data)

    response = self.client.get("/metastore/table/%s/test_partitions/partitions?format=json" % self.db_name, follow=True)
    data = json.loads(response.content)
    partition_columns = [col for cols in data['partition_values_json'] for col in cols['columns']]
    assert_true("baz_one" in partition_columns)
    assert_true("boom_two" in partition_columns)
    assert_true("baz_foo" in partition_columns)
    assert_true("boom_bar" in partition_columns)

    # Not partitioned
    response = self.client.get("/metastore/table/%s/test/partitions" % self.db_name, follow=True)
    assert_true("is not partitioned." in response.content)

  def test_describe_partitioned_table_with_limit(self):
    # We have 2 partitions in the test table
    finish = LIST_PARTITIONS_LIMIT.set_for_testing("1")
    try:
      response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
      partition_values_json = json.loads(response.context['partition_values_json'])
      assert_equal(1, len(partition_values_json))
    finally:
      finish()

    finish = LIST_PARTITIONS_LIMIT.set_for_testing("3")
    try:
      response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
      partition_values_json = json.loads(response.context['partition_values_json'])
      assert_equal(2, len(partition_values_json))
    finally:
      finish()

  def test_read_partitions(self):
    if not is_live_cluster():
      raise SkipTest

    partition_spec = "baz='baz_one',boom='boom_two'"
    response = self.client.get("/metastore/table/%s/test_partitions/partitions/%s/read" % (self.db_name, partition_spec), follow=True)
    response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
    response = wait_for_query_to_finish(self.client, response, max=30.0)
    results = fetch_query_result_data(self.client, response)
    assert_true(len(results['results']) > 0, results)

  def test_browse_partition(self):
    partition_spec = "baz='baz_one',boom='boom_two'"
    response = self.client.get("/metastore/table/%s/test_partitions/partitions/%s/browse" % (self.db_name, partition_spec), follow=True)
    if is_live_cluster():
      path = '/user/hive/warehouse/%s.db/test_partitions/baz=baz_one/boom=boom_two' % self.db_name
    else:
      path = '/user/hive/warehouse/test_partitions/baz=baz_one/boom=boom_two'
    filebrowser_path = urllib.unquote(reverse("filebrowser.views.view", kwargs={'path': path}))
    assert_equal(response.request['PATH_INFO'], filebrowser_path)

  def test_drop_partition(self):
    # Create partition first
    partition_spec = "baz='baz_drop',boom='boom_drop'"
    hql = 'ALTER TABLE `%s`.`test_partitions` ADD IF NOT EXISTS PARTITION (%s);' % (self.db_name, partition_spec)
    resp = _make_query(self.client, hql, database=self.db_name)
    wait_for_query_to_finish(self.client, resp, max=30.0)

    # Assert partition exists
    response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
    assert_true("baz_drop" in response.content)

    # Drop partition
    self.client.post("/metastore/table/%s/test_partitions/partitions/drop" % self.db_name, {'partition_selection': [partition_spec]}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("ALTER TABLE `%s`.`test_partitions` DROP IF EXISTS PARTITION (%s) PURGE" % (self.db_name, partition_spec), query.query)
    response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
    assert_false("baz_drop" in response.content)

  def test_drop_multi_tables(self):
    hql = """
      CREATE TABLE test_drop_1 (a int);
      CREATE TABLE test_drop_2 (a int);
      CREATE TABLE test_drop_3 (a int);
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Drop them
    resp = self.client.get('/metastore/tables/drop/%s' % self.db_name, follow=True)
    assert_true('want to delete' in resp.content, resp.content)
    resp = self.client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_drop_1', u'test_drop_2', u'test_drop_3']})
    assert_equal(resp.status_code, 302)

  def test_drop_multi_tables_with_skip_trash(self):
    hql = """
      CREATE TABLE test_drop_multi_tables_with_skip_trash_1 (a int);
      CREATE TABLE test_drop_multi_tables_with_skip_trash_2 (a int);
      CREATE TABLE test_drop_multi_tables_with_skip_trash_3 (a int);
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Drop them
    resp = self.client.get('/metastore/tables/drop/%s' % self.db_name, follow=True)
    assert_true('want to delete' in resp.content, resp.content)
    resp = self.client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_drop_multi_tables_with_skip_trash_1', u'test_drop_multi_tables_with_skip_trash_2', u'test_drop_multi_tables_with_skip_trash_3'], u'skip_trash': u'on'})
    assert_equal(resp.status_code, 302)

    response = self.client.get("/metastore/tables/%s?format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_false('test_drop_multi_tables_with_skip_trash_1' in data['tables'])
    assert_false('test_drop_multi_tables_with_skip_trash_2' in data['tables'])
    assert_false('test_drop_multi_tables_with_skip_trash_3' in data['tables'])

  def test_drop_multi_databases(self):
    db1 = '%s_test_drop_1' % self.db_name
    db2 = '%s_test_drop_2' % self.db_name
    db3 = '%s_test_drop_3' % self.db_name

    try:
      hql = """
        CREATE DATABASE %(db1)s;
        CREATE DATABASE %(db2)s;
        CREATE DATABASE %(db3)s;
      """ % {'db1': db1, 'db2': db2, 'db3': db3}
      resp = _make_query(self.client, hql)
      resp = wait_for_query_to_finish(self.client, resp, max=30.0)

      # Add a table to db1
      hql = "CREATE TABLE " + "`" + db1 + "`." + "`test_drop_1` (a int);"
      resp = _make_query(self.client, hql, database=db1)
      resp = wait_for_query_to_finish(self.client, resp, max=30.0)
      assert_equal(resp.status_code, 200)

      # Drop them
      resp = self.client.get('/metastore/databases/drop', follow=True)
      assert_true('want to delete' in resp.content, resp.content)
      resp = self.client.post('/metastore/databases/drop', {u'database_selection': [db1, db2, db3]})
      assert_equal(resp.status_code, 302)
    finally:
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db1}, wait=True)
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db2}, wait=True)
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db3}, wait=True)

  def test_load_data(self):
    """
    Test load data queries.
    These require Hadoop, because they ask the metastore
    about whether a table is partitioned.
    """
    # Check that view works
    resp = self.client.get("/metastore/table/%s/test/load" % self.db_name, follow=True)
    assert_true('Path' in resp.content)

    data_path = '%(prefix)s/tmp/foo' % {'prefix': self.cluster.fs_prefix}

    # Try the submission
    self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': True}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' OVERWRITE INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)

    resp = self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': False}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)

    # Try it with partitions
    resp = self.client.post("/metastore/table/%s/test_partitions/load" % self.db_name, {'path': data_path, 'partition_0': "alpha", 'partition_1': "beta"}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace(query.query, "LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test_partitions` PARTITION (baz='alpha', boom='beta')" % {'data_path': data_path, 'db': self.db_name})

  def test_has_write_access_frontend(self):
    client = make_logged_in_client(username='write_access_frontend', groupname='write_access_frontend', is_superuser=False)
    grant_access("write_access_frontend", "write_access_frontend", "metastore")
    user = User.objects.get(username='write_access_frontend')

    def check(client, assertz):
      response = client.get("/metastore/databases")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new database" in response.content, response.content)

      response = client.get("/metastore/tables/")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new table" in response.content, response.content)

    check(client, assert_false)

    # Add access
    group, created = Group.objects.get_or_create(name='write_access_frontend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, assert_true)

  def test_has_write_access_backend(self):
    client = make_logged_in_client(username='write_access_backend', groupname='write_access_backend', is_superuser=False)
    grant_access("write_access_backend", "write_access_backend", "metastore")
    grant_access("write_access_backend", "write_access_backend", "beeswax")
    user = User.objects.get(username='write_access_backend')

    resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);', database=self.db_name)  # Only fails if we were using Sentry and won't allow SELECT to user
    resp = wait_for_query_to_finish(client, resp, max=30.0)

    def check(client, http_codes):
      resp = client.get('/metastore/tables/drop/%s' % self.db_name)
      assert_true(resp.status_code in http_codes, resp.content)

      resp = client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_perm_1']})
      assert_true(resp.status_code in http_codes, resp.content)

    check(client, [301])  # Denied

    # Add access
    group, created = Group.objects.get_or_create(name='write_access_backend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, [200, 302])  # Ok

  def test_alter_database(self):
    resp = self.client.post(reverse("metastore:get_database_metadata", kwargs={'database': self.db_name}))
    json_resp = json.loads(resp.content)
    assert_true('data' in json_resp, json_resp)
    assert_true('parameters' in json_resp['data'], json_resp)
    assert_false('message=After Alter' in json_resp['data']['parameters'], json_resp)

    # Alter message
    resp = self.client.post(reverse("metastore:alter_database", kwargs={'database': self.db_name}),
                            {'properties': json.dumps({'message': 'After Alter'})})
    json_resp = json.loads(resp.content)
    assert_equal(0, json_resp['status'], json_resp)
    assert_equal('{message=After Alter}', json_resp['data']['parameters'], json_resp)

  def test_alter_table(self):
    resp = _make_query(self.client, "CREATE TABLE test_alter_table (a int) COMMENT 'Before Alter';", database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    resp = self.client.get('/metastore/table/%s/test_alter_table' % self.db_name)
    assert_true('test_alter_table', resp.content)
    assert_true('Before Alter', resp.content)

    # Alter name
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_table'}),
                            {'new_table_name': 'table_altered'})
    json_resp = json.loads(resp.content)
    assert_equal('table_altered', json_resp['data']['name'], json_resp)

    # Alter comment
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'table_altered'}),
                            {'comment': 'After Alter'})
    json_resp = json.loads(resp.content)
    assert_equal('After Alter', json_resp['data']['comment'], json_resp)

    # Invalid table name returns error response
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'table_altered'}),
                            {'new_table_name': 'bad name'})
    json_resp = json.loads(resp.content)
    assert_equal(1, json_resp['status'], json_resp)
    assert_true('Failed to alter table' in json_resp['data'], json_resp)

  def test_alter_column(self):
    resp = _make_query(self.client, 'CREATE TABLE test_alter_column (before_alter int);', database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    resp = self.client.get('/metastore/table/%s/test_alter_column' % self.db_name)
    assert_true('before_alter', resp.content)
    assert_true('int', resp.content)

    # Alter name, type and comment
    resp = self.client.post(reverse("metastore:alter_column",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_column'}),
                            {'column': 'before_alter', 'new_column_name': 'after_alter', 'new_column_type': 'string', 'comment': 'alter comment'})
    json_resp = json.loads(resp.content)
    assert_equal('after_alter', json_resp['data']['name'], json_resp)
    assert_equal('string', json_resp['data']['type'], json_resp)
    assert_equal('alter comment', json_resp['data']['comment'], json_resp)

    # Invalid column type returns error response
    resp = self.client.post(reverse("metastore:alter_column",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_column'}),
                            {'column': 'before_alter', 'new_column_name': 'foo'})
    json_resp = json.loads(resp.content)
    assert_equal(1, json_resp['status'], json_resp)
    assert_true('Failed to alter column' in json_resp['message'], json_resp)


class TestParser(object):

  def test_parse_simple(self):
    name = 'simple'
    type = 'string'
    comment = 'test_parse_simple'
    column = {'name': name, 'type': type, 'comment': comment}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)

  def test_parse_decimal(self):
    name = 'simple'
    type = 'decimal(12,2)'
    comment = 'test_parse_decimal'
    column = {'name': name, 'type': type, 'comment': comment}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)

  def test_parse_array(self):
    name = 'array'
    type = 'array<string>'
    comment = 'test_parse_array'
    column = {'name': name, 'type': 'array', 'comment': comment, 'item': {'type': 'string'}}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)

  def test_parse_map(self):
    name = 'map'
    type = 'map<string,int>'
    comment = 'test_parse_map'
    column = {'name': name, 'type': 'map', 'comment': comment, 'key': {'type': 'string'}, 'value': {'type': 'int'}}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)

  def test_parse_struct(self):
    name = 'struct'
    type = 'struct<name:string,age:int>'
    comment = 'test_parse_struct'
    column = {'name': name, 'type': 'struct', 'comment': comment, 'fields': [{'name': 'name', 'type': 'string'}, {'name': 'age', 'type': 'int'}]}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)

  def test_parse_nested(self):
    name = 'nested'
    type = 'array<struct<name:string,age:int>>'
    comment = 'test_parse_nested'
    column = {'name': name, 'type': 'array', 'comment': comment, 'item': {'type': 'struct', 'fields': [{'name': 'name', 'type': 'string'}, {'name': 'age', 'type': 'int'}]}}
    parse_tree = parser.parse_column(name, type, comment)
    assert_equal(parse_tree, column)