views_test.py

  1. # -*- coding: utf-8 -*-
  2. # Licensed to Cloudera, Inc. under one
  3. # or more contributor license agreements. See the NOTICE file
  4. # distributed with this work for additional information
  5. # regarding copyright ownership. Cloudera, Inc. licenses this file
  6. # to you under the Apache License, Version 2.0 (the
  7. # "License"); you may not use this file except in compliance
  8. # with the License. You may obtain a copy of the License at
  9. #
  10. # http://www.apache.org/licenses/LICENSE-2.0
  11. #
  12. # Unless required by applicable law or agreed to in writing, software
  13. # distributed under the License is distributed on an "AS IS" BASIS,
  14. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. # See the License for the specific language governing permissions and
  16. # limitations under the License.
  17. #!/usr/bin/env python
  18. from __future__ import absolute_import
  19. from future import standard_library
  20. standard_library.install_aliases()
  21. from builtins import zip
  22. from builtins import range
  23. from builtins import object
  24. from builtins import str
  25. import json
  26. import logging
  27. import os
  28. import re
  29. import stat
  30. import sys
  31. import tempfile
  32. import urllib.request, urllib.error
  33. import urllib.parse
  34. from time import sleep, time
  35. from avro import schema, datafile, io
  36. from aws.s3.s3fs import S3FileSystemException
  37. from aws.s3.s3test_utils import get_test_bucket
  38. from azure.conf import is_abfs_enabled, is_adls_enabled
  39. from django.urls import reverse
  40. from django.utils.encoding import smart_str
  41. from django.utils.translation import ugettext_lazy as _
  42. from nose.plugins.attrib import attr
  43. from nose.plugins.skip import SkipTest
  44. from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises,\
  45. assert_greater
  46. from desktop.lib.django_test_util import make_logged_in_client
  47. from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
  48. from desktop.lib.view_util import location_to_url
  49. from desktop.conf import is_oozie_enabled
  50. from hadoop import pseudo_hdfs4
  51. from hadoop.conf import UPLOAD_CHUNK_SIZE
  52. from hadoop.fs.webhdfs import WebHdfs
  53. from useradmin.models import User, Group
  54. from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE, MAX_SNAPPY_DECOMPRESSION_SIZE
  55. from filebrowser.lib.rwx import expand_mode
  56. from filebrowser.views import snappy_installed
  57. if sys.version_info[0] > 2:
  58. from urllib.parse import unquote as urllib_unquote, urlparse
  59. open_file = open
  60. else:
  61. from urllib import unquote as urllib_unquote
  62. from urlparse import urlparse
  63. open_file = file
  64. if sys.version_info[0] > 2:
  65. from unittest.mock import patch, Mock
  66. else:
  67. from mock import patch, Mock
  68. LOG = logging.getLogger(__name__)
  69. def cleanup_tree(cluster, path):
  70. try:
  71. cluster.fs.rmtree(path)
  72. except:
  73. # Don't let cleanup errors mask earlier failures
  74. LOG.exception('failed to cleanup %s' % path)
  75. def cleanup_file(cluster, path):
  76. try:
  77. cluster.fs.remove(path)
  78. except:
  79. # Don't let cleanup errors mask earlier failures
  80. LOG.exception('failed to cleanup %s' % path)
  81. class TestFileBrowser():
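  # Unit tests: the filesystem and snappy detection are mocked out below, so no live cluster is needed.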
  82. def setUp(self):
  83. self.client = make_logged_in_client(username="test_filebrowser", groupname='test_filebrowser', recreate=True, is_superuser=False)
  84. self.user = User.objects.get(username="test_filebrowser")
  85. grant_access(self.user.username, 'test_filebrowser', 'filebrowser')
  86. add_to_group(self.user.username, 'test_filebrowser')
  87. def test_listdir_paged(self):
  88. with patch('desktop.middleware.fsmanager.get_filesystem') as get_filesystem:
  89. with patch('filebrowser.views.snappy_installed') as snappy_installed:
  90. snappy_installed.return_value = False
  91. get_filesystem.return_value = Mock(
  92. stats=Mock(
  93. return_value=Mock(
  94. isDir=True,
  95. size=1024,
  96. path=b'/',
  97. mtime=None,
  98. mode=stat.S_IFDIR
  99. ),
  100. ),
  101. normpath=Mock(return_value='/'),
  102. listdir_stats=Mock(
  103. return_value=[]  # Add mock file stats here if needed
  104. ),
  105. superuser='hdfs',
  106. supergroup='hdfs'
  107. )
  108. response = self.client.get('/filebrowser/view=')
  109. assert_equal(200, response.status_code)
  110. dir_listing = response.context[0]['files']
  111. assert_equal(1, len(dir_listing))
  112. def test_listdir_paged_with_non_ascii(self):
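  # The mocked listing mixes Cyrillic, Chinese and accented file names to verify that the URLs in the JSON response are percent-encoded correctly.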
  113. parent_dir = Mock(
  114. isDir=True,
  115. size=0,
  116. path=u'/user/systest/test5/Tжейкоб/..',
  117. mtime=1581717441.0,
  118. mode=16877,
  119. user=u'systest',
  120. type=u'DIRECTORY',
  121. to_json_dict=Mock(
  122. return_value={'size': 0, 'group': u'supergroup', 'blockSize': 0, 'replication': 0, 'user': u'systest',
  123. 'mtime': 1581717441.0, 'path': u'/user/systest/test5/T\u0436\u0435\u0439\u043a\u043e\u0431/..',
  124. 'atime': 0.0, 'mode': 16877}
  125. )
  126. )
  127. parent_dir.name = u'..'
  128. self_dir = Mock(
  129. isDir=True,
  130. size=0,
  131. path=u'/user/systest/test5/Tжейкоб',
  132. mtime=1581717441.0,
  133. mode=16877,
  134. user=u'systest',
  135. type=u'DIRECTORY',
  136. to_json_dict=Mock(
  137. return_value={'size': 0, 'group': u'supergroup', 'blockSize': 0, 'replication': 0, 'user': u'systest',
  138. 'mtime': 1581717441.0, 'path': u'/user/systest/test5/T\u0436\u0435\u0439\u043a\u043e\u0431',
  139. 'atime': 0.0, 'mode': 16877}
  140. )
  141. )
  142. self_dir.name = u'Tжейкоб'
  143. file_1 = Mock(
  144. isDir=False,
  145. size=9,
  146. path=u'/user/systest/test5/Tжейкоб/file_1.txt',
  147. mtime=1581670301.0, mode=33279,
  148. user=u'systest',
  149. type=u'FILE',
  150. to_json_dict=Mock(
  151. return_value={'size': 9, 'group': u'supergroup', 'blockSize': 134217728, 'replication': 1, 'user': u'systest',
  152. 'mtime': 1581670301.0,
  153. 'path': u'/user/systest/test5/T\u0436\u0435\u0439\u043a\u043e\u0431/file_1.txt',
  154. 'atime': 1581708019.0, 'mode': 33279}
  155. )
  156. )
  157. file_1.name = u'file_1.txt'
  158. file_2 = Mock(
  159. isDir=False,
  160. size=0,
  161. path=u'/user/systest/test5/Tжейкоб/文件_2.txt',
  162. mtime=1581707672.0,
  163. mode=33188,
  164. user=u'systest',
  165. type=u'FILE',
  166. to_json_dict=Mock(
  167. return_value={'size': 18, 'group': u'supergroup', 'blockSize': 134217728, 'replication': 1, 'user': u'systest',
  168. 'mtime': 1581707672.0,
  169. 'path': u'/user/systest/test5/T\u0436\u0435\u0439\u043a\u043e\u0431/\u6587\u4ef6_2.txt',
  170. 'atime': 1581707672.0, 'mode': 33188}
  171. )
  172. )
  173. file_2.name = u'文件_2.txt'
  174. file_3 = Mock(
  175. isDir=False,
  176. size=0,
  177. path=u'/user/systest/test5/Tжейкоб/employés_file.txt',
  178. mtime=1581039792.0,
  179. mode=33188,
  180. user=u'systest',
  181. type=u'FILE',
  182. to_json_dict=Mock(
  183. return_value={'size': 0, 'group': u'supergroup', 'blockSize': 134217728, 'replication': 1, 'user': u'systest',
  184. 'mtime': 1581039792.0,
  185. 'path': u'/user/systest/test5/T\u0436\u0435\u0439\u043a\u043e\u0431/employ\xe9s_file.txt',
  186. 'atime': 1581708003.0, 'mode': 33188}
  187. )
  188. )
  189. file_3.name = u'employés_file.txt'
  190. with patch('desktop.middleware.fsmanager.get_filesystem') as get_filesystem:
  191. with patch('filebrowser.views.snappy_installed') as snappy_installed:
  192. snappy_installed.return_value = False
  193. get_filesystem.return_value = Mock(
  194. stats=Mock(
  195. return_value=self_dir
  196. ),
  197. normpath=WebHdfs.norm_path,
  198. is_sentry_managed=Mock(return_value=False),
  199. listdir_stats=Mock(
  200. return_value=[parent_dir, file_1, file_2, file_3]
  201. ),
  202. superuser='hdfs',
  203. supergroup='hdfs'
  204. )
  205. response = self.client.get('/filebrowser/view=%2Fuser%2Fsystest%2Ftest5%2FT%D0%B6%D0%B5%D0%B9%D0%BA%D0%BE%D0%B1?pagesize=45&pagenum=1&filter=&sortby=name&descending=false&format=json&_=1581670214204')
  206. assert_equal(200, response.status_code)
  207. dir_listing = json.loads(response.content)['files']
  208. assert_equal(5, len(dir_listing))
  209. assert_true(b'"url": "/filebrowser/view=%2Fuser%2Fsystest%2Ftest5",' in response.content, response.content)
  210. assert_true(b'"url": "/filebrowser/view=%2Fuser%2Fsystest%2Ftest5%2FT%D0%B6%D0%B5%D0%B9%D0%BA%D0%BE%D0%B1",' in response.content, response.content)
  211. assert_true(b'"url": "/filebrowser/view=%2Fuser%2Fsystest%2Ftest5%2FT%D0%B6%D0%B5%D0%B9%D0%BA%D0%BE%D0%B1%2Ffile_1.txt",' in response.content, response.content)
  212. assert_true(b'"url": "/filebrowser/view=%2Fuser%2Fsystest%2Ftest5%2FT%D0%B6%D0%B5%D0%B9%D0%BA%D0%BE%D0%B1%2F%E6%96%87%E4%BB%B6_2.txt",' in response.content, response.content)
  213. assert_true(b'"url": "/filebrowser/view=%2Fuser%2Fsystest%2Ftest5%2FT%D0%B6%D0%B5%D0%B9%D0%BA%D0%BE%D0%B1%2Femploy%C3%A9s_file.txt",' in response.content, response.content)
  214. class TestFileBrowserWithHadoop(object):
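  # Integration tests against the shared pseudo-HDFS cluster (flagged via requires_hadoop/integration below).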
  215. requires_hadoop = True
  216. integration = True
  217. def setUp(self):
  218. self.c = make_logged_in_client(username='test', is_superuser=False)
  219. grant_access('test', 'test', 'filebrowser')
  220. grant_access('test', 'test', 'jobbrowser')
  221. add_to_group('test')
  222. self.user = User.objects.get(username='test')
  223. self.cluster = pseudo_hdfs4.shared_cluster()
  224. self.cluster.fs.setuser('test')
  225. self.prefix = self.cluster.fs_prefix + '/filebrowser'
  226. self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
  227. def tearDown(self):
  228. cleanup_tree(self.cluster, self.prefix)
  229. assert_false(self.cluster.fs.exists(self.prefix))
  230. self.cluster.fs.setuser('test')
  231. def test_remove(self):
  232. prefix = self.prefix + '/test-delete'
  233. PATH_1 = '%s/1' % prefix
  234. PATH_2 = '%s/2' % prefix
  235. PATH_3 = '%s/3' % prefix
  236. self.cluster.fs.mkdir(prefix)
  237. self.cluster.fs.mkdir(PATH_1)
  238. self.cluster.fs.mkdir(PATH_2)
  239. self.cluster.fs.mkdir(PATH_3)
  240. assert_true(self.cluster.fs.exists(PATH_1))
  241. assert_true(self.cluster.fs.exists(PATH_2))
  242. assert_true(self.cluster.fs.exists(PATH_3))
  243. self.c.post('/filebrowser/rmtree', dict(path=[PATH_1]))
  244. assert_false(self.cluster.fs.exists(PATH_1))
  245. assert_true(self.cluster.fs.exists(PATH_2))
  246. assert_true(self.cluster.fs.exists(PATH_3))
  247. self.c.post('/filebrowser/rmtree', dict(path=[PATH_2, PATH_3]))
  248. assert_false(self.cluster.fs.exists(PATH_1))
  249. assert_false(self.cluster.fs.exists(PATH_2))
  250. assert_false(self.cluster.fs.exists(PATH_3))
  251. def test_move(self):
  252. prefix = self.cluster.fs_prefix + '/test-move'
  253. PATH_1 = '%s/1' % prefix
  254. PATH_2 = '%s/2' % prefix
  255. SUB_PATH1_1 = '%s/1' % PATH_1
  256. SUB_PATH1_2 = '%s/2' % PATH_1
  257. SUB_PATH1_3 = '%s/3' % PATH_1
  258. SUB_PATH2_1 = '%s/1' % PATH_2
  259. SUB_PATH2_2 = '%s/2' % PATH_2
  260. SUB_PATH2_3 = '%s/3' % PATH_2
  261. self.cluster.fs.mkdir(prefix)
  262. self.cluster.fs.mkdir(PATH_1)
  263. self.cluster.fs.mkdir(PATH_2)
  264. self.cluster.fs.mkdir(SUB_PATH1_1)
  265. self.cluster.fs.mkdir(SUB_PATH1_2)
  266. self.cluster.fs.mkdir(SUB_PATH1_3)
  267. assert_true(self.cluster.fs.exists(SUB_PATH1_1))
  268. assert_true(self.cluster.fs.exists(SUB_PATH1_2))
  269. assert_true(self.cluster.fs.exists(SUB_PATH1_3))
  270. assert_false(self.cluster.fs.exists(SUB_PATH2_1))
  271. assert_false(self.cluster.fs.exists(SUB_PATH2_2))
  272. assert_false(self.cluster.fs.exists(SUB_PATH2_3))
  273. self.c.post('/filebrowser/move', dict(src_path=[SUB_PATH1_1], dest_path=PATH_2))
  274. assert_false(self.cluster.fs.exists(SUB_PATH1_1))
  275. assert_true(self.cluster.fs.exists(SUB_PATH1_2))
  276. assert_true(self.cluster.fs.exists(SUB_PATH1_3))
  277. assert_true(self.cluster.fs.exists(SUB_PATH2_1))
  278. assert_false(self.cluster.fs.exists(SUB_PATH2_2))
  279. assert_false(self.cluster.fs.exists(SUB_PATH2_3))
  280. self.c.post('/filebrowser/move', dict(src_path=[SUB_PATH1_2, SUB_PATH1_3], dest_path=PATH_2))
  281. assert_false(self.cluster.fs.exists(SUB_PATH1_1))
  282. assert_false(self.cluster.fs.exists(SUB_PATH1_2))
  283. assert_false(self.cluster.fs.exists(SUB_PATH1_3))
  284. assert_true(self.cluster.fs.exists(SUB_PATH2_1))
  285. assert_true(self.cluster.fs.exists(SUB_PATH2_2))
  286. assert_true(self.cluster.fs.exists(SUB_PATH2_3))
  287. response = self.c.post('/filebrowser/move', dict(src_path=[SUB_PATH1_2, SUB_PATH1_3], dest_path=SUB_PATH1_2))
  288. assert_equal(500, response.status_code)
  289. def test_copy(self):
  290. prefix = self.cluster.fs_prefix + '/test-copy'
  291. PATH_1 = '%s/1' % prefix
  292. PATH_2 = '%s/2' % prefix
  293. SUB_PATH1_1 = '%s/1' % PATH_1
  294. SUB_PATH1_2 = '%s/2' % PATH_1
  295. SUB_PATH1_3 = '%s/3' % PATH_1
  296. SUB_PATH2_1 = '%s/1' % PATH_2
  297. SUB_PATH2_2 = '%s/2' % PATH_2
  298. SUB_PATH2_3 = '%s/3' % PATH_2
  299. self.cluster.fs.mkdir(prefix)
  300. self.cluster.fs.mkdir(PATH_1)
  301. self.cluster.fs.mkdir(PATH_2)
  302. self.cluster.fs.mkdir(SUB_PATH1_1)
  303. self.cluster.fs.mkdir(SUB_PATH1_2)
  304. self.cluster.fs.mkdir(SUB_PATH1_3)
  305. assert_true(self.cluster.fs.exists(SUB_PATH1_1))
  306. assert_true(self.cluster.fs.exists(SUB_PATH1_2))
  307. assert_true(self.cluster.fs.exists(SUB_PATH1_3))
  308. assert_false(self.cluster.fs.exists(SUB_PATH2_1))
  309. assert_false(self.cluster.fs.exists(SUB_PATH2_2))
  310. assert_false(self.cluster.fs.exists(SUB_PATH2_3))
  311. self.c.post('/filebrowser/copy', dict(src_path=[SUB_PATH1_1], dest_path=PATH_2))
  312. assert_true(self.cluster.fs.exists(SUB_PATH1_1))
  313. assert_true(self.cluster.fs.exists(SUB_PATH1_2))
  314. assert_true(self.cluster.fs.exists(SUB_PATH1_3))
  315. assert_true(self.cluster.fs.exists(SUB_PATH2_1))
  316. assert_false(self.cluster.fs.exists(SUB_PATH2_2))
  317. assert_false(self.cluster.fs.exists(SUB_PATH2_3))
  318. self.c.post('/filebrowser/copy', dict(src_path=[SUB_PATH1_2, SUB_PATH1_3], dest_path=PATH_2))
  319. assert_true(self.cluster.fs.exists(SUB_PATH1_1))
  320. assert_true(self.cluster.fs.exists(SUB_PATH1_2))
  321. assert_true(self.cluster.fs.exists(SUB_PATH1_3))
  322. assert_true(self.cluster.fs.exists(SUB_PATH2_1))
  323. assert_true(self.cluster.fs.exists(SUB_PATH2_2))
  324. assert_true(self.cluster.fs.exists(SUB_PATH2_3))
  325. def test_mkdir_singledir(self):
  326. prefix = self.cluster.fs_prefix + '/test-filebrowser-mkdir'
  327. # We test that mkdir fails when a non-relative path is provided and a multi-level path is provided.
  328. success_path = 'mkdir_singledir'
  329. path_absolute = '/mkdir_singledir'
  330. path_fail = 'fail/foo'
  331. path_other_failure = 'fail#bar'
  332. # Two of the following post requests should throw exceptions.
  333. # See https://issues.cloudera.org/browse/HUE-793.
  334. self.c.post('/filebrowser/mkdir', dict(path=prefix, name=path_fail))
  335. self.c.post('/filebrowser/mkdir', dict(path=prefix, name=path_other_failure))
  336. self.c.post('/filebrowser/mkdir', dict(path=prefix, name=path_absolute))
  337. self.c.post('/filebrowser/mkdir', dict(path=prefix, name=success_path))
  338. # Read the parent dir and make sure we created 'success_path' only.
  339. response = self.c.get('/filebrowser/view=' + prefix)
  340. dir_listing = response.context[0]['files']
  341. assert_equal(3, len(dir_listing))
  342. assert_equal(dir_listing[2]['name'], success_path)
  343. def test_touch(self):
  344. prefix = self.cluster.fs_prefix + '/test-filebrowser-touch'
  345. success_path = 'touch_file'
  346. path_absolute = '/touch_file'
  347. path_fail = 'touch_fail/file'
  348. self.cluster.fs.mkdir(prefix)
  349. resp = self.c.post('/filebrowser/touch', dict(path=prefix, name=path_fail))
  350. assert_equal(500, resp.status_code)
  351. resp = self.c.post('/filebrowser/touch', dict(path=prefix, name=path_absolute))
  352. assert_equal(500, resp.status_code)
  353. resp = self.c.post('/filebrowser/touch', dict(path=prefix, name=success_path))
  354. assert_equal(200, resp.status_code)
  355. # Read the parent dir and make sure we created 'success_path' only.
  356. response = self.c.get('/filebrowser/view=' + prefix)
  357. file_listing = response.context[0]['files']
  358. assert_equal(3, len(file_listing))
  359. assert_equal(file_listing[2]['name'], success_path)
  360. def test_chmod(self):
  361. prefix = self.cluster.fs_prefix + '/test_chmod'
  362. PATH = "%s/chmod_test" % prefix
  363. SUBPATH = PATH + '/test'
  364. self.cluster.fs.mkdir(SUBPATH)
  365. permissions = ('user_read', 'user_write', 'user_execute',
  366. 'group_read', 'group_write', 'group_execute',
  367. 'other_read', 'other_write', 'other_execute',
  368. 'sticky') # Order matters!
  369. # Get current mode, change mode, check mode
  370. # Start with checking current mode
  371. assert_not_equal(0o41777, int(self.cluster.fs.stats(PATH)["mode"]))
  372. # Setup post data
  373. permissions_dict = dict( list(zip(permissions, [True]*len(permissions))) )
  374. kwargs = {'path': [PATH]}
  375. kwargs.update(permissions_dict)
  376. # Set 1777, then check permissions of dirs
  377. response = self.c.post("/filebrowser/chmod", kwargs)
  378. assert_equal(0o41777, int(self.cluster.fs.stats(PATH)["mode"]))
  379. # Now do the above recursively
  380. assert_not_equal(0o41777, int(self.cluster.fs.stats(SUBPATH)["mode"]))
  381. kwargs['recursive'] = True
  382. response = self.c.post("/filebrowser/chmod", kwargs)
  383. assert_equal(0o41777, int(self.cluster.fs.stats(SUBPATH)["mode"]))
  384. # Test bulk chmod
  385. PATH_2 = "%s/test-chmod2" % prefix
  386. PATH_3 = "%s/test-chmod3" % prefix
  387. self.cluster.fs.mkdir(PATH_2)
  388. self.cluster.fs.mkdir(PATH_3)
  389. kwargs['path'] = [PATH_2, PATH_3]
  390. assert_not_equal(0o41777, int(self.cluster.fs.stats(PATH_2)["mode"]))
  391. assert_not_equal(0o41777, int(self.cluster.fs.stats(PATH_3)["mode"]))
  392. self.c.post("/filebrowser/chmod", kwargs)
  393. assert_equal(0o41777, int(self.cluster.fs.stats(PATH_2)["mode"]))
  394. assert_equal(0o41777, int(self.cluster.fs.stats(PATH_3)["mode"]))
  395. def test_chmod_sticky(self):
  396. prefix = self.cluster.fs_prefix + '/test_chmod_sticky'
  397. PATH = "%s/chmod_test" % prefix
  398. self.cluster.fs.mkdir(PATH)
  399. # Get current mode and make sure sticky bit is off
  400. mode = expand_mode( int(self.cluster.fs.stats(PATH)["mode"]) )
  401. assert_equal(False, mode[-1])
  402. # Setup post data
  403. permissions = ('user_read', 'user_write', 'user_execute',
  404. 'group_read', 'group_write', 'group_execute',
  405. 'other_read', 'other_write', 'other_execute',
  406. 'sticky') # Order matters!
  407. permissions_dict = dict([x for x in zip(permissions, mode) if x[1]])
  408. permissions_dict['sticky'] = True
  409. kwargs = {'path': [PATH]}
  410. kwargs.update(permissions_dict)
  411. # Set sticky bit, then check sticky bit is on in hdfs
  412. response = self.c.post("/filebrowser/chmod", kwargs)
  413. mode = expand_mode( int(self.cluster.fs.stats(PATH)["mode"]) )
  414. assert_equal(True, mode[-1])
  415. # Unset sticky bit, then check sticky bit is off in hdfs
  416. del kwargs['sticky']
  417. response = self.c.post("/filebrowser/chmod", kwargs)
  418. mode = expand_mode( int(self.cluster.fs.stats(PATH)["mode"]) )
  419. assert_equal(False, mode[-1])
  420. def test_chown(self):
  421. prefix = self.cluster.fs_prefix + '/test_chown'
  422. self.cluster.fs.mkdir(prefix)
  423. # Login as Non Hadoop superuser
  424. response = self.c.post(reverse('index'))
  425. assert_false('Change owner' in response.content)
  426. # Only the Hadoop superuser really has carte blanche here
  427. c2 = make_logged_in_client(self.cluster.superuser)
  428. self.cluster.fs.setuser(self.cluster.superuser)
  429. PATH = u"%s/test-chown-en-Español" % prefix
  430. self.cluster.fs.mkdir(PATH)
  431. c2.post("/filebrowser/chown", dict(path=[PATH], user="x", group="y"))
  432. assert_equal("x", self.cluster.fs.stats(PATH)["user"])
  433. assert_equal("y", self.cluster.fs.stats(PATH)["group"])
  434. c2.post("/filebrowser/chown", dict(path=[PATH], user="__other__", user_other="z", group="y"))
  435. assert_equal("z", self.cluster.fs.stats(PATH)["user"])
  436. # Now check recursive
  437. SUBPATH = PATH + '/test'
  438. self.cluster.fs.mkdir(SUBPATH)
  439. c2.post("/filebrowser/chown", dict(path=[PATH], user="x", group="y", recursive=True))
  440. assert_equal("x", self.cluster.fs.stats(SUBPATH)["user"])
  441. assert_equal("y", self.cluster.fs.stats(SUBPATH)["group"])
  442. c2.post("/filebrowser/chown", dict(path=[PATH], user="__other__", user_other="z", group="y", recursive=True))
  443. assert_equal("z", self.cluster.fs.stats(SUBPATH)["user"])
  444. # Test bulk chown
  445. PATH_2 = u"/test-chown-en-Español2"
  446. PATH_3 = u"/test-chown-en-Español3"
  447. self.cluster.fs.mkdir(PATH_2)
  448. self.cluster.fs.mkdir(PATH_3)
  449. c2.post("/filebrowser/chown", dict(path=[PATH_2, PATH_3], user="x", group="y", recursive=True))
  450. assert_equal("x", self.cluster.fs.stats(PATH_2)["user"])
  451. assert_equal("y", self.cluster.fs.stats(PATH_2)["group"])
  452. assert_equal("x", self.cluster.fs.stats(PATH_3)["user"])
  453. assert_equal("y", self.cluster.fs.stats(PATH_3)["group"])
  454. def test_rename(self):
  455. prefix = self.cluster.fs_prefix + '/test_rename'
  456. self.cluster.fs.mkdir(prefix)
  457. PREFIX = u"%s/test-rename/" % prefix
  458. NAME = u"test-rename-before"
  459. NEW_NAME = u"test-rename-after"
  460. self.cluster.fs.mkdir(PREFIX + NAME)
  461. op = "rename"
  462. # test for full path rename
  463. self.c.post("/filebrowser/rename", dict(src_path=PREFIX + NAME, dest_path=PREFIX + NEW_NAME))
  464. assert_true(self.cluster.fs.exists(PREFIX + NEW_NAME))
  465. # test for smart rename
  466. self.c.post("/filebrowser/rename", dict(src_path=PREFIX + NAME, dest_path=NEW_NAME))
  467. assert_true(self.cluster.fs.exists(PREFIX + NEW_NAME))
  468. def test_listdir(self):
  469. # Delete user's home if there's already something there
  470. home = self.cluster.fs.do_as_user('test', self.cluster.fs.get_home_dir)
  471. if self.cluster.fs.exists(home):
  472. self.cluster.fs.do_as_superuser(self.cluster.fs.rmtree, home)
  473. response = self.c.get('/filebrowser/')
  474. # Since we deleted the home directory... home_directory context should be None.
  475. assert_false(response.context[0]['home_directory'], response.context[0]['home_directory'])
  476. self.cluster.fs.do_as_superuser(self.cluster.fs.mkdir, home)
  477. self.cluster.fs.do_as_superuser(self.cluster.fs.chown, home, 'test', 'test')
  478. # These paths contain non-ascii characters. Your editor will need the
  479. # corresponding font library to display them correctly.
  480. #
  481. # We test that mkdir can handle unicode strings as well as byte strings.
  482. # And even when the byte string can't be decoded properly (big5), the listdir
  483. # still succeeds.
  484. orig_paths = [
  485. u'greek-Ελληνικά',
  486. u'chinese-漢語',
  487. ]
  488. prefix = home + '/test-filebrowser/'
  489. for path in orig_paths:
  490. self.c.post('/filebrowser/mkdir', dict(path=prefix, name=path))
  491. # Read the parent dir
  492. response = self.c.get('/filebrowser/view=' + prefix)
  493. dir_listing = response.context[0]['files']
  494. assert_equal(len(orig_paths) + 2, len(dir_listing))
  495. for dirent in dir_listing:
  496. path = dirent['name']
  497. if path in ('.', '..'):
  498. continue
  499. assert_true(path in orig_paths)
  500. # Drill down into the subdirectory
  501. url = urllib.parse.urlsplit(dirent['url'])[2]
  502. resp = self.c.get(url)
  503. # We are actually reading a directory
  504. assert_equal('.', resp.context[0]['files'][1]['name'])
  505. assert_equal('..', resp.context[0]['files'][0]['name'])
  506. # Test's home directory now exists. Should be returned.
  507. response = self.c.get('/filebrowser/view=' + prefix)
  508. assert_equal(response.context[0]['home_directory'], home)
  509. # Test URL conflicts with filenames
  510. stat_dir = '%sstat/dir' % prefix
  511. self.cluster.fs.do_as_user('test', self.cluster.fs.mkdir, stat_dir)
  512. response = self.c.get('/filebrowser/view=%s' % stat_dir)
  513. assert_equal(stat_dir, response.context[0]['path'])
  514. response = self.c.get('/filebrowser/view=/test-filebrowser/?default_to_home')
  515. assert_true(re.search('%s$' % home, urllib_unquote(response['Location'])))
  516. # Test path relative to home directory
  517. self.cluster.fs.do_as_user('test', self.cluster.fs.mkdir, '%s/test_dir' % home)
  518. response = self.c.get('/filebrowser/home_relative_view=/test_dir')
  519. assert_equal('%s/test_dir' % home, response.context[0]['path'])
  520. def test_listdir_sort_and_filter(self):
  521. prefix = self.cluster.fs_prefix + '/test_rename'
  522. self.cluster.fs.mkdir(prefix)
  523. BASE = '%s/test_sort_and_filter' % prefix
  524. FUNNY_NAME = u'greek-Ελληνικά'
  525. self.cluster.fs.mkdir(BASE)
  526. # Create 10 files
  527. for i in range(1, 11):
  528. self.cluster.fs.create(self.cluster.fs.join(BASE, str(i)), data="foo" * i)
  529. # Create 1 funny name directory
  530. self.cluster.fs.mkdir(self.cluster.fs.join(BASE, FUNNY_NAME))
  531. # All 13 of the entries
  532. expect = [ '..', '.', FUNNY_NAME] + [ str(i) for i in range(1, 11) ]
  533. # Check pagination
  534. listing = self.c.get('/filebrowser/view=' + BASE + '?pagesize=20').context[0]['files']
  535. assert_equal(len(expect), len(listing))
  536. listing = self.c.get('/filebrowser/view=' + BASE + '?pagesize=10').context[0]['files']
  537. assert_equal(12, len(listing))
  538. listing = self.c.get('/filebrowser/view=' + BASE + '?pagesize=10&pagenum=1').context[0]['files']
  539. assert_equal(12, len(listing))
  540. listing = self.c.get('/filebrowser/view=' + BASE + '?pagesize=10&pagenum=2').context[0]['files']
  541. assert_equal(3, len(listing))
  542. # Check sorting (name)
  543. listing = self.c.get('/filebrowser/view=' + BASE + '?sortby=name').context[0]['files']
  544. assert_equal(sorted(expect[2:]), [ f['name'] for f in listing ][2:])
  545. listing = self.c.get('/filebrowser/view=' + BASE + '?sortby=name&descending=false').context[0]['files']
  546. assert_equal(sorted(expect[2:]), [ f['name'] for f in listing ][2:])
  547. listing = self.c.get('/filebrowser/view=' + BASE + '?sortby=name&descending=true').context[0]['files']
  548. assert_equal(".", listing[1]['name'])
  549. assert_equal("..", listing[0]['name'])
  550. assert_equal(FUNNY_NAME, listing[2]['name'])
  551. # Check sorting (size)
  552. listing = self.c.get('/filebrowser/view=' + BASE + '?sortby=size').context[0]['files']
  553. assert_equal(expect, [ f['name'] for f in listing ])
  554. # Check sorting (mtime)
  555. listing = self.c.get('/filebrowser/view=' + BASE + '?sortby=mtime').context[0]['files']
  556. assert_equal(".", listing[1]['name'])
  557. assert_equal("..", listing[0]['name'])
  558. assert_equal(FUNNY_NAME, listing[-1]['name'])
  559. # Check filter
  560. listing = self.c.get('/filebrowser/view=' + BASE + '?filter=1').context[0]['files']
  561. assert_equal(['..', '.', '1', '10'], [ f['name'] for f in listing ])
  562. listing = self.c.get('/filebrowser/view=' + BASE + '?filter=' + FUNNY_NAME).context[0]['files']
  563. assert_equal(['..', '.', FUNNY_NAME], [ f['name'] for f in listing ])
  564. # Check filter + sorting
  565. listing = self.c.get('/filebrowser/view=' + BASE + '?filter=1&sortby=name&descending=true').context[0]['files']
  566. assert_equal(['..', '.', '10', '1'], [ f['name'] for f in listing ])
  567. # Check filter + sorting + pagination
  568. listing = self.c.get('/filebrowser/view=' + BASE + '?filter=1&sortby=name&descending=true&pagesize=1&pagenum=2').context[0]['files']
  569. assert_equal(['..', '.', '1'], [ f['name'] for f in listing ])
  570. # Check filter with empty results
  571. resp = self.c.get('/filebrowser/view=' + BASE + '?filter=empty&sortby=name&descending=true&pagesize=1&pagenum=2')
  572. listing = resp.context[0]['files']
  573. assert_equal([], listing)
  574. page = resp.context[0]['page']
  575. assert_equal({}, page)
  576. def test_view_snappy_compressed(self):
  577. if not snappy_installed():
  578. raise SkipTest
  579. import snappy
  580. cluster = pseudo_hdfs4.shared_cluster()
  581. finish = []
  582. try:
  583. prefix = self.cluster.fs_prefix + '/test_view_snappy_compressed'
  584. self.cluster.fs.mkdir(prefix)
  585. f = cluster.fs.open(prefix + '/test-view.snappy', "w")
  586. f.write(snappy.compress('This is a test of the emergency broadcasting system.'))
  587. f.close()
  588. f = cluster.fs.open(prefix + '/test-view.stillsnappy', "w")
  589. f.write(snappy.compress('The broadcasters of your area in voluntary cooperation with the FCC and other authorities.'))
  590. f.close()
  591. f = cluster.fs.open(prefix + '/test-view.notsnappy', "w")
  592. f.write('foobar')
  593. f.close()
  594. # Snappy compressed fail
  595. response = self.c.get('/filebrowser/view=%s/test-view.notsnappy?compression=snappy' % prefix)
  596. assert_true('Failed to decompress' in response.context[0]['message'], response)
  597. # Snappy compressed succeed
  598. response = self.c.get('/filebrowser/view=%s/test-view.snappy' % prefix)
  599. assert_equal('snappy', response.context[0]['view']['compression'])
  600. assert_equal(response.context[0]['view']['contents'], 'This is a test of the emergency broadcasting system.', response)
  601. # Snappy compressed succeed
  602. response = self.c.get('/filebrowser/view=%s/test-view.stillsnappy' % prefix)
  603. assert_equal('snappy', response.context[0]['view']['compression'])
  604. assert_equal(response.context[0]['view']['contents'], 'The broadcasters of your area in voluntary cooperation with the FCC and other authorities.', response)
  605. # A file larger than the allowed max snappy decompression size should be rejected
  606. finish.append( MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1) )
  607. response = self.c.get('/filebrowser/view=%s/test-view.stillsnappy?compression=snappy' % prefix)
  608. assert_true('File size is greater than allowed max snappy decompression size of 1' in response.context[0]['message'], response)
  609. finally:
  610. for done in finish:
  611. done()
  612. def test_view_snappy_compressed_avro(self):
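  # Writes an Avro container file with the snappy codec and checks that the view reports 'avro' compression and decodes the record.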
  613. if not snappy_installed():
  614. raise SkipTest
  615. import snappy
  616. finish = []
  617. try:
  618. prefix = self.cluster.fs_prefix + '/test-snappy-avro-filebrowser'
  619. self.cluster.fs.mkdir(prefix)
  620. test_schema = schema.parse("""
  621. {
  622. "name": "test",
  623. "type": "record",
  624. "fields": [
  625. { "name": "name", "type": "string" },
  626. { "name": "integer", "type": "int" }
  627. ]
  628. }
  629. """)
  630. # Cannot use StringIO with datafile writer!
  631. f = self.cluster.fs.open(prefix +'/test-view.compressed.avro', "w")
  632. data_file_writer = datafile.DataFileWriter(f, io.DatumWriter(),
  633. writers_schema=test_schema,
  634. codec='snappy')
  635. dummy_datum = {
  636. 'name': 'Test',
  637. 'integer': 10,
  638. }
  639. data_file_writer.append(dummy_datum)
  640. data_file_writer.close()
  641. f.close()
  642. # Check to see if snappy is the codec
  643. f = self.cluster.fs.open(prefix + '/test-view.compressed.avro', "r")
  644. assert_true('snappy' in f.read())
  645. f.close()
  646. # Snappy compressed succeed
  647. response = self.c.get('/filebrowser/view=%s/test-view.compressed.avro' % prefix)
  648. assert_equal('avro', response.context[0]['view']['compression'])
  649. assert_equal(eval(response.context[0]['view']['contents']), dummy_datum, response)
  650. finally:
  651. for done in finish:
  652. done()
  653. def test_view_avro(self):
  654. prefix = self.cluster.fs_prefix + '/test_view_avro'
  655. self.cluster.fs.mkdir(prefix)
  656. test_schema = schema.parse("""
  657. {
  658. "name": "test",
  659. "type": "record",
  660. "fields": [
  661. { "name": "name", "type": "string" },
  662. { "name": "integer", "type": "int" }
  663. ]
  664. }
  665. """)
  666. f = self.cluster.fs.open(prefix + '/test-view.avro', "w")
  667. data_file_writer = datafile.DataFileWriter(f, io.DatumWriter(), writers_schema=test_schema, codec='deflate')
  668. dummy_datum = {
  669. 'name': 'Test',
  670. 'integer': 10,
  671. }
  672. data_file_writer.append(dummy_datum)
  673. data_file_writer.close()
  674. # autodetect
  675. response = self.c.get('/filebrowser/view=%s/test-view.avro' % prefix)
  676. # (Note: we use eval here because of an incompatibility issue between
  677. # the representation strings of JSON dicts in simplejson vs. json)
  678. assert_equal(eval(response.context[0]['view']['contents']), dummy_datum)
  679. # offsetting should work as well
  680. response = self.c.get('/filebrowser/view=%s/test-view.avro?offset=1' % prefix)
  681. assert_equal('avro', response.context[0]['view']['compression'])
  682. f = self.cluster.fs.open(prefix + '/test-view2.avro', "w")
  683. f.write("hello")
  684. f.close()
  685. # we shouldn't autodetect non avro files
  686. response = self.c.get('/filebrowser/view=%s/test-view2.avro' % prefix)
  687. assert_equal(response.context[0]['view']['contents'], "hello")
  688. # specifying a compression type that doesn't match the file should fail with a decompression error.
  689. response = self.c.get('/filebrowser/view=%s/test-view2.avro?compression=gzip' % prefix)
  690. assert_true('Failed to decompress' in response.context[0]['message'])
  691. def test_view_parquet(self):
  692. prefix = self.cluster.fs_prefix + '/test_view_parquet'
  693. self.cluster.fs.mkdir(prefix)
  694. # Parquet file encoded as hex.
  695. test_data = "50415231150015d40115d4012c15321500150615080000020000003201000000000100000002000000030000000400000005000000060000000700000008000000090000000a0000000b0000000c0000000d0000000e0000000f000000100000001100000012000000130000001400000015000000160000001700000018000000150015b60415b6042c1532150015061508000002000000320107000000414c474552494109000000415247454e54494e41060000004252415a494c0600000043414e41444105000000454759505408000000455448494f504941060000004652414e4345070000004745524d414e5905000000494e44494109000000494e444f4e45534941040000004952414e0400000049524151050000004a4150414e060000004a4f5244414e050000004b454e5941070000004d4f524f43434f0a0000004d4f5a414d42495155450400000050455255050000004348494e4107000000524f4d414e49410c00000053415544492041524142494107000000564945544e414d060000005255535349410e000000554e49544544204b494e47444f4d0d000000554e4954454420535441544553150015d40115d4012c1532150015061508000002000000320100000000010000000100000001000000040000000000000003000000030000000200000002000000040000000400000002000000040000000000000000000000000000000100000002000000030000000400000002000000030000000300000001000000150015d61e15d61e2c153215001506150800000200000032013300000020686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761694c000000616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e6b0000007920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c20650000006561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c6463000000792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f7574732061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220641f00000076656e207061636b616765732077616b6520717569636b6c792e207265677526000000726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e693a0000006c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f41000000737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e7200000020736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c320000006566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e20420000006e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569636b6c7920726567756c61240000006f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c652061370000006963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070615d0000002070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420745a000000726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6
174656c6574733f2d000000732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920726a000000706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e5b0000006320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f736f000000756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c792069726f6e6963206163636f756e744e00000074732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c792e00000068656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c204f00000020726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e743d00000065616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c6e000000792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062651502195c48016d15080015022502180a6e6174696f6e5f6b657900150c250218046e616d650015022502180a726567696f6e5f6b657900150c2502180b636f6d6d656e745f636f6c001632191c194c26081c1502190519180a6e6174696f6e5f6b65791500163216fa0116fa01260800002682021c150c19051918046e616d651500163216dc0416dc04268202000026de061c1502190519180a726567696f6e5f6b65791500163216fa0116fa0126de06000026d8081c150c190519180b636f6d6d656e745f636f6c1500163216fc1e16fc1e26d80800001600163200280a706172717565742d6d7200ea00000050415231"
  696. f = self.cluster.fs.open(prefix + '/test-parquet.parquet', "w")
  697. f.write(test_data.decode('hex'))
  698. # autodetect
  699. response = self.c.get('/filebrowser/view=%s/test-parquet.parquet' % prefix)
  700. assert_true('FRANCE' in response.context[0]['view']['contents'])
  701. def test_view_parquet_snappy(self):
  702. if not snappy_installed():
  703. raise SkipTest
  704. prefix = self.cluster.fs_prefix + '/test_view_parquet_snappy'
  705. self.cluster.fs.mkdir(prefix)
  706. with open('apps/filebrowser/src/filebrowser/test_data/parquet-snappy.parquet') as f:
  707. hdfs = self.cluster.fs.open(prefix + '/test-parquet-snappy.parquet', "w")
  708. hdfs.write(f.read())
  709. # autodetect
  710. response = self.c.get('/filebrowser/view=%s/test-parquet-snappy.parquet' % prefix)
  711. assert_true('SR3_ndw_otlt_cmf_xref_INA' in response.context[0]['view']['contents'], response.context[0]['view']['contents'])
  712. def test_view_bz2(self):
  713. prefix = self.cluster.fs_prefix + '/test_view_bz2'
  714. self.cluster.fs.mkdir(prefix)
  715. # Bz2 file encoded as hex.
  716. test_data = "425a6839314159265359338bcfac000001018002000c00200021981984185dc914e14240ce2f3eb0"
  717. f = self.cluster.fs.open(prefix + '/test-view.bz2', "w")
  718. f.write(test_data.decode('hex'))
  719. # autodetect
  720. response = self.c.get('/filebrowser/view=%s/test-view.bz2?compression=bz2' % prefix)
  721. assert_true('test' in response.context[0]['view']['contents'])
  722. response = self.c.get('/filebrowser/view=%s/test-view.bz2' % prefix)
  723. assert_true('test' in response.context[0]['view']['contents'])
  724. def test_view_gz(self):
  725. prefix = self.cluster.fs_prefix + '/test_view_gz'
  726. self.cluster.fs.mkdir(prefix)
  727. f = self.cluster.fs.open(prefix + '/test-view.gz', "w")
  728. sdf_string = '\x1f\x8b\x08\x082r\xf4K\x00\x03f\x00+NI\xe3\x02\x00\xad\x96b\xc4\x04\x00\x00\x00'
  729. f.write(sdf_string)
  730. f.close()
  731. response = self.c.get('/filebrowser/view=%s/test-view.gz?compression=gzip' % prefix)
  732. assert_equal(response.context[0]['view']['contents'], "sdf\n")
  733. # autodetect
  734. response = self.c.get('/filebrowser/view=%s/test-view.gz' % prefix)
  735. assert_equal(response.context[0]['view']['contents'], "sdf\n")
  736. # ensure compression note is rendered
  737. assert_equal(response.context[0]['view']['compression'], "gzip")
  738. assert_true('Output rendered from compressed' in response.content, response.content)
  740. # offsets are not supported for compressed files and should return an error
  740. response = self.c.get('/filebrowser/view=%s/test-view.gz?compression=gzip&offset=1' % prefix)
  741. assert_true("Offsets are not supported" in response.context[0]['message'], response.context[0]['message'])
  742. f = self.cluster.fs.open(prefix + '/test-view2.gz', "w")
  743. f.write("hello")
  744. f.close()
  745. # we shouldn't autodetect non gzip files
  746. response = self.c.get('/filebrowser/view=%s/test-view2.gz' % prefix)
  747. assert_equal(response.context[0]['view']['contents'], "hello")
  748. # specifying a compression type that doesn't match the file should fail with a decompression error.
  749. response = self.c.get('/filebrowser/view=%s/test-view2.gz?compression=gzip' % prefix)
  750. assert_true("Failed to decompress" in response.context[0]['message'])
  751. def test_view_i18n(self):
  752. # Test viewing files in different encodings
  753. content = u'pt-Olá en-hello ch-你好 ko-안녕 ru-Здравствуйте'
  754. view_i18n_helper(self.c, self.cluster, 'utf-8', content)
  755. view_i18n_helper(self.c, self.cluster, 'utf-16', content)
  756. content = u'你好-big5'
  757. view_i18n_helper(self.c, self.cluster, 'big5', content)
  758. content = u'こんにちは-shift-jis'
  759. view_i18n_helper(self.c, self.cluster, 'shift_jis', content)
  760. content = u'안녕하세요-johab'
  761. view_i18n_helper(self.c, self.cluster, 'johab', content)
  762. # Test that the default view is home
  763. response = self.c.get('/filebrowser/view=/')
  764. assert_equal(response.context[0]['path'], '/')
  765. response = self.c.get('/filebrowser/view=/?default_to_home=1')
  766. assert_equal("/filebrowser/view=/user/test", urllib_unquote(response["location"]))
  767. def test_view_access(self):
  768. prefix = self.cluster.fs_prefix
  769. NO_PERM_DIR = prefix + '/test-no-perm'
  770. self.cluster.fs.mkdir(NO_PERM_DIR, mode='700')
  771. c_no_perm = make_logged_in_client(username='no_home')
  772. response = c_no_perm.get('/filebrowser/view=%s' % NO_PERM_DIR)
  773. assert_true('Cannot access' in response.context[0]['message'])
  774. response = self.c.get('/filebrowser/view=/test-does-not-exist')
  775. assert_true('Cannot access' in response.context[0]['message'])
  776. def test_index(self):
  777. HOME_DIR = '/user/test'
  778. NO_HOME_DIR = '/user/no_home'
  779. c_no_home = make_logged_in_client(username='no_home')
  780. if not self.cluster.fs.exists(HOME_DIR):
  781. self.cluster.fs.create_home_dir(HOME_DIR)
  782. assert_false(self.cluster.fs.exists(NO_HOME_DIR))
  783. response = self.c.get('/filebrowser', follow=True)
  784. assert_equal(HOME_DIR, response.context[0]['path'])
  785. assert_equal(HOME_DIR, response.context[0]['home_directory'])
  786. response = c_no_home.get('/filebrowser', follow=True)
  787. assert_equal('/', response.context[0]['path'])
  788. assert_equal(None, response.context[0]['home_directory'])
  789. def test_download(self):
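  # The file is named 'xss' and contains a script tag: even with disposition=inline the response should either redirect to WebHDFS or come back as an attachment, so the payload is never rendered inline.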
  790. prefix = self.cluster.fs_prefix + '/test_download'
  791. self.cluster.fs.mkdir(prefix)
  792. f = self.cluster.fs.open(prefix + '/xss', "w")
  793. sdf_string = '''<html>
  794. <head>
  795. <title>Hello</title>
  796. <script>
  797. alert("XSS")
  798. </script>
  799. </head>
  800. <body>
  801. <h1>I am evil</h1>
  802. </body>
  803. </html>'''
  804. f.write(sdf_string)
  805. f.close()
  806. response = self.c.get('/filebrowser/download=%s/xss?disposition=inline' % prefix, follow=False) # The client does not support redirecting to another host. follow=False
  807. if response.status_code == 302: # Redirects to webhdfs
  808. assert_true(response.url.find('webhdfs') >= 0)
  809. else:
  810. assert_equal(200, response.status_code)
  811. assert_equal('attachment', response['Content-Disposition'])
  812. # Download fails and displays exception because of missing permissions
  813. self.cluster.fs.chmod(prefix + '/xss', 0o700)
  814. not_me = make_logged_in_client("not_me", is_superuser=False)
  815. grant_access("not_me", "not_me", "filebrowser")
  816. response = not_me.get('/filebrowser/download=%s/xss?disposition=inline' % prefix, follow=True)
  817. assert_true('User not_me is not authorized to download' in response.context[0]['message'], response.context[0]['message'])
  818. def test_edit_i18n(self):
  819. prefix = self.cluster.fs_prefix + '/test_view_gz'
  820. self.cluster.fs.mkdir(prefix)
  821. # Test utf-8
  822. pass_1 = u'en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте'
  823. pass_2 = pass_1 + u'yi-העלא'
  824. edit_i18n_helper(self.c, self.cluster, 'utf-8', pass_1, pass_2)
  825. # Test utf-16
  826. edit_i18n_helper(self.c, self.cluster, 'utf-16', pass_1, pass_2)
  827. # Test cjk
  828. pass_1 = u'big5-你好'
  829. pass_2 = pass_1 + u'世界'
  830. edit_i18n_helper(self.c, self.cluster, 'big5', pass_1, pass_2)
  831. pass_1 = u'shift_jis-こんにちは'
  832. pass_2 = pass_1 + u'世界'
  833. edit_i18n_helper(self.c, self.cluster, 'shift_jis', pass_1, pass_2)
  834. pass_1 = u'johab-안녕하세요'
  835. pass_2 = pass_1 + u'세상'
  836. edit_i18n_helper(self.c, self.cluster, 'johab', pass_1, pass_2)
  837. def test_upload_file(self):
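  # Uploads a temp file larger than one upload chunk, then checks ownership, contents, the duplicate-upload error and the permission error for another user.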
  838. with tempfile.NamedTemporaryFile() as local_file:
  839. # Make sure we can upload larger than the UPLOAD chunk size
  840. file_size = UPLOAD_CHUNK_SIZE.get() * 2
  841. local_file.write('0' * file_size)
  842. local_file.flush()
  843. prefix = self.cluster.fs_prefix + '/test_upload_file'
  844. self.cluster.fs.mkdir(prefix)
  845. USER_NAME = 'test'
  846. HDFS_DEST_DIR = prefix + "/tmp/fb-upload-test"
  847. LOCAL_FILE = local_file.name
  848. HDFS_FILE = HDFS_DEST_DIR + '/' + os.path.basename(LOCAL_FILE)
  849. self.cluster.fs.do_as_superuser(self.cluster.fs.mkdir, HDFS_DEST_DIR)
  850. self.cluster.fs.do_as_superuser(self.cluster.fs.chown, HDFS_DEST_DIR, USER_NAME, USER_NAME)
  851. self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, HDFS_DEST_DIR, 0o700)
  852. stats = self.cluster.fs.stats(HDFS_DEST_DIR)
  853. assert_equal(stats['user'], USER_NAME)
  854. assert_equal(stats['group'], USER_NAME)
  855. # Upload the temporary file
  856. resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR, # GET param avoids infinite looping
  857. dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(LOCAL_FILE)))
  858. response = json.loads(resp.content)
  859. assert_equal(0, response['status'], response)
  860. stats = self.cluster.fs.stats(HDFS_FILE)
  861. assert_equal(stats['user'], USER_NAME)
  862. assert_equal(stats['group'], USER_NAME)
  863. f = self.cluster.fs.open(HDFS_FILE)
  864. actual = f.read(file_size)
  865. expected = open_file(LOCAL_FILE).read()
  866. assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))
  867. # Upload again; it fails because the file already exists
  868. resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
  869. dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(LOCAL_FILE)))
  870. response = json.loads(resp.content)
  871. assert_equal(-1, response['status'], response)
  872. assert_true('already exists' in response['data'], response)
  873. # Upload as another user and fail because of missing permissions
  874. not_me = make_logged_in_client("not_me", is_superuser=False)
  875. grant_access("not_me", "not_me", "filebrowser")
  876. try:
  877. resp = not_me.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
  878. dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(LOCAL_FILE)))
  879. response = json.loads(resp.content)
  880. assert_equal(-1, response['status'], response)
  881. assert_true('User not_me does not have permissions' in response['data'], response)
  882. except AttributeError:
  883. # Seems like a Django bug.
  884. # StopFutureHandlers() does not seem to work in test mode: it continues on to MemoryFileUploadHandler after the permission error and so fails.
  885. pass
  886. def test_extract_zip(self):
  887. ENABLE_EXTRACT_UPLOADED_ARCHIVE.set_for_testing(True)
  888. prefix = self.cluster.fs_prefix + '/test_upload_zip'
  889. self.cluster.fs.mkdir(prefix)
  890. USER_NAME = 'test'
  891. HDFS_DEST_DIR = prefix + "/tmp/fb-upload-test"
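  # The archive name contains a space ('te st.zip'), which also exercises upload and extraction of paths with spaces.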
  892. ZIP_FILE = os.path.realpath('apps/filebrowser/src/filebrowser/test_data/te st.zip')
  893. HDFS_ZIP_FILE = HDFS_DEST_DIR + '/te st.zip'
  894. try:
  895. self.cluster.fs.mkdir(HDFS_DEST_DIR)
  896. self.cluster.fs.chown(HDFS_DEST_DIR, USER_NAME)
  897. self.cluster.fs.chmod(HDFS_DEST_DIR, 0o700)
  898. # Upload archive
  899. resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
  900. dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(ZIP_FILE)))
  901. response = json.loads(resp.content)
  902. assert_equal(0, response['status'], response)
  903. assert_true(self.cluster.fs.exists(HDFS_ZIP_FILE))
  904. resp = self.c.post('/filebrowser/extract_archive',
  905. dict(upload_path=HDFS_DEST_DIR, archive_name='te st.zip'))
  906. response = json.loads(resp.content)
  907. assert_equal(0, response['status'], response)
  908. assert_true('handle' in response and response['handle']['id'], response)
  909. finally:
  910. cleanup_file(self.cluster, HDFS_ZIP_FILE)
  911. def test_compress_hdfs_files(self):
  912. if not is_oozie_enabled():
  913. raise SkipTest
  914. def make_and_test_dir(pre, test_direct):
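  # Local helper: creates a directory with three empty files, submits a compress_files request, then polls the Job Browser workflow API (up to ~25s) until the job leaves RUNNING and asserts it SUCCEEDED.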
  915. test_dir = pre + "/" + test_direct
  916. test_file = test_dir + '/test.txt'
  917. self.cluster.fs.mkdir(test_dir)
  918. self.cluster.fs.chown(test_dir, 'test')
  919. self.cluster.fs.chmod(test_dir, 0o700)
  920. for i in range(3):
  921. f = self.cluster.fs.open(test_file + "%s" %i, "w")
  922. f.close()
  923. resp = self.c.post('/filebrowser/compress_files', {'upload_path': pre, 'files[]': [test_direct], 'archive_name': 'test_compress.zip'})
  924. response = json.loads(resp.content)
  925. assert_equal(0, response['status'], response)
  926. assert_true('handle' in response and response['handle']['id'], response)
  927. responseid = '"' + response['handle']['id'] + '"'
  928. timeout_time = time() + 25
  929. end_time = time()
  930. while timeout_time > end_time:
  931. resp2 = self.c.post('/jobbrowser/api/job/workflows', {'interface': '"workflows"', 'app_id': responseid})
  932. response2 = json.loads(resp2.content)
  933. if response2['app']['status'] != 'RUNNING':
  934. assert_equal(response2['app']['status'] , 'SUCCEEDED', response2)
  935. break
  936. sleep(3)
  937. end_time = time()
  938. assert_greater(timeout_time, end_time, response)
  939. ENABLE_EXTRACT_UPLOADED_ARCHIVE.set_for_testing(True)
  940. prefix = self.cluster.fs_prefix + '/test_compress_files'
  941. self.cluster.fs.mkdir(prefix)
  942. try:
  943. make_and_test_dir(prefix, 'testdir')
  944. make_and_test_dir(prefix, 'test dir1')
  945. #make_and_test_dir(prefix, 'test\ndir2')
  946. #make_and_test_dir(prefix, 'test\tdir3')
  947. finally:
  948. ENABLE_EXTRACT_UPLOADED_ARCHIVE.set_for_testing(False)
  949. cleanup_tree(self.cluster, prefix)

    def test_extract_tgz(self):
        ENABLE_EXTRACT_UPLOADED_ARCHIVE.set_for_testing(True)
        prefix = self.cluster.fs_prefix + '/test_upload_tgz'
        self.cluster.fs.mkdir(prefix)

        USER_NAME = 'test'
        HDFS_DEST_DIR = prefix + "/fb-upload-test"
        TGZ_FILE = os.path.realpath('apps/filebrowser/src/filebrowser/test_data/test.tar.gz')
        HDFS_TGZ_FILE = HDFS_DEST_DIR + '/test.tar.gz'

        self.cluster.fs.mkdir(HDFS_DEST_DIR)
        self.cluster.fs.chown(HDFS_DEST_DIR, USER_NAME)
        self.cluster.fs.chmod(HDFS_DEST_DIR, 0o700)

        try:
            # Upload archive
            resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                               dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(TGZ_FILE)))
            response = json.loads(resp.content)
            assert_equal(0, response['status'], response)
            assert_true(self.cluster.fs.exists(HDFS_TGZ_FILE))

            resp = self.c.post('/filebrowser/extract_archive',
                               dict(upload_path=HDFS_DEST_DIR, archive_name='test.tar.gz'))
            response = json.loads(resp.content)
            assert_equal(0, response['status'], response)
            assert_true('handle' in response and response['handle']['id'], response)
        finally:
            cleanup_file(self.cluster, HDFS_TGZ_FILE)

    def test_extract_bz2(self):
        ENABLE_EXTRACT_UPLOADED_ARCHIVE.set_for_testing(True)
        prefix = self.cluster.fs_prefix + '/test_upload_bz2'

        HDFS_DEST_DIR = prefix + "/fb-upload-test"
        BZ2_FILE = os.path.realpath('apps/filebrowser/src/filebrowser/test_data/test.txt.bz2')
        HDFS_BZ2_FILE = HDFS_DEST_DIR + '/test.txt.bz2'

        self.cluster.fs.mkdir(HDFS_DEST_DIR)

        try:
            # Upload archive
            resp = self.c.post('/filebrowser/upload/file?dest=%s' % HDFS_DEST_DIR,
                               dict(dest=HDFS_DEST_DIR, hdfs_file=open_file(BZ2_FILE)))
            response = json.loads(resp.content)
            assert_equal(0, response['status'], response)
            assert_true(self.cluster.fs.exists(HDFS_BZ2_FILE))

            resp = self.c.post('/filebrowser/extract_archive',
                               dict(upload_path=HDFS_DEST_DIR, archive_name='test.txt.bz2'))
            response = json.loads(resp.content)
            assert_equal(0, response['status'], response)
            assert_true('handle' in response and response['handle']['id'], response)
        finally:
            cleanup_file(self.cluster, HDFS_BZ2_FILE)

    def test_trash(self):
        prefix = self.cluster.fs_prefix + '/test_trash'
        self.cluster.fs.mkdir(prefix)

        USERNAME = 'test'
        HOME_TRASH_DIR = '/user/%s/.Trash/Current/user/%s' % (USERNAME, USERNAME)
        HOME_TRASH_DIR2 = '/user/%s/.Trash' % USERNAME
        PATH_1 = '%s/1' % prefix
        self.cluster.fs.mkdir(PATH_1)

        self.c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))
        self.c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR2]))

        # No trash folder yet, so no redirect
        response = self.c.get('/filebrowser/view=/user/test?default_to_trash', follow=True)
        assert_equal([], response.redirect_chain)

        self.c.post('/filebrowser/rmtree', dict(path=[PATH_1]))

        # Now there is a trash folder, so expect a redirect ('Current' is not always there)
        response = self.c.get('/filebrowser/view=/user/test?default_to_trash', follow=True)
        assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)

        self.c.post('/filebrowser/rmtree?skip_trash=true', dict(path=[HOME_TRASH_DIR]))

        # No home trash, just the regular root trash
        response = self.c.get('/filebrowser/view=/user/test?default_to_trash', follow=True)
        assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)
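

# The two helpers below write or edit a file in a given encoding through the cluster filesystem and
# then exercise the view/save endpoints. They are parameterized by encoding (and, for editing, by two
# content passes), presumably so the i18n tests elsewhere in this module can drive them with several
# encodings.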


def view_i18n_helper(c, cluster, encoding, content):
    """
    Write the content in the given encoding directly into the filesystem.
    Then try to view it and make sure the data is correct.
    """
    prefix = cluster.fs_prefix + '/test_view_i18n'
    filename = prefix + u'/test-view-carácter-internacional'
    bytestring = content.encode(encoding)

    try:
        f = cluster.fs.open(filename, "w")
        f.write(bytestring)
        f.close()

        response = c.get('/filebrowser/view=%s?encoding=%s' % (filename, encoding))
        assert_equal(response.context[0]['view']['contents'], content)

        response = c.get('/filebrowser/view=%s?encoding=%s&end=8&begin=1' % (filename, encoding))
        assert_equal(response.context[0]['view']['contents'],
                     str(bytestring[0:8], encoding, errors='replace'))
    finally:
        cleanup_file(cluster, filename)


def edit_i18n_helper(c, cluster, encoding, contents_pass_1, contents_pass_2):
    """
    Put the content into the file with a specific encoding.
    """
    prefix = cluster.fs_prefix + '/test_edit_i18n'
    # This path is non-normalized to test normalization too
    filename = prefix + u'//test-filebrowser//./test-edit-carácter-internacional with space and () en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте'

    # File doesn't exist - should be empty
    edit_url = '/filebrowser/edit=' + filename
    response = c.get(edit_url)
    assert_equal(response.context[0]['form'].data['path'], filename)
    assert_equal(response.context[0]['form'].data['contents'], "")

    # Just going to the edit page and not hitting save should not create the file
    assert_false(cluster.fs.exists(filename))

    try:
        # Put some data in there and post
        response = c.post("/filebrowser/save", dict(
            path=filename,
            contents=contents_pass_1,
            encoding=encoding), follow=True)
        assert_equal(response.context[0]['form'].data['path'], filename)
        assert_equal(response.context[0]['form'].data['contents'], contents_pass_1)

        # File should now exist
        assert_true(cluster.fs.exists(filename))
        # And its contents should be what we expect, with no CRLF line terminators
        f = cluster.fs.open(filename)
        data = f.read()
        f.close()
        assert_equal(data, contents_pass_1.encode(encoding))
        assert_false(b'\r\n' in data)

        # We should be able to overwrite the file with another save
        response = c.post("/filebrowser/save", dict(
            path=filename,
            contents=contents_pass_2,
            encoding=encoding), follow=True)
        assert_equal(response.context[0]['form'].data['path'], filename)
        assert_equal(response.context[0]['form'].data['contents'], contents_pass_2)

        f = cluster.fs.open(filename)
        data = f.read()
        f.close()
        assert_equal(data, contents_pass_2.encode(encoding))
        assert_false(b'\r\n' in data)

        # TODO(todd) add test for maintaining ownership/permissions
    finally:
        cleanup_file(cluster, filename)


def test_location_to_url():
    prefix = '/filebrowser/view='
    assert_equal(prefix + '/var/lib/hadoop-hdfs', location_to_url('/var/lib/hadoop-hdfs', False))
    assert_equal(prefix + '/var/lib/hadoop-hdfs', location_to_url('hdfs://localhost:8020/var/lib/hadoop-hdfs'))
    assert_equal('/hue' + prefix + '/var/lib/hadoop-hdfs', location_to_url('hdfs://localhost:8020/var/lib/hadoop-hdfs', False, True))
    assert_equal(prefix + '/', location_to_url('hdfs://localhost:8020'))
    assert_equal(prefix + 's3a://bucket/key', location_to_url('s3a://bucket/key'))
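

# The permission test classes below check that, without the relevant s3_access / abfs_access /
# adls_access permission, the S3A / ABFS / ADL view endpoints return a 500, and that they return
# a 200 once the permission has been granted.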


class TestS3AccessPermissions(object):

    def setUp(self):
        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        grant_access('test', 'test', 'filebrowser')
        add_to_group('test')

        self.user = User.objects.get(username="test")

    def test_no_default_permissions(self):
        response = self.client.get('/filebrowser/view=S3A://')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=S3A://bucket')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=s3a://bucket')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=S3A://bucket/hue')
        assert_equal(500, response.status_code)

        response = self.client.post('/filebrowser/rmtree', dict(path=['S3A://bucket/hue']))
        assert_equal(500, response.status_code)

        # 500 for real currently
        assert_raises(IOError, self.client.get, '/filebrowser/edit=S3A://bucket/hue')

        # 500 for real currently
        # with tempfile.NamedTemporaryFile() as local_file: # Flaky
        #   DEST_DIR = 'S3A://bucket/hue'
        #   LOCAL_FILE = local_file.name
        #   assert_raises(S3FileSystemException, self.client.post, '/filebrowser/upload/file?dest=%s' % DEST_DIR, dict(dest=DEST_DIR, hdfs_file=file(LOCAL_FILE)))

    def test_has_default_permissions(self):
        if not get_test_bucket():
            raise SkipTest

        add_permission(self.user.username, 'has_s3', permname='s3_access', appname='filebrowser')
        try:
            response = self.client.get('/filebrowser/view=S3A://')
            assert_equal(200, response.status_code)
        finally:
            remove_from_group(self.user.username, 'has_s3')


class TestABFSAccessPermissions(object):

    def setUp(self):
        if not is_abfs_enabled():
            raise SkipTest

        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        grant_access('test', 'test', 'filebrowser')
        add_to_group('test')

        self.user = User.objects.get(username="test")

    def test_no_default_permissions(self):
        response = self.client.get('/filebrowser/view=ABFS://')
        assert_equal(500, response.status_code)

        # 500 for real currently
        # with tempfile.NamedTemporaryFile() as local_file: # Flaky
        #   DEST_DIR = 'S3A://bucket/hue'
        #   LOCAL_FILE = local_file.name
        #   assert_raises(S3FileSystemException, self.client.post, '/filebrowser/upload/file?dest=%s' % DEST_DIR, dict(dest=DEST_DIR, hdfs_file=file(LOCAL_FILE)))

    def test_has_default_permissions(self):
        add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
        try:
            response = self.client.get('/filebrowser/view=ABFS://')
            assert_equal(200, response.status_code)
        finally:
            remove_from_group(self.user.username, 'has_abfs')


class TestADLSAccessPermissions(object):

    def setUp(self):
        if not is_adls_enabled():
            raise SkipTest

        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        grant_access('test', 'test', 'filebrowser')
        add_to_group('test')

        self.user = User.objects.get(username="test")

    def test_no_default_permissions(self):
        response = self.client.get('/filebrowser/view=ADL://')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=ADL://hue_adls_testing')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=adl://hue_adls_testing')
        assert_equal(500, response.status_code)

        response = self.client.get('/filebrowser/view=ADL://hue_adls_testing/ADLS_tables')
        assert_equal(500, response.status_code)

        response = self.client.post('/filebrowser/rmtree', dict(path=['ADL://hue-test-01']))
        assert_equal(500, response.status_code)

        # 500 for real currently
        assert_raises(IOError, self.client.get, '/filebrowser/edit=ADL://hue-test-01')

        # 500 for real currently
        # with tempfile.NamedTemporaryFile() as local_file: # Flaky
        #   DEST_DIR = 'S3A://bucket/hue'
        #   LOCAL_FILE = local_file.name
        #   assert_raises(S3FileSystemException, self.client.post, '/filebrowser/upload/file?dest=%s' % DEST_DIR, dict(dest=DEST_DIR, hdfs_file=file(LOCAL_FILE)))

    def test_has_default_permissions(self):
        add_permission(self.user.username, 'has_adls', permname='adls_access', appname='filebrowser')
        try:
            response = self.client.get('/filebrowser/view=ADL://')
            assert_equal(200, response.status_code)
        finally:
            remove_from_group(self.user.username, 'has_adls')