#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import object
import errno
import logging
import mimetypes
import operator
import os
import posixpath
import re
import stat as stat_module
import sys
import urllib.request, urllib.error

from bz2 import decompress
from datetime import datetime

from django.core.paginator import EmptyPage, Paginator, Page, InvalidPage
from django.urls import reverse
from django.template.defaultfilters import stringformat, filesizeformat
from django.http import Http404, StreamingHttpResponse, HttpResponseNotModified,\
    HttpResponseForbidden, HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from django.views.static import was_modified_since
from django.shortcuts import redirect
from functools import partial
from django.utils.http import http_date
from django.utils.html import escape
from django.utils.translation import ugettext as _

from aws.s3.s3fs import S3FileSystemException, S3ListAllBucketsException
from desktop import appmanager
from desktop.auth.backend import is_admin
from desktop.lib import i18n
from desktop.lib.conf import coerce_bool
from desktop.lib.django_util import render, format_preserving_redirect
from desktop.lib.django_util import JsonResponse
from desktop.lib.export_csvxls import file_reader
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.fs import splitpath
from desktop.lib.i18n import smart_str
from desktop.lib.paths import SAFE_CHARACTERS_URI, SAFE_CHARACTERS_URI_COMPONENTS
from desktop.lib.tasks.compress_files.compress_utils import compress_files_in_hdfs
from desktop.lib.tasks.extract_archive.extract_utils import extract_archive_in_hdfs
from desktop.views import serve_403_error
from hadoop.core_site import get_trash_interval
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.fsutils import do_overwrite_save
from useradmin.models import User, Group

from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE, MAX_SNAPPY_DECOMPRESSION_SIZE,\
    SHOW_DOWNLOAD_BUTTON, SHOW_UPLOAD_BUTTON, REDIRECT_DOWNLOAD
from filebrowser.lib.archives import archive_factory
from filebrowser.lib.rwx import filetype, rwx
from filebrowser.lib import xxd
from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
    RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
    TrashPurgeForm, SetReplicationFactorForm

if sys.version_info[0] > 2:
  import io
  from io import StringIO as string_io
  from urllib.parse import quote as urllib_quote
  from urllib.parse import unquote as urllib_unquote
  from urllib.parse import urlparse as lib_urlparse
  from builtins import str as new_str
  # Note: this rebinds 'io' to avro's io module (shadowing the stdlib io imported
  # above); _read_avro() below relies on io.DatumReader from avro.
  from avro import datafile, io
  from gzip import decompress as decompress_gzip
else:
  from cStringIO import StringIO as string_io
  from urllib import quote as urllib_quote
  from urllib import unquote as urllib_unquote
  from urlparse import urlparse as lib_urlparse
  new_str = unicode
  import parquet
  from avro import datafile, io
  from gzip import GzipFile
DEFAULT_CHUNK_SIZE_BYTES = 1024 * 4  # 4KB
MAX_CHUNK_SIZE_BYTES = 1024 * 1024  # 1MB

# Defaults for "xxd"-style output.
# Sentences refer to groups of bytes printed together, within a line.
BYTES_PER_LINE = 16
BYTES_PER_SENTENCE = 2

# The maximum size the file editor will allow you to edit
MAX_FILEEDITOR_SIZE = 256 * 1024

INLINE_DISPLAY_MIMETYPE = re.compile(
    r'video/|image/|audio/|application/pdf|application/msword|application/excel|'
    r'application/vnd\.ms|'
    r'application/vnd\.openxmlformats'
)

INLINE_DISPLAY_MIMETYPE_EXCEPTIONS = re.compile(r'image/svg\+xml')

logger = logging.getLogger(__name__)

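# Minimal option object handed to parquet._dump() in _read_parquet() below; the
# attributes mirror the dump options that routine expects (col / format /
# no_headers / limit).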
class ParquetOptions(object):
  def __init__(self, col=None, format='json', no_headers=True, limit=-1):
    self.col = col
    self.format = format
    self.no_headers = no_headers
    self.limit = limit

def index(request):
  # Redirect to home directory by default
  path = request.user.get_home_directory()
  try:
    if not request.fs.isdir(path):
      path = '/'
  except Exception:
    pass

  return view(request, path)

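# Illustrative request shape (the exact URL mapping lives in urls.py; the path
# and query below are made-up examples):
#   GET /filebrowser/download=/user/test/data.csv?disposition=inline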
def download(request, path):
  """
  Downloads a file.

  This is inspired by django.views.static.serve.
  ?disposition={attachment, inline}
  """
  # Check if the protocol separator was collapsed ('abfs:/' instead of 'abfs://') and add it back.
  if path.startswith('abfs:/') and not path.startswith('abfs://'):
    path = path.replace('abfs:/', 'abfs://')
  if path.startswith('s3a:/') and not path.startswith('s3a://'):
    path = path.replace('s3a:/', 's3a://')

  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not SHOW_DOWNLOAD_BUTTON.get():
    return serve_403_error(request)
  if not request.fs.exists(path):
    raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
  if not request.fs.isfile(path):
    raise PopupException(_("'%(path)s' is not a file.") % {'path': path})

  content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
  stats = request.fs.stats(path)
  mtime = stats['mtime']
  size = stats['size']
  if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
    return HttpResponseNotModified()
  # TODO(philip): Ideally a with statement would protect from leaks, but tricky to do here.
  fh = request.fs.open(path)

  # Verify read permissions on the file first.
  try:
    request.fs.read(path, offset=0, length=1)
  except WebHdfsException as e:
    if e.code == 403:
      raise PopupException(_('User %s is not authorized to download file at path "%s"') % (request.user.username, path))
    else:
      raise PopupException(_('Failed to download file at path "%s": %s') % (path, e))

  if REDIRECT_DOWNLOAD.get() and hasattr(fh, 'read_url'):
    response = HttpResponseRedirect(fh.read_url())
    setattr(response, 'redirect_override', True)
  else:
    response = StreamingHttpResponse(file_reader(fh), content_type=content_type)
    response["Last-Modified"] = http_date(stats['mtime'])
    response["Content-Length"] = stats['size']
    response['Content-Disposition'] = request.GET.get('disposition', 'attachment; filename="' + stats['name'] + '"') \
        if _can_inline_display(path) \
        else 'attachment'

  request.audit = {
    'operation': 'DOWNLOAD',
    'operationText': 'User %s downloaded file %s with size: %d bytes' % (request.user.username, path, stats['size']),
    'allowed': True
  }

  return response

def view(request, path):
  """Dispatches viewing of a path to either index() or fileview(), depending on type."""
  decoded_path = unquote_url(path)
  if path != decoded_path:
    path = decoded_path

  # default_abfs_home is set in jquery.filechooser.js
  if 'default_abfs_home' in request.GET:
    from azure.abfs.__init__ import get_home_dir_for_ABFS
    home_dir_path = get_home_dir_for_ABFS()
    if request.fs.isdir(home_dir_path):
      return format_preserving_redirect(
          request,
          '/filebrowser/view=' + urllib_quote(home_dir_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
      )

  # default_to_home is set in jquery.filechooser.js
  if 'default_to_home' in request.GET:
    home_dir_path = request.user.get_home_directory()
    if request.fs.isdir(home_dir_path):
      return format_preserving_redirect(
          request,
          '/filebrowser/view=' + urllib_quote(home_dir_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
      )

  # default_to_trash is set in jquery.filechooser.js
  if 'default_to_trash' in request.GET:
    home_trash_path = _home_trash_path(request.fs, request.user, path)
    if request.fs.isdir(home_trash_path):
      return format_preserving_redirect(
          request,
          '/filebrowser/view=' + urllib_quote(home_trash_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
      )
    trash_path = request.fs.trash_path(path)
    if request.fs.isdir(trash_path):
      return format_preserving_redirect(
          request,
          '/filebrowser/view=' + urllib_quote(trash_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
      )

  try:
    stats = request.fs.stats(path)
    if stats.isDir:
      return listdir_paged(request, path)
    else:
      return display(request, path)
  except S3FileSystemException as e:
    msg = _("S3 filesystem exception.")
    if request.is_ajax():
      exception = {
        'error': smart_str(e)
      }
      return JsonResponse(exception)
    else:
      raise PopupException(msg, detail=e)
  except (IOError, WebHdfsException) as e:
    msg = _("Cannot access: %(path)s. ") % {'path': escape(path)}
    if "Connection refused" in str(e):
      msg += _(" The HDFS REST service is not available. ")
    if request.is_ajax():
      exception = {
        'error': msg
      }
      return JsonResponse(exception)
    else:
      raise PopupException(msg, detail=e)

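# Sketch of the path this builds, assuming a user home of '/user/test' and an
# HDFS trash path of '/user/test/.Trash' (both illustrative):
#   fs.join('/user/test/.Trash', 'Current', 'user/test')
#     -> '/user/test/.Trash/Current/user/test'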
def _home_trash_path(fs, user, path):
  return fs.join(fs.trash_path(path), 'Current', user.get_home_directory()[1:])

def home_relative_view(request, path):
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  home_dir_path = request.user.get_home_directory()
  if request.fs.exists(home_dir_path):
    path = '%s%s' % (home_dir_path, path)

  return view(request, path)

def edit(request, path, form=None):
  """Shows an edit form for the given path. Path does not necessarily have to exist."""
  decoded_path = unquote_url(path)
  if path != decoded_path:
    path = decoded_path

  try:
    stats = request.fs.stats(path)
  except IOError as ioe:
    # A file not found is OK, otherwise re-raise
    if ioe.errno == errno.ENOENT:
      stats = None
    else:
      raise

  # Can't edit a directory
  if stats and stats['mode'] & stat_module.S_IFDIR:
    raise PopupException(_("Cannot edit a directory: %(path)s") % {'path': path})

  # Maximum size of edit
  if stats and stats['size'] > MAX_FILEEDITOR_SIZE:
    raise PopupException(_("File too big to edit: %(path)s") % {'path': path})

  if not form:
    encoding = request.GET.get('encoding') or i18n.get_site_encoding()
    if stats:
      f = request.fs.open(path)
      try:
        try:
          current_contents = new_str(f.read(), encoding)
        except UnicodeDecodeError:
          raise PopupException(_("File is not encoded in %(encoding)s; cannot be edited: %(path)s.") % {'encoding': encoding, 'path': path})
      finally:
        f.close()
    else:
      current_contents = u""

    form = EditorForm(dict(path=path, contents=current_contents, encoding=encoding))

  data = dict(
      exists=(stats is not None),
      path=path,
      filename=os.path.basename(path),
      dirname=os.path.dirname(path),
      breadcrumbs=parse_breadcrumbs(path),
      is_embeddable=request.GET.get('is_embeddable', False),
      show_download_button=SHOW_DOWNLOAD_BUTTON.get())

  if not request.is_ajax():
    data['stats'] = stats
    data['form'] = form

  return render("edit.mako", request, data)

def save_file(request):
  """
  The POST endpoint to save a file in the file editor.

  Does the save and then redirects back to the edit page.
  """
  form = EditorForm(request.POST)
  is_valid = form.is_valid()
  path = form.cleaned_data.get('path')

  decoded_path = unquote_url(path)
  if path != decoded_path:
    path = decoded_path

  if request.POST.get('save') == "Save As":
    if not is_valid:
      return edit(request, path, form=form)
    else:
      return render("saveas.mako", request, {'form': form})

  if not path:
    raise PopupException(_("No path specified"))
  if not is_valid:
    return edit(request, path, form=form)

  encoding = form.cleaned_data['encoding']
  data = form.cleaned_data['contents'].encode(encoding)

  try:
    if request.fs.exists(path):
      do_overwrite_save(request.fs, path, data)
    else:
      request.fs.create(path, overwrite=False, data=data)
  except WebHdfsException as e:
    raise PopupException(_("The file could not be saved"), detail=e.message.splitlines()[0])
  except Exception as e:
    raise PopupException(_("The file could not be saved"), detail=e)

  request.path = reverse("filebrowser:filebrowser_views_edit", kwargs=dict(path=path))
  return edit(request, path, form)

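# Example (illustrative; assumes splitpath('/user/test/data') returns
# ['/', 'user', 'test', 'data']):
#   parse_breadcrumbs('/user/test/data') ->
#     [{'url': '/', 'label': '/'},
#      {'url': '/user', 'label': 'user'},
#      {'url': '/user/test', 'label': 'test'},
#      {'url': '/user/test/data', 'label': 'data'}]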
def parse_breadcrumbs(path):
  parts = splitpath(path)
  url, breadcrumbs = '', []
  for part in parts:
    if url and not url.endswith('/'):
      url += '/'
    url += part
    breadcrumbs.append({'url': urllib_quote(url.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS), 'label': part})
  return breadcrumbs

def listdir(request, path):
  """
  Implements directory listing (or index).

  Intended to be called via view().
  """
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not request.fs.isdir(path):
    raise PopupException(_("Not a directory: %(path)s") % {'path': path})

  file_filter = request.GET.get('file_filter', 'any')

  assert file_filter in ['any', 'file', 'dir']

  home_dir_path = request.user.get_home_directory()
  breadcrumbs = parse_breadcrumbs(path)

  data = {
    'path': path,
    'file_filter': file_filter,
    'breadcrumbs': breadcrumbs,
    'current_dir_path': urllib_quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI),
    'current_request_path': '/filebrowser/view=' + urllib_quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS),
    'home_directory': home_dir_path if home_dir_path and request.fs.isdir(home_dir_path) else None,
    'cwd_set': True,
    'is_superuser': request.user.username == request.fs.superuser,
    'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
    'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
    'superuser': request.fs.superuser,
    'show_upload': (request.GET.get('show_upload') == 'false' and (False,) or (True,))[0],
    'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
    'show_upload_button': SHOW_UPLOAD_BUTTON.get(),
    'is_embeddable': request.GET.get('is_embeddable', False),
  }

  stats = request.fs.listdir_stats(path)

  # Include parent dir, unless at filesystem root.
  if not request.fs.isroot(path):
    parent_path = request.fs.parent_path(path)
    parent_stat = request.fs.stats(parent_path)
    # The 'path' field would be absolute, but we want its basename to be
    # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
    parent_stat['path'] = parent_path
    stats.insert(0, parent_stat)

  data['files'] = [_massage_stats(request, stat_absolute_path(path, stat)) for stat in stats]
  return render('listdir.mako', request, data)

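# Flattens a Django Paginator/Page pair into the plain dict the templates consume.
# Worked example: page 2 of 75 items at 30 per page ->
#   {'number': 2, 'num_pages': 3, 'previous_page_number': 1, 'next_page_number': 3,
#    'start_index': 31, 'end_index': 60, 'total_count': 75}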
def _massage_page(page, paginator):
  try:
    prev_num = page.previous_page_number()
  except InvalidPage:
    prev_num = 0

  try:
    next_num = page.next_page_number()
  except InvalidPage:
    next_num = 0

  return {
    'number': page.number,
    'num_pages': paginator.num_pages,
    'previous_page_number': prev_num,
    'next_page_number': next_num,
    'start_index': page.start_index(),
    'end_index': page.end_index(),
    'total_count': paginator.count
  }

def listdir_paged(request, path):
  """
  A paginated version of listdir.

  Query parameters:
    pagenum    - The page number to show. Defaults to 1.
    pagesize   - How many entries to show on a page. Defaults to 30.
    sortby=?   - Specify attribute to sort by. Accepts:
                 (type, name, atime, mtime, size, user, group)
                 Defaults to name.
    descending - Specify a descending sort order.
                 Defaults to false.
    filter=?   - Specify a substring filter to search for in
                 the filename field.
  """
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not request.fs.isdir(path):
    raise PopupException("Not a directory: %s" % (path,))

  pagenum = int(request.GET.get('pagenum', 1))
  pagesize = int(request.GET.get('pagesize', 30))
  do_as = None
  if is_admin(request.user) or request.user.has_hue_permission(action="impersonate", app="security"):
    do_as = request.GET.get('doas', request.user.username)
  if hasattr(request, 'doas'):
    do_as = request.doas

  if request.fs._get_scheme(path) == 'hdfs':
    home_dir_path = request.user.get_home_directory()
  else:
    home_dir_path = None
  breadcrumbs = parse_breadcrumbs(path)

  s3_listing_not_allowed = ''
  try:
    if do_as:
      all_stats = request.fs.do_as_user(do_as, request.fs.listdir_stats, path)
    else:
      all_stats = request.fs.listdir_stats(path)
  except S3ListAllBucketsException as e:
    s3_listing_not_allowed = e.message
    all_stats = []

  # Filter first
  filter_str = request.GET.get('filter', None)
  if filter_str:
    filtered_stats = [sb for sb in all_stats if filter_str in sb['name']]
    all_stats = filtered_stats

  # Sort next
  sortby = request.GET.get('sortby', None)
  descending_param = request.GET.get('descending', None)
  if sortby is not None:
    if sortby not in ('type', 'name', 'atime', 'mtime', 'user', 'group', 'size'):
      logger.info("Invalid sort attribute '%s' for listdir." % sortby)
    else:
      all_stats = sorted(all_stats, key=operator.attrgetter(sortby), reverse=coerce_bool(descending_param))

  # Do pagination
  try:
    paginator = Paginator(all_stats, pagesize, allow_empty_first_page=True)
    page = paginator.page(pagenum)
    shown_stats = page.object_list
  except EmptyPage:
    logger.warning("No results found for requested page.")
    paginator = None
    page = None
    shown_stats = []

  # Always include the parent dir as the second entry, unless at filesystem root.
  if not request.fs.isroot(path):
    parent_path = request.fs.parent_path(path)
    parent_stat = request.fs.stats(parent_path)
    # The 'path' field would be absolute, but we want its basename to be
    # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
    parent_stat['path'] = parent_path
    parent_stat['name'] = ".."
    shown_stats.insert(0, parent_stat)

  # Always include the current dir as the first entry, so stats of the current folder are visible.
  current_stat = request.fs.stats(path)
  # The 'path' field would be absolute, but we want its basename to be
  # actually '.' for display purposes. Encode it since _massage_stats expects byte strings.
  current_stat.path = path
  current_stat.name = "."
  shown_stats.insert(1, current_stat)

  if page:
    page.object_list = [_massage_stats(request, stat_absolute_path(path, s)) for s in shown_stats]

  is_trash_enabled = request.fs._get_scheme(path) == 'hdfs' and int(get_trash_interval()) > 0
  is_fs_superuser = _is_hdfs_superuser(request)

  data = {
    'path': path,
    'breadcrumbs': breadcrumbs,
    'current_request_path': '/filebrowser/view=' + urllib_quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS),
    'is_trash_enabled': is_trash_enabled,
    'files': page.object_list if page else [],
    'page': _massage_page(page, paginator) if page else {},
    'pagesize': pagesize,
    'home_directory': home_dir_path if home_dir_path and request.fs.isdir(home_dir_path) else None,
    'descending': descending_param,
    # The following should probably be deprecated
    'cwd_set': True,
    'file_filter': 'any',
    'current_dir_path': urllib_quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI),
    'is_fs_superuser': is_fs_superuser,
    'groups': is_fs_superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
    'users': is_fs_superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
    'superuser': request.fs.superuser,
    'supergroup': request.fs.supergroup,
    'is_sentry_managed': request.fs.is_sentry_managed(path),
    'apps': list(appmanager.get_apps_dict(request.user).keys()),
    'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
    'show_upload_button': SHOW_UPLOAD_BUTTON.get(),
    'is_embeddable': request.GET.get('is_embeddable', False),
    's3_listing_not_allowed': s3_listing_not_allowed
  }
  return render('listdir.mako', request, data)

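# Stamps the scheme of `root` onto a scheme-less `path`, so stats coming back
# from a scheme-qualified listing keep their filesystem prefix. E.g.
# (illustrative) with root 's3a://bucket/dir', a bare '/dir/key' gains the
# 's3a' scheme.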
def scheme_absolute_path(root, path):
  splitPath = lib_urlparse(path)
  splitRoot = lib_urlparse(root)
  if splitRoot.scheme and not splitPath.scheme:
    path = splitPath._replace(scheme=splitRoot.scheme).geturl()
  return path


def stat_absolute_path(path, stat):
  stat.path = scheme_absolute_path(path, stat.path)
  return stat

def _massage_stats(request, stats):
  """
  Massage a stats record as returned by the filesystem implementation
  into the format that the views would like it in.
  """
  path = stats.path
  normalized = request.fs.normpath(path)

  return {
    # Normally the value of 'path' should be quoted, but we only use it in POST requests, so we're OK.
    # Changing this to quoted causes many issues.
    'path': normalized,
    'name': stats.name,
    'stats': stats.to_json_dict(),
    'mtime': datetime.fromtimestamp(stats.mtime).strftime('%B %d, %Y %I:%M %p') if stats.mtime else '',
    'humansize': filesizeformat(stats.size),
    'type': filetype(stats.mode),
    'rwx': rwx(stats.mode, stats.aclBit),
    'mode': stringformat(stats.mode, "o"),
    'url': '/filebrowser/view=' + urllib_quote(normalized.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS),
    'is_sentry_managed': request.fs.is_sentry_managed(path)
  }

def stat(request, path):
  """
  Returns just the generic stats of a file.

  Intended for use via AJAX (and hence doesn't provide
  an HTML view).
  """
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not request.fs.exists(path):
    raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
  stats = request.fs.stats(path)
  return JsonResponse(_massage_stats(request, stat_absolute_path(path, stats)))

def content_summary(request, path):
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not request.fs.exists(path):
    raise Http404(_("File not found: %(path)s") % {'path': escape(path)})

  response = {'status': -1, 'message': '', 'summary': None}
  try:
    stats = request.fs.get_content_summary(path)
    replication_factor = request.fs.stats(path)['replication']
    stats.summary.update({'replication': replication_factor})
    response['status'] = 0
    response['summary'] = stats.summary
  except WebHdfsException as e:
    response['message'] = _("Failed to fetch content summary: ") + e.message.splitlines()[0]
  return JsonResponse(response)

def display(request, path):
  """
  Implements displaying part of a file.

  GET arguments are length, offset, mode, compression and encoding
  with reasonable defaults chosen.

  Note that display by length and offset are on bytes, not on characters.

  TODO(philip): Could easily build in file type detection
  (perhaps using something similar to file(1)), as well
  as more advanced binary-file viewing capability (de-serialize
  sequence files, decompress gzipped text files, etc.).
  There exists a python-magic package to interface with libmagic.
  """
  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  if not request.fs.isfile(path):
    raise PopupException(_("Not a file: '%(path)s'") % {'path': path})

  # Only redirect to an inline display when it's not an ajax request.
  if not request.is_ajax():
    if _can_inline_display(path):
      return redirect(reverse('filebrowser:filebrowser_views_download', args=[path]) + '?disposition=inline')

  stats = request.fs.stats(path)
  encoding = request.GET.get('encoding') or i18n.get_site_encoding()

  # I'm mixing URL-based parameters and traditional
  # HTTP GET parameters, since URL-based parameters
  # can't naturally be optional.

  # Need to deal with possibility that length is not present
  # because the offset came in via the toolbar manual byte entry.
  end = request.GET.get("end")
  if end:
    end = int(end)
  begin = request.GET.get("begin", 1)
  if begin:
    # Subtract one to zero index for file read
    begin = int(begin) - 1
  if end:
    offset = begin
    length = end - begin
    if begin >= end:
      raise PopupException(_("First byte to display must be before last byte to display."))
  else:
    length = int(request.GET.get("length", DEFAULT_CHUNK_SIZE_BYTES))
    # Display first block by default.
    offset = int(request.GET.get("offset", 0))

  mode = request.GET.get("mode")
  compression = request.GET.get("compression")

  if mode and mode not in ["binary", "text"]:
    raise PopupException(_("Mode must be one of 'binary' or 'text'."))
  if offset < 0:
    raise PopupException(_("Offset may not be less than zero."))
  if length < 0:
    raise PopupException(_("Length may not be less than zero."))
  if length > MAX_CHUNK_SIZE_BYTES:
    raise PopupException(_("Cannot request chunks greater than %(bytes)d bytes.") % {'bytes': MAX_CHUNK_SIZE_BYTES})

  # Do not decompress in binary mode.
  if mode == 'binary':
    compression = 'none'

  # Read out based on meta.
  compression, offset, length, contents = read_contents(compression, path, request.fs, offset, length)

  # Get contents as string for text mode, or at least try
  uni_contents = None
  if not mode or mode == 'text':
    if sys.version_info[0] > 2:
      if not isinstance(contents, str):
        uni_contents = new_str(contents, encoding, errors='replace')
        is_binary = uni_contents.find(i18n.REPLACEMENT_CHAR) != -1
        # Auto-detect mode
        if not mode:
          mode = is_binary and 'binary' or 'text'
      else:
        # We already have a string.
        uni_contents = contents
        is_binary = False
        mode = 'text'
    else:
      uni_contents = new_str(contents, encoding, errors='replace')
      is_binary = uni_contents.find(i18n.REPLACEMENT_CHAR) != -1
      # Auto-detect mode
      if not mode:
        mode = is_binary and 'binary' or 'text'

  # Get contents as bytes
  if mode == "binary":
    xxd_out = list(xxd.xxd(offset, contents, BYTES_PER_LINE, BYTES_PER_SENTENCE))

  dirname = posixpath.dirname(path)
  # Start with index-like data:
  stats = request.fs.stats(path)
  data = _massage_stats(request, stat_absolute_path(path, stats))
  data["is_embeddable"] = request.GET.get('is_embeddable', False)
  # And add a view structure:
  data["success"] = True
  data["view"] = {
    'offset': offset,
    'length': length,
    'end': offset + len(contents),
    'dirname': dirname,
    'mode': mode,
    'compression': compression,
    'size': stats.size,
    'max_chunk_size': str(MAX_CHUNK_SIZE_BYTES)
  }
  data["filename"] = os.path.basename(path)
  data["editable"] = stats.size < MAX_FILEEDITOR_SIZE

  if mode == "binary":
    # This might be the wrong thing for ?format=json; doing the
    # xxd'ing in javascript might be more compact, or sending a less
    # intermediate representation...
    logger.debug("xxd: " + str(xxd_out))
    data['view']['xxd'] = xxd_out
    data['view']['masked_binary_data'] = False
  else:
    data['view']['contents'] = uni_contents
    data['view']['masked_binary_data'] = is_binary

  data['breadcrumbs'] = parse_breadcrumbs(path)
  data['show_download_button'] = SHOW_DOWNLOAD_BUTTON.get()

  return render("display.mako", request, data)

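# Per the regexes at the top of this file: e.g. 'image/png' and 'application/pdf'
# match and can display inline, while 'image/svg+xml' is explicitly excluded
# (likely because SVG can embed scripts).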
def _can_inline_display(path):
  mimetype = mimetypes.guess_type(path)[0]
  return mimetype is not None and INLINE_DISPLAY_MIMETYPE.search(mimetype) and INLINE_DISPLAY_MIMETYPE_EXCEPTIONS.search(mimetype) is None

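# Typical call, as made from display() above (sketch):
#   compression, offset, length, contents = read_contents(None, path, request.fs, 0, DEFAULT_CHUNK_SIZE_BYTES)
# Passing codec_type=None triggers the magic-byte auto-detection below.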
def read_contents(codec_type, path, fs, offset, length):
  """
  Reads contents of a passed path, by appropriately decoding the data.

  Arguments:
    codec_type - The type of codec to use to decode. (Auto-detected if None).
    path - The path of the file to read.
    fs - The FileSystem instance to use to read.
    offset - Offset to seek to before read begins.
    length - Amount of bytes to read after offset.

  Returns: A tuple of codec_type, offset, length and contents read.
  """
  contents = ''
  fhandle = None

  decoded_path = urllib_unquote(path)
  if path != decoded_path:
    path = decoded_path

  try:
    fhandle = fs.open(path)
    stats = fs.stats(path)

    # Auto codec detection for [gzip, avro, snappy, none]
    if not codec_type:
      contents = fhandle.read(3)
      fhandle.seek(0)
      codec_type = 'none'
      if path.endswith('.gz') and detect_gzip(contents):
        codec_type = 'gzip'
        offset = 0
      elif (path.endswith('.bz2') or path.endswith('.bzip2')) and detect_bz2(contents):
        codec_type = 'bz2'
      elif path.endswith('.avro') and detect_avro(contents):
        codec_type = 'avro'
      elif detect_parquet(fhandle):
        codec_type = 'parquet'
      elif path.endswith('.snappy') and snappy_installed():
        codec_type = 'snappy'
      elif snappy_installed() and stats.size <= MAX_SNAPPY_DECOMPRESSION_SIZE.get():
        fhandle.seek(0)
        if detect_snappy(fhandle.read()):
          codec_type = 'snappy'

    fhandle.seek(0)

    if codec_type == 'gzip':
      contents = _read_gzip(fhandle, path, offset, length, stats)
    elif codec_type == 'bz2':
      contents = _read_bz2(fhandle, path, offset, length, stats)
    elif codec_type == 'avro':
      contents = _read_avro(fhandle, path, offset, length, stats)
    elif codec_type == 'parquet':
      contents = _read_parquet(fhandle, path, offset, length, stats)
    elif codec_type == 'snappy':
      contents = _read_snappy(fhandle, path, offset, length, stats)
    else:
      # for 'none' type.
      contents = _read_simple(fhandle, path, offset, length, stats)
  finally:
    if fhandle:
      fhandle.close()

  return (codec_type, offset, length, contents)

def _decompress_snappy(compressed_content):
  try:
    import snappy
    return snappy.decompress(compressed_content)
  except Exception as e:
    raise PopupException(_('Failed to decompress snappy compressed file.'), detail=e)

def _read_snappy(fhandle, path, offset, length, stats):
  if not snappy_installed():
    raise PopupException(_('Failed to decompress snappy compressed file. Snappy is not installed.'))

  if stats.size > MAX_SNAPPY_DECOMPRESSION_SIZE.get():
    raise PopupException(_('Failed to decompress snappy compressed file. '
                           'File size is greater than allowed max snappy decompression size of %d.')
                         % MAX_SNAPPY_DECOMPRESSION_SIZE.get())

  return _read_simple(string_io(_decompress_snappy(fhandle.read())), path, offset, length, stats)

def _read_avro(fhandle, path, offset, length, stats):
  contents = ''
  try:
    fhandle.seek(offset)
    data_file_reader = datafile.DataFileReader(fhandle, io.DatumReader())

    try:
      contents_list = []
      read_start = fhandle.tell()
      # Iterate over the entire sought file.
      for datum in data_file_reader:
        read_length = fhandle.tell() - read_start
        if read_length > length and len(contents_list) > 0:
          break
        else:
          datum_str = str(datum) + "\n"
          contents_list.append(datum_str)
    finally:
      data_file_reader.close()

    contents = "".join(contents_list)
  except Exception as e:
    logging.exception('Could not read avro file at "%s": %s' % (path, e))
    raise PopupException(_("Failed to read Avro file."))
  return contents

def _read_parquet(fhandle, path, offset, length, stats):
  try:
    size = 1 * 128 * 1024 * 1024  # Buffer file stream to 128 MB chunks
    data = string_io(fhandle.read(size))
    dumped_data = string_io()
    parquet._dump(data, ParquetOptions(limit=1000), out=dumped_data)
    dumped_data.seek(offset)
    return dumped_data.read()
  except Exception as e:
    logging.exception('Could not read parquet file at "%s": %s' % (path, e))
    raise PopupException(_("Failed to read Parquet file."))

def _read_gzip(fhandle, path, offset, length, stats):
  contents = ''
  if offset and offset != 0:
    raise PopupException(_("Offsets are not supported with Gzip compression."))
  try:
    if sys.version_info[0] > 2:
      contents = decompress_gzip(fhandle.read())
    else:
      contents = GzipFile('', 'r', 0, string_io(fhandle.read())).read(length)
  except Exception as e:
    logging.exception('Could not decompress file at "%s": %s' % (path, e))
    raise PopupException(_("Failed to decompress file."))
  return contents

def _read_bz2(fhandle, path, offset, length, stats):
  contents = ''
  try:
    contents = decompress(fhandle.read(length))
  except Exception as e:
    logging.exception('Could not decompress file at "%s": %s' % (path, e))
    raise PopupException(_("Failed to decompress file."))
  return contents

def _read_simple(fhandle, path, offset, length, stats):
  contents = ''
  try:
    fhandle.seek(offset)
    contents = fhandle.read(length)
  except Exception as e:
    logging.exception('Could not read file at "%s": %s' % (path, e))
    raise PopupException(_("Failed to read file."))
  return contents

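# Magic-number checks used by read_contents(); illustrative, truncated literals:
#   detect_gzip(b'\x1f\x8b\x08')  -> True  (gzip header)
#   detect_bz2(b'BZh9')           -> True  (bzip2 header)
#   detect_avro(b'Obj\x01')       -> True  ('Obj' header)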
def detect_gzip(contents):
  '''This is a silly small function which checks to see if the file is Gzip'''
  if sys.version_info[0] > 2:
    return contents[:2] == b'\x1f\x8b'
  else:
    return contents[:2] == '\x1f\x8b'


def detect_bz2(contents):
  '''This is a silly small function which checks to see if the file is Bz2'''
  if sys.version_info[0] > 2:
    return contents[:3] == b'BZh'
  else:
    return contents[:3] == 'BZh'


def detect_avro(contents):
  '''This is a silly small function which checks to see if the file is Avro'''
  # Check if the first three bytes are 'O', 'b' and 'j'
  if sys.version_info[0] > 2:
    return contents[:3] == b'\x4F\x62\x6A'
  else:
    return contents[:3] == '\x4F\x62\x6A'

def detect_snappy(contents):
  '''
  This is a silly small function which checks to see if the file is Snappy.
  It requires the entire contents of the compressed file.
  It also returns False if the snappy library is not available to decompress with.
  '''
  try:
    import snappy
    return snappy.isValidCompressed(contents)
  except:
    logging.exception('failed to detect snappy')
    return False

def detect_parquet(fhandle):
  """
  Detect parquet from magic header bytes.

  Python 2 only currently.
  """
  return False if sys.version_info[0] > 2 else parquet._check_header_magic_bytes(fhandle)

def snappy_installed():
  '''Snappy is a library that isn't supported by python2.4'''
  try:
    import snappy
    return True
  except ImportError:
    return False
  except:
    logging.exception('failed to verify if snappy is installed')
    return False

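# Worked example: offset=0, length=4096, size=10000 yields
#   first = (-1, None, "First Block")      # already selected
#   prev  = (-1, None, "Previous Block")
#   next  = (4096, 4096, "Next Block")
#   last  = (5904, 4096, "Last Block")     # max(0, 10000 - 4096)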
def _calculate_navigation(offset, length, size):
  """
  List of (offset, length, string) tuples for suggested navigation through the file.
  If offset is -1, then this option is already "selected". (Whereas None would
  be the natural pythonic way, Django's template syntax doesn't let us test
  against None (since its truth value is the same as 0).)

  By all means this logic ought to be in the template, but the template
  language is too limiting.
  """
  if offset == 0:
    first, prev = (-1, None, _("First Block")), (-1, None, _("Previous Block"))
  else:
    first, prev = (0, length, _("First Block")), (max(0, offset - length), length, _("Previous Block"))

  if offset + length >= size:
    next, last = (-1, None, _("Next Block")), (-1, None, _("Last Block"))
  else:
    # 1-off Reasoning: if length is the same as size, you want to start at 0.
    next, last = (offset + length, length, _("Next Block")), (max(0, size - length), length, _("Last Block"))

  return first, prev, next, last

def default_initial_value_extractor(request, parameter_names):
  initial_values = {}
  for p in parameter_names:
    val = request.GET.get(p)
    if val:
      initial_values[p] = val
  return initial_values

def formset_initial_value_extractor(request, parameter_names):
  """
  Builds a list of data that formsets should use by extending some fields to every object,
  whilst others are assumed to be received in order.
  Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
  The formsets should then handle construction on their own.
  """
  def _initial_value_extractor(request):
    if not submitted:
      return []
    # Build data with the list of in-order parameters received in POST data.
    # Size can be inferred from the largest list returned in POST data.
    data = []
    for param in submitted:
      i = 0
      for val in request.POST.getlist(param):
        if len(data) == i:
          data.append({})
        data[i][param] = val
        i += 1
    # Extend every data object with recurring params
    for kwargs in data:
      for recurrent in recurring:
        kwargs[recurrent] = request.POST.get(recurrent)
    initial_data = data
    return {'initial': initial_data}

  return _initial_value_extractor

def default_arg_extractor(request, form, parameter_names):
  return [form.cleaned_data[p] for p in parameter_names]


def formset_arg_extractor(request, formset, parameter_names):
  data = []
  for form in formset.forms:
    data_dict = {}
    for p in parameter_names:
      data_dict[p] = form.cleaned_data[p]
    data.append(data_dict)
  return data

def default_data_extractor(request):
  return {'data': request.POST.copy()}

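# Example (illustrative): with recurring=['dest_path'], submitted=['src_path'] and
# POST data src_path=['/a', '/b'], dest_path='/dst', the inner extractor returns
#   {'initial': [{'src_path': '/a', 'dest_path': '/dst'},
#                {'src_path': '/b', 'dest_path': '/dst'}],
#    'data': <the same list>}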
def formset_data_extractor(recurring=[], submitted=[]):
  """
  Builds a list of data that formsets should use by extending some fields to every object,
  whilst others are assumed to be received in order.
  Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
  The formsets should then handle construction on their own.
  """
  def _data_extractor(request):
    if not submitted:
      return []
    # Build data with the list of in-order parameters received in POST data.
    # Size can be inferred from the largest list returned in POST data.
    data = []
    for param in submitted:
      i = 0
      for val in request.POST.getlist(param):
        if len(data) == i:
          data.append({})
        data[i][param] = val
        i += 1
    # Extend every data object with recurring params
    for kwargs in data:
      for recurrent in recurring:
        kwargs[recurrent] = request.POST.get(recurrent)
    initial = list(data)
    return {'initial': initial, 'data': data}

  return _data_extractor

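# How the operations below wire into this, sketched from rename():
#   generic_op(RenameForm, request, smart_rename, ["src_path", "dest_path"], None)
# i.e. validate RenameForm against the POST data, extract the two cleaned fields
# and call smart_rename(src_path, dest_path) with them.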
def generic_op(form_class, request, op, parameter_names, piggyback=None, template="fileop.mako",
               data_extractor=default_data_extractor, arg_extractor=default_arg_extractor,
               initial_value_extractor=default_initial_value_extractor, extra_params=None):
  """
  Generic implementation for several operations.

  @param form_class form to instantiate
  @param request incoming request, used for parameters
  @param op callable with the filesystem operation
  @param parameter_names list of form parameters that are extracted and then passed to op
  @param piggyback list of form parameters whose file stats to look up after the operation
  @param data_extractor function that extracts POST data to be used by op
  @param arg_extractor function that extracts args from a given form or formset
  @param initial_value_extractor function that extracts the initial values of a form or formset
  @param extra_params dictionary of extra parameters to send to the template for rendering
  """
  # Use next for non-ajax requests, when available.
  next = request.GET.get("next", request.POST.get("next", None))

  ret = dict({
    'next': next
  })

  if extra_params is not None:
    ret['extra_params'] = extra_params

  for p in parameter_names:
    val = request.GET.get(p)
    if val:
      ret[p] = val

  if request.method == 'POST':
    form = form_class(**data_extractor(request))
    ret['form'] = form
    if form.is_valid():
      args = arg_extractor(request, form, parameter_names)
      try:
        op(*args)
      except (IOError, WebHdfsException) as e:
        msg = _("Cannot perform operation.")
        raise PopupException(msg, detail=e)
      except S3FileSystemException as e:
        msg = _("S3 filesystem exception.")
        raise PopupException(msg, detail=e)
      except NotImplementedError as e:
        msg = _("Cannot perform operation.")
        raise PopupException(msg, detail=e)

      if next:
        logging.debug("Next: %s" % next)
        # Doesn't need to be quoted: quoting is done by HttpResponseRedirect.
        return format_preserving_redirect(request, next)
      ret["success"] = True
      try:
        if piggyback:
          piggy_path = form.cleaned_data.get(piggyback)
          ret["result"] = _massage_stats(request, stat_absolute_path(piggy_path, request.fs.stats(piggy_path)))
      except Exception as e:
        # Hard to report these more naturally here. These happen either
        # because of a bug in the piggy-back code or because of a
        # race condition.
        logger.exception("Exception while processing piggyback data")
        ret["result_error"] = True

      ret['user'] = request.user
      if request.is_ajax():
        return HttpResponse()
      else:
        return render(template, request, ret)
  else:
    # Initial parameters may be specified with get with the default extractor
    initial_values = initial_value_extractor(request, parameter_names)
    formset = form_class(initial=initial_values)
    ret['form'] = formset

  return render(template, request, ret)

def rename(request):
  def smart_rename(src_path, dest_path):
    """If dest_path doesn't have a directory specified, use same dir."""
    if "#" in dest_path:
      raise PopupException(_("Could not rename folder \"%s\" to \"%s\": Hashes are not allowed in filenames.") % (src_path, dest_path))
    if "/" not in dest_path:
      src_dir = os.path.dirname(src_path)
      dest_path = request.fs.join(urllib_unquote(src_dir), urllib_unquote(dest_path))
    if request.fs.exists(dest_path):
      raise PopupException(_('The destination path "%s" already exists.') % dest_path)
    request.fs.rename(src_path, dest_path)

  return generic_op(RenameForm, request, smart_rename, ["src_path", "dest_path"], None)

def set_replication(request):
  def smart_set_replication(src_path, replication_factor):
    result = request.fs.set_replication(urllib_unquote(src_path), replication_factor)
    if not result:
      raise PopupException(_("Setting of replication factor failed"))

  return generic_op(SetReplicationFactorForm, request, smart_set_replication, ["src_path", "replication_factor"], None)

def mkdir(request):
  def smart_mkdir(path, name):
    # Make sure only one directory is specified at a time.
    # No absolute directory specification allowed.
    if posixpath.sep in name or "#" in name:
      raise PopupException(_("Could not name folder \"%s\": Slashes or hashes are not allowed in filenames.") % name)
    request.fs.mkdir(request.fs.join(urllib_unquote(path), urllib_unquote(name)))

  return generic_op(MkDirForm, request, smart_mkdir, ["path", "name"], "path")

def touch(request):
  def smart_touch(path, name):
    # Make sure only the filename is specified.
    # No absolute path specification allowed.
    if posixpath.sep in name:
      raise PopupException(_("Could not name file \"%s\": Slashes are not allowed in filenames.") % name)
    request.fs.create(
        request.fs.join(
            urllib_unquote(path.encode('utf-8') if not isinstance(path, str) else path),
            urllib_unquote(name.encode('utf-8') if not isinstance(name, str) else name)
        )
    )

  return generic_op(TouchForm, request, smart_touch, ["path", "name"], "path")

@require_http_methods(["POST"])
def rmtree(request):
  recurring = []
  params = ["path"]

  def bulk_rmtree(*args, **kwargs):
    for arg in args:
      request.fs.do_as_user(request.user, request.fs.rmtree, urllib_unquote(arg['path']), 'skip_trash' in request.GET)

  return generic_op(RmTreeFormSet, request, bulk_rmtree, ["path"], None,
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def move(request):
  recurring = ['dest_path']
  params = ['src_path']

  def bulk_move(*args, **kwargs):
    for arg in args:
      if arg['src_path'] == arg['dest_path']:
        raise PopupException(_('Source path and destination path cannot be the same'))
      request.fs.rename(
          urllib_unquote(arg['src_path'].encode('utf-8') if not isinstance(arg['src_path'], str) else arg['src_path']),
          urllib_unquote(arg['dest_path'].encode('utf-8') if not isinstance(arg['dest_path'], str) else arg['dest_path'])
      )

  return generic_op(RenameFormSet, request, bulk_move, ["src_path", "dest_path"], None,
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def copy(request):
  recurring = ['dest_path']
  params = ['src_path']

  def bulk_copy(*args, **kwargs):
    for arg in args:
      if arg['src_path'] == arg['dest_path']:
        raise PopupException(_('Source path and destination path cannot be the same'))
      request.fs.copy(unquote_url(arg['src_path']), unquote_url(arg['dest_path']), recursive=True, owner=request.user)

  return generic_op(CopyFormSet, request, bulk_copy, ["src_path", "dest_path"], None,
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def chmod(request):
  recurring = ["sticky", "user_read", "user_write", "user_execute",
               "group_read", "group_write", "group_execute",
               "other_read", "other_write", "other_execute"]
  params = ["path"]

  def bulk_chmod(*args, **kwargs):
    op = partial(request.fs.chmod, recursive=request.POST.get('recursive', False))
    for arg in args:
      op(urllib_unquote(arg['path']), arg['mode'])

  # mode here is abused: on input, it's a string, but when retrieved,
  # it's an int.
  return generic_op(ChmodFormSet, request, bulk_chmod, ['path', 'mode'], "path",
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def chown(request):
  # This is a bit clever: generic_op takes an argument (here, args), indicating
  # which POST parameters to pick out and pass to the given function.
  # We update that mapping based on whether or not the user selected "other".
  param_names = ["path", "user", "group"]
  if request.POST.get("user") == "__other__":
    param_names[1] = "user_other"
  if request.POST.get("group") == "__other__":
    param_names[2] = "group_other"

  recurring = ["user", "group", "user_other", "group_other"]
  params = ["path"]

  def bulk_chown(*args, **kwargs):
    op = partial(request.fs.chown, recursive=request.POST.get('recursive', False))
    for arg in args:
      varg = [urllib_unquote(arg[param]) for param in param_names]
      op(*varg)

  return generic_op(ChownFormSet, request, bulk_chown, param_names, "path",
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def trash_restore(request):
  recurring = []
  params = ["path"]

  def bulk_restore(*args, **kwargs):
    for arg in args:
      request.fs.do_as_user(request.user, request.fs.restore, urllib_unquote(arg['path']))

  return generic_op(RestoreFormSet, request, bulk_restore, ["path"], None,
                    data_extractor=formset_data_extractor(recurring, params),
                    arg_extractor=formset_arg_extractor,
                    initial_value_extractor=formset_initial_value_extractor)

@require_http_methods(["POST"])
def trash_purge(request):
  return generic_op(TrashPurgeForm, request, request.fs.purge_trash, [], None)

@require_http_methods(["POST"])
def upload_file(request):
  """
  A wrapper around the actual upload view function to clean up the temporary file afterwards if it fails.

  Returns JSON, e.g. {'status': 0/1, 'data': 'message'...}
  """
  response = {'status': -1, 'data': ''}

  try:
    resp = _upload_file(request)
    response.update(resp)
  except Exception as ex:
    logger.exception('Upload failure')
    response['data'] = smart_str(ex).split('\n', 1)[0]
    hdfs_file = request.FILES.get('hdfs_file')
    if hdfs_file and hasattr(hdfs_file, 'remove'):  # TODO: Call from proxyFS
      hdfs_file.remove()

  return JsonResponse(response)

def _upload_file(request):
  """
  Handles a file uploaded by HDFSfileUploadHandler.

  The uploaded file is stored in HDFS at its destination with a .tmp suffix.
  We just need to rename it to the destination path.
  """
  form = UploadFileForm(request.POST, request.FILES)
  response = {'status': -1, 'data': ''}

  if request.META.get('upload_failed'):
    raise PopupException(request.META.get('upload_failed'))

  if form.is_valid():
    uploaded_file = request.FILES['hdfs_file']
    dest = scheme_absolute_path(unquote_url(request.GET['dest']), unquote_url(request.GET['dest']))
    filepath = request.fs.join(dest, unquote_url(uploaded_file.name))

    if request.fs.isdir(dest) and posixpath.sep in uploaded_file.name:
      raise PopupException(_('Sorry, no "%(sep)s" in the filename %(name)s.') % {'sep': posixpath.sep, 'name': uploaded_file.name})

    try:
      request.fs.upload(file=uploaded_file, path=dest, username=request.user.username)
      response['status'] = 0
    except IOError as ex:
      already_exists = False
      try:
        already_exists = request.fs.exists(dest)
      except Exception:
        pass
      if already_exists:
        msg = _('Destination %(name)s already exists.') % {'name': filepath}
      else:
        msg = _('Copy to %(name)s failed: %(error)s') % {'name': filepath, 'error': ex}
      raise PopupException(msg)

    response.update({
      'path': filepath,
      'result': _massage_stats(request, stat_absolute_path(filepath, request.fs.stats(filepath))),
      'next': request.GET.get("next")
    })

    return response
  else:
    raise PopupException(_("Error in upload form: %s") % (form.errors,))

@require_http_methods(["POST"])
def extract_archive_using_batch_job(request):
  response = {'status': -1, 'data': ''}
  if ENABLE_EXTRACT_UPLOADED_ARCHIVE.get():
    upload_path = request.fs.netnormpath(request.POST.get('upload_path', None))
    archive_name = request.POST.get('archive_name', None)
    if upload_path and archive_name:
      try:
        upload_path = urllib_unquote(upload_path)
        archive_name = urllib_unquote(archive_name)
        response = extract_archive_in_hdfs(request, upload_path, archive_name)
      except Exception as e:
        response['message'] = _('Exception occurred while extracting archive: %s') % e
  else:
    response['message'] = _('ERROR: Configuration parameter enable_extract_uploaded_archive '
                            'has to be enabled before calling this method.')

  return JsonResponse(response)

@require_http_methods(["POST"])
def compress_files_using_batch_job(request):
  response = {'status': -1, 'data': ''}
  if ENABLE_EXTRACT_UPLOADED_ARCHIVE.get():
    upload_path = request.fs.netnormpath(request.POST.get('upload_path', None))
    archive_name = request.POST.get('archive_name', None)
    file_names = request.POST.getlist('files[]')
    if upload_path and file_names and archive_name:
      try:
        upload_path = urllib_unquote(upload_path)
        archive_name = urllib_unquote(archive_name)
        file_names = [urllib_unquote(name) for name in file_names]
        response = compress_files_in_hdfs(request, file_names, upload_path, archive_name)
      except Exception as e:
        response['message'] = _('Exception occurred while compressing files: %s') % e
    else:
      response['message'] = _('Error: Output directory is not set.')
  else:
    response['message'] = _('ERROR: Configuration parameter enable_extract_uploaded_archive '
                            'has to be enabled before calling this method.')

  return JsonResponse(response)

def status(request):
  status = request.fs.status()
  data = {
    # Beware: "messages" is special in the context browser.
    'msgs': status.get_messages(),
    'health': status.get_health(),
    'datanode_report': status.get_datanode_report(),
    'name': request.fs.name
  }
  return render("status.mako", request, data)

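# Example: truncate('a' * 60) returns fifty 'a's followed by "...";
# truncate('short') is returned unchanged.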
def truncate(toTruncate, charsToKeep=50):
  """
  Returns a string truncated to 'charsToKeep' length plus ellipses.
  """
  if len(toTruncate) > charsToKeep:
    truncated = toTruncate[:charsToKeep] + "..."
    return truncated
  else:
    return toTruncate

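# Example: unquote_url('/user/test/a%20b') -> '/user/test/a b'; input without
# percent-escapes passes through unchanged.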
def unquote_url(url):
  url = urllib_unquote(url.encode('utf-8') if not isinstance(url, str) else url)
  return url.decode('utf-8') if isinstance(url, bytes) else url

def _is_hdfs_superuser(request):
  return request.user.username == request.fs.superuser or request.user.groups.filter(name__exact=request.fs.supergroup).exists()