
HUE-310. Handle large file upload.

* Use a custom uploader to detect any fields starting with
  an `hdfs' prefix. It'll stream directly to a temp location
  in HDFS.
* Assume there is only one HDFS cluster.
* It will write data to HDFS before authenticating the user.
  (Just as the previous implementation always writes data to
  the local disk.)
bc Wong, 15 years ago
commit 26713916df
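
For context on the trigger mechanism: Django instantiates every handler listed in FILE_UPLOAD_HANDLERS for each request, and each handler sees the form field name in new_file() before any data is buffered. A handler claims the upload by raising StopFutureHandlers, so the stock memory/temp-file handlers never touch the data. Below is a minimal, self-contained sketch of that mechanism; the class name is hypothetical, and an in-memory buffer stands in for the HDFS temp file that the real HDFSfileUploadHandler (added in hadoop/fs/upload.py below) writes to.

import io
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadhandler import FileUploadHandler, StopFutureHandlers

class PrefixTriggeredUploadHandler(FileUploadHandler):
  """Sketch: claim only upload fields whose name starts with "hdfs"."""

  def new_file(self, field_name, file_name, *args, **kwargs):
    FileUploadHandler.new_file(self, field_name, file_name, *args, **kwargs)
    self._claimed = field_name.upper().startswith('HDFS')
    if self._claimed:
      self._buf = io.BytesIO()    # stand-in for the HDFS temp file
      raise StopFutureHandlers()  # later handlers never see this upload

  def receive_data_chunk(self, raw_data, start):
    if not self._claimed:
      return raw_data             # pass the chunk on to the next handler
    self._buf.write(raw_data)     # the real handler streams this chunk to HDFS
    return None                   # chunk consumed

  def file_complete(self, file_size):
    if not self._claimed:
      return None
    self._buf.seek(0)
    # The real handler returns its HDFS-backed file object here.
    return InMemoryUploadedFile(self._buf, self.field_name, self.file_name,
                                self.content_type, file_size, self.charset)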

+ 2 - 1
apps/filebrowser/src/filebrowser/forms.py

@@ -53,7 +53,8 @@ class RenameForm(forms.Form):
 
 class UploadForm(forms.Form):
   op = "upload"
-  file = FileField(forms.Form, label="File to Upload")
+  # The "hdfs" prefix in "hdfs_file" triggers the HDFSfileUploadHandler
+  hdfs_file = FileField(forms.Form, label="File to Upload")
   dest = PathField(label="Destination Path", help_text="Filename or directory to upload to.")
 
 class RemoveForm(forms.Form):

+ 1 - 1
apps/filebrowser/src/filebrowser/static/js/Source/FileBrowser/Hue.FileBrowser.js

@@ -585,7 +585,7 @@ Hue.FileBrowser.Uploader = new Class({
 			//container: null
 			path: '/static/js/ThirdParty/digitarald-fancyupload/source/Swiff.Uploader.swf',
 			//this is the key for the file in the POST params
-			fieldName: 'file',
+			fieldName: 'hdfs_file',
 			method: 'post',
 			appendCookieData: true
 		}

+ 1 - 1
apps/filebrowser/src/filebrowser/templates/upload.mako

@@ -22,7 +22,7 @@ ${comps.header('Upload Files')}
   <h4 class="jframe-hidden">Upload Files</h4>
   <dl>
     ${edit.render_field(form["dest"], hidden=True)}
-    ${edit.render_field(form["file"], render_default=True, notitle=True)}
+    ${edit.render_field(form["hdfs_file"], render_default=True, notitle=True)}
   </dl>
   <input class="jframe-hidden" type="submit" value="Submit" />
 </form>

+ 44 - 17
apps/filebrowser/src/filebrowser/views.py

@@ -627,31 +627,58 @@ def upload_flash(request):
 
 def upload(request):
   """
-  Handles file uploads.
-
-  Django has a hook for "upload handlers" that could make this more
-  efficient.  Currently Django will store files greater than 2.5MB in
-  a temp directory.  We could implement an upload handler to
-  send data right through to the destination.
+  A wrapper around the actual upload view function to clean up the
+  temporary file afterwards.
+  """
+  try:
+    return _upload(request)
+  finally:
+    if request.method == 'POST':
+      try:
+        upload_file = request.FILES['hdfs_file']
+        upload_file.remove()
+      except KeyError:
+        pass
 
-  http://docs.djangoproject.com/en/1.0/topics/http/file-uploads/#upload-handlers
+def _upload(request):
+  """
+  Handles file uploads. The uploaded file is stored in HDFS. We just
+  need to rename it to the right path.
   """
   if request.method == 'POST':
     form = UploadForm(request.POST, request.FILES)
-    if form.is_valid():
-      # Bit of a wart that form.file doesn't give you the file,
-      # and you have to do form.files.get("file").
-      file = form.files.get("file")
+    if not form.is_valid():
+      logger.error("Error in upload form: %s" % (form.errors,))
+    else:
+      uploaded_file = request.FILES['hdfs_file']
       dest = form.cleaned_data["dest"]
       if request.fs.isdir(dest):
-        assert posixpath.sep not in file.name
-        dest = posixpath.join(dest, file.name)
-      output = request.fs.open(dest, "w")
+        assert posixpath.sep not in uploaded_file.name
+        dest = request.fs.join(dest, uploaded_file.name)
+
+      # Temp file is created by superuser. Chown the file.
+      tmp_file = uploaded_file.get_temp_path()
+      username = request.user.username
       try:
-        for chunk in file.chunks():
-          output.write(chunk)
+        try:
+          request.fs.setuser(request.fs.superuser)
+          request.fs.chmod(tmp_file, 0644)
+          request.fs.chown(tmp_file, username, username)
+        except IOError, ex:
+          msg = 'Failed to chown uploaded file ("%s") as superuser %s' % \
+                (tmp_file, request.fs.superuser)
+          logger.exception(msg)
+          raise PopupException(msg, detail=str(ex))
       finally:
-        output.close()
+        request.fs.setuser(username)
+
+      # Move the file to where it belongs
+      try:
+        request.fs.rename(uploaded_file.get_temp_path(), dest)
+      except IOError, ex:
+        raise PopupException(
+            'Failed to rename uploaded temporary file ("%s") to "%s": %s' %
+            (tmp_file, dest, ex))
 
       dest_stats = request.fs.stats(dest)
       return render_with_toolbars('upload_done.mako', request, {

+ 31 - 0
apps/filebrowser/src/filebrowser/views_test.py

@@ -305,3 +305,34 @@ def edit_helper(cluster, encoding, contents_pass_1, contents_pass_2):
       cluster.fs.remove(filename)
     except Exception, ex:
       LOG.error('Failed to remove %s: %s' % (filename, ex))
+
+
+@attr('requires_hadoop')
+def test_upload():
+  """Test file upload"""
+  cluster = mini_cluster.shared_cluster(conf=True)
+  try:
+    USER_NAME = cluster.fs.superuser
+    cluster.fs.setuser(USER_NAME)
+    DEST = "/tmp/fb-upload-test"
+    client = make_logged_in_client(USER_NAME)
+
+    # Just upload the current python file
+    resp = client.post('/filebrowser/upload',
+                       dict(dest=DEST, hdfs_file=file(__file__)))
+
+    assert_true("Upload Complete" in resp.content)
+    stats = cluster.fs.stats(DEST)
+    assert_equal(stats['user'], USER_NAME)
+    assert_equal(stats['group'], USER_NAME)
+
+    f = cluster.fs.open(DEST)
+    actual = f.read()
+    expected = file(__file__).read()
+    assert_equal(actual, expected)
+  finally:
+    try:
+      cluster.fs.remove(DEST)
+    except Exception, ex:
+      pass
+    cluster.shutdown()

+ 7 - 6
desktop/core/src/desktop/lib/django_test_util.py

@@ -18,13 +18,14 @@
 Common utilities for testing Desktop django apps.
 """
 
-import django.test.client
+import logging
+import re
 import simplejson
-from django.contrib.auth.models import User, Group
 
-import re
+import django.test.client
+from django.contrib.auth.models import User
+
 import nose.tools
-import logging
 
 class Client(django.test.client.Client):
   """
@@ -40,8 +41,8 @@ def assert_ok_response(response):
 
   Returns the response.
   """
-  assert_true(200, response.status_code)
-  return reponse
+  nose.tools.assert_true(200, response.status_code)
+  return response
 
 def make_logged_in_client(username="test", password="test", is_superuser=True, recreate=False):
   """

+ 6 - 0
desktop/core/src/desktop/settings.py

@@ -146,6 +146,12 @@ LOGIN_REDIRECT_URL = "/"
 
 PYLINTRC = get_desktop_root('.pylintrc')
 
+# Insert our HDFS upload handler
+FILE_UPLOAD_HANDLERS = (
+  'hadoop.fs.upload.HDFSfileUploadHandler',
+  'django.core.files.uploadhandler.MemoryFileUploadHandler',
+  'django.core.files.uploadhandler.TemporaryFileUploadHandler',
+)
 
 ############################################################
 # Part 4: Installation of apps

+ 2 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -130,6 +130,8 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
                                    default="hdfs", type=str),
       SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
                               default=False, type=coerce_bool),
+      TEMP_DIR=Config("temp_dir", help="HDFS directory for temporary files",
+                      default='/tmp', type=str),
     )
   )
 )

+ 23 - 1
desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py

@@ -181,7 +181,8 @@ class HadoopFileSystem(object):
                nn_kerberos_principal="hdfs",
                dn_kerberos_principal="hdfs",
                security_enabled=False,
-               hadoop_bin_path="hadoop"):
+               hadoop_bin_path="hadoop",
+               temp_dir='/tmp'):
     """
     @param host hostname or IP of the namenode
     @param thrift_port port on which the Thrift plugin is listening
@@ -189,6 +190,7 @@ class HadoopFileSystem(object):
     @param hadoop_bin_path path to find the hadoop wrapper script on the
                            installed system - default is fine if it is in
                            the user's PATH env
+    @param temp_dir Temporary directory, for mktemp()
     """
     self.host = host
     self.thrift_port = thrift_port
@@ -199,6 +201,7 @@ class HadoopFileSystem(object):
     self.hadoop_bin_path = hadoop_bin_path
     self._resolve_hadoop_path()
     self.security_enabled = security_enabled
+    self._temp_dir = temp_dir
 
     self.nn_client = thrift_util.get_client(
       Namenode.Client, host, thrift_port,
@@ -408,6 +411,24 @@ class HadoopFileSystem(object):
     path = encode_fs_path(path)
     self.nn_client.chown(self.request_context, normpath(path), user, group)
 
+  @_coerce_exceptions
+  def mktemp(self, subdir='', prefix='tmp'):
+    """
+    mktemp(prefix) ->  <temp_dir>/subdir/prefix.<rand>
+    Return a unique temporary filename with prefix in the cluster's temp dir.
+    """
+    RANDOM_BITS = 64
+
+    base = self.join(self._temp_dir, subdir)
+    if not self.isdir(base):
+      self.mkdir(base)
+
+    while True:
+      name = prefix + '.' + str(random.getrandbits(RANDOM_BITS))
+      candidate = self.join(base, name)
+      if not self.exists(candidate):
+        return candidate
+
   @_coerce_exceptions
   def get_namenode_info(self):
     (capacity, used, available) = self.nn_client.df(self.request_context)
@@ -817,6 +838,7 @@ class FileUpload(object):
                                    bufsize=WRITE_BUFFER_SIZE)
   @require_open
   def write(self, data):
+    """May raise IOError, particularly EPIPE"""
     self.putter.stdin.write(data)
 
   @require_open
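
A quick usage sketch of the new mktemp() helper (the fs instance and the values are hypothetical; the path layout follows the docstring above):

# fs is assumed to be a connected HadoopFileSystem instance.
tmp_path = fs.mktemp(subdir='hue-uploads', prefix='tmp.10.0.0.1')
# e.g. '/tmp/hue-uploads/tmp.10.0.0.1.13116911623244879164'
upload = fs.open(tmp_path, 'w')   # returns a FileUpload; write() may raise IOError (EPIPE)
try:
  upload.write('some bytes')
finally:
  upload.close()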

+ 160 - 0
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Classes for a custom upload handler to stream into HDFS.
+
+Note that since our middlewares inspect request.POST, we cannot inject a custom
+handler into a specific view. Therefore we always use the HDFSfileUploadHandler,
+which is triggered by a magic prefix ("HDFS") in the field name.
+
+See http://docs.djangoproject.com/en/1.2/topics/http/file-uploads/
+"""
+
+import errno
+import logging
+import time
+
+from django.core.files.uploadhandler import \
+    FileUploadHandler, StopFutureHandlers, StopUpload
+from desktop.lib import fsmanager
+import hadoop.fs.hadoopfs
+
+UPLOAD_SUBDIR = 'hue-uploads'
+LOG = logging.getLogger(__name__)
+
+
+class HDFSerror(Exception):
+  pass
+
+class HDFStemporaryUploadedFile(hadoop.fs.hadoopfs.FileUpload):
+  """
+  A temporary HDFS file to store upload data.
+  This class does not have any file read methods.
+  """
+  def __init__(self, request, name):
+    self.name = name
+    self.size = None
+    self._do_cleanup = False
+    try:
+      self._fs = request.fs
+    except AttributeError:
+      _, self._fs = fsmanager.get_default_hdfs()
+
+    # Don't want to handle this upload if we don't have an HDFS
+    if not self._fs:
+      raise HDFSerror("No HDFS found")
+
+    # We want to set the user to be the superuser. But any operation
+    # in the fs needs a username, including the retrieval of the superuser.
+    # So we first set it to the DEFAULT_USER to break this chicken-&-egg.
+    self._fs.setuser(hadoop.fs.hadoopfs.DEFAULT_USER)
+    self._fs.setuser(self._fs.superuser)
+
+    self._path = self._fs.mktemp(
+        subdir='hue-uploads',
+        prefix='tmp.%s' % (request.environ['REMOTE_ADDR'],))
+
+    # Make the tmp dir 0777
+    self._fs.chmod(self._fs.dirname(self._path), 0777)
+    hadoop.fs.hadoopfs.FileUpload.__init__(self, self._fs, self._path)
+    self._do_cleanup = True
+
+  def __del__(self):
+    if self._do_cleanup:
+      # Do not do cleanup here. It's hopeless. The self._fs threadlocal states
+      # are going to be all wrong.
+      LOG.error("Left-over upload file is not cleaned up: %s" % (self._path,))
+
+  def get_temp_path(self):
+    return self._path
+
+  def finish_upload(self, size):
+    try:
+      self.size = size
+      self.close()
+    except Exception, ex:
+      LOG.exception('Error uploading file to %s' % (self.path,))
+      raise
+
+  def remove(self):
+    try:
+      self._fs.remove(self._path)
+      self._do_cleanup = False
+    except IOError, ex:
+      if ex.errno != errno.ENOENT:
+        LOG.exception('Failed to remove temporary upload file "%s". '
+                      'Please cleanup manually: %s' % (self._path, ex))
+
+
+class HDFSfileUploadHandler(FileUploadHandler):
+  """
+  Handle file upload by storing data in a temp HDFS file.
+
+  This handler is triggered by any upload field whose name starts with
+  "HDFS" (case insensitive).
+  """
+  def __init__(self, request):
+    FileUploadHandler.__init__(self, request)
+    self._file = None
+    self._starttime = 0
+    self._activated = False
+
+  def new_file(self, field_name, file_name, *args, **kwargs):
+    # Detect "HDFS" in the field name.
+    # NOTE: The user is not authenticated at this point, and it's
+    #       very difficult to do so because we handle upload before
+    #       running the auth middleware.
+    if field_name.upper().startswith('HDFS'):
+      try:
+        self._file = HDFStemporaryUploadedFile(self.request, file_name)
+      except (HDFSerror, IOError), ex:
+        LOG.error("Not using HDFS upload handler: %s" % (ex,))
+        return
+
+      LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
+      self._activated = True
+      self._starttime = time.time()
+      raise StopFutureHandlers()
+
+  def receive_data_chunk(self, raw_data, start):
+    if not self._activated:
+      return raw_data
+
+    try:
+      self._file.write(raw_data)
+      self._file.flush()
+      return None
+    except IOError:
+      LOG.exception('Error storing upload data in temporary file "%s"' %
+                    (self._file.get_temp_path(),))
+      raise StopUpload()
+
+  def file_complete(self, file_size):
+    if not self._activated:
+      return None
+
+    try:
+      self._file.finish_upload(file_size)
+    except IOError:
+      LOG.exception('Error closing uploaded temporary file "%s"' %
+                    (self._file.get_temp_path(),))
+      raise
+
+    elapsed = time.time() - self._starttime
+    LOG.debug('Uploaded %s bytes to HDFS in %s seconds' % (file_size, elapsed))
+    return self._file
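
Putting the pieces together, here is a condensed sketch of the path from the upload handler to the view, using the names introduced in this commit (the function name is hypothetical; the behaviour is just the views.py change above restated in one place):

def handle_hdfs_upload(request, form):
  # HDFSfileUploadHandler has already streamed the data into a temp file in HDFS.
  uploaded = request.FILES['hdfs_file']          # an HDFStemporaryUploadedFile
  dest = form.cleaned_data['dest']
  if request.fs.isdir(dest):
    dest = request.fs.join(dest, uploaded.name)

  tmp = uploaded.get_temp_path()                 # temp file is owned by the HDFS superuser
  username = request.user.username
  request.fs.setuser(request.fs.superuser)
  try:
    request.fs.chmod(tmp, 0644)                  # Python 2 octal literal, as in views.py
    request.fs.chown(tmp, username, username)
  finally:
    request.fs.setuser(username)

  request.fs.rename(tmp, dest)                   # move into place; the data is not copied again
  return dest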