
HUE-2975 [aws] Add S3 UploadFileHandler and implement upload to S3

Also includes:
HUE-2975 [aws] Implement direct streaming to S3
HUE-2975 [aws] Add live-cluster S3 upload test
Jenny Kim, 9 years ago
parent
commit
88d01c5

+ 23 - 32
apps/filebrowser/src/filebrowser/views.py

@@ -16,7 +16,6 @@
# limitations under the License.

import errno
-import json
import logging
import mimetypes
import operator
@@ -1142,6 +1141,7 @@ def trash_purge(request):
    return generic_op(TrashPurgeForm, request, request.fs.purge_trash, [], None)


+@require_http_methods(["POST"])
def upload_file(request):
    """
    A wrapper around the actual upload view function to clean up the temporary file afterwards if it fails.
@@ -1151,19 +1151,17 @@ def upload_file(request):
     """
     """
     response = {'status': -1, 'data': ''}
     response = {'status': -1, 'data': ''}
 
 
-    if request.method == 'POST':
-        try:
-            resp = _upload_file(request)
-            response.update(resp)
-        except Exception, ex:
-            response['data'] = str(ex).split('\n', 1)[0]
-            hdfs_file = request.FILES.get('hdfs_file')
-            if hdfs_file:
-                hdfs_file.remove()
-    else:
-        response['data'] = _('A POST request is required.')
+    try:
+        resp = _upload_file(request)
+        response.update(resp)
+    except Exception, ex:
+        response['data'] = str(ex).split('\n', 1)[0]
+        hdfs_file = request.FILES.get('hdfs_file')
+        if hdfs_file and hasattr(hdfs_file, 'remove'):  # TODO: Call from proxyFS
+            hdfs_file.remove()
+
+    return JsonResponse(response)

-    return HttpResponse(json.dumps(response), content_type="text/plain")


def _upload_file(request):
    """
@@ -1185,13 +1183,8 @@ def _upload_file(request):
        if request.fs.isdir(dest) and posixpath.sep in uploaded_file.name:
            raise PopupException(_('Sorry, no "%(sep)s" in the filename %(name)s.' % {'sep': posixpath.sep, 'name': uploaded_file.name}))

-        dest = request.fs.join(dest, uploaded_file.name)
-        tmp_file = uploaded_file.get_temp_path()
-        username = request.user.username
-
        try:
-            # Remove tmp suffix of the file
-            request.fs.do_as_user(username, request.fs.rename, tmp_file, dest)
+            request.fs.upload(file=uploaded_file, path=dest, username=request.user.username)
            response['status'] = 0
        except IOError, ex:
            already_exists = False
@@ -1216,6 +1209,7 @@ def _upload_file(request):
        raise PopupException(_("Error in upload form: %s") % (form.errors,))


+@require_http_methods(["POST"])
def upload_archive(request):
    """
    A wrapper around the actual upload view function to clean up the temporary file afterwards.
@@ -1225,21 +1219,18 @@ def upload_archive(request):
     """
     """
     response = {'status': -1, 'data': ''}
     response = {'status': -1, 'data': ''}
 
 
-    if request.method == 'POST':
+    try:
        try:
-            try:
-                resp = _upload_archive(request)
-                response.update(resp)
-            except Exception, ex:
-                response['data'] = str(ex)
-        finally:
-            hdfs_file = request.FILES.get('hdfs_file')
-            if hdfs_file:
-                hdfs_file.remove()
-    else:
-        response['data'] = _('A POST request is required.')
+            resp = _upload_archive(request)
+            response.update(resp)
+        except Exception, ex:
+            response['data'] = str(ex)
+    finally:
+        hdfs_file = request.FILES.get('hdfs_file')
+        if hdfs_file:
+            hdfs_file.remove()

-    return HttpResponse(json.dumps(response), content_type="text/plain")
+    return JsonResponse(response)


def _upload_archive(request):

+ 3 - 0
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -210,3 +210,6 @@ class ProxyFS(object):

  def _rename_star_between_filesystems(self, old, new):
    raise NotImplementedError("Will be addressed in HUE-2934")
+
+  def upload(self, file, path, *args, **kwargs):
+    self._get_fs(path).upload(file, path, *args, **kwargs)
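
ProxyFS simply forwards upload() to whichever filesystem owns the destination path's scheme, the same way it already proxies its other operations. A rough usage sketch, assuming a ProxyFS wired with an s3 and an hdfs filesystem (the constructor arguments shown are illustrative assumptions, not part of this change):

  # Hypothetical wiring, for illustration only
  proxy_fs = ProxyFS({'s3': s3_fs, 'hdfs': hdfs_fs}, default_scheme='hdfs')
  proxy_fs.upload(uploaded_file, 's3://bucket/dir')   # routed to s3_fs.upload()
  proxy_fs.upload(uploaded_file, '/user/test/dir')    # no scheme, routed to the default hdfs_fs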

+ 2 - 1
desktop/core/src/desktop/settings.py

@@ -228,8 +228,9 @@ LOGOUT_REDIRECT_URL = "/" # For djangosaml2 bug.

PYLINTRC = get_desktop_root('.pylintrc')

-# Insert our HDFS upload handler
+# Insert our custom upload handlers
FILE_UPLOAD_HANDLERS = (
+  'aws.s3.upload.S3FileUploadHandler',
  'hadoop.fs.upload.HDFSfileUploadHandler',
  'django.core.files.uploadhandler.MemoryFileUploadHandler',
  'django.core.files.uploadhandler.TemporaryFileUploadHandler',
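
Django walks FILE_UPLOAD_HANDLERS in order for every request, so ordering matters: the first handler whose new_file() raises StopFutureHandlers takes over the stream and later handlers never see the file. A sketch of the intended flow with the new ordering (the inline comments are explanatory, not part of the setting):

  FILE_UPLOAD_HANDLERS = (
    'aws.s3.upload.S3FileUploadHandler',        # claims uploads whose ?dest= is an s3:// path
    'hadoop.fs.upload.HDFSfileUploadHandler',   # claims form fields named HDFS*
    'django.core.files.uploadhandler.MemoryFileUploadHandler',     # Django defaults for everything else
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
  )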

+ 6 - 0
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -36,6 +36,7 @@ from aws.s3.s3stat import S3Stat


DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
+
LOG = logging.getLogger(__name__)


@@ -356,5 +357,10 @@ class S3FileSystem(object):
        remote_file = remote_dst
      _copy_file(local_src, remote_file)

+  @translate_s3_error
+  def upload(self, file, path, *args, **kwargs):
+    pass  # upload is handled by S3FileUploadHandler
+
+
  def setuser(self, user):
    pass  # user-concept doesn't have sense for this implementation

+ 50 - 2
desktop/libs/aws/src/aws/s3/s3fs_test.py

@@ -15,24 +15,35 @@
# limitations under the License.
from __future__ import absolute_import

+import json
import os
import tempfile
import string

-from nose.tools import assert_true, assert_false, assert_raises, eq_
+from nose.tools import assert_equal, assert_false, assert_true, assert_raises, eq_
+
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.test_utils import grant_access, add_to_group

from aws.s3 import join, parse_uri
from aws.s3.s3fs import S3FileSystem
from aws.s3.s3test_utils import S3TestBase, generate_id
+from aws.s3.upload import DEFAULT_WRITE_SIZE


class S3FSTest(S3TestBase):
+
  @classmethod
  def setUpClass(cls):
    S3TestBase.setUpClass()
    if not cls.shouldSkip():
      cls.fs = S3FileSystem(cls.s3_connection)

+      cls.c = make_logged_in_client(username='test', is_superuser=False)
+      grant_access('test', 'test', 'filebrowser')
+      add_to_group('test')
+
+
  def test_open(self):
    path = self.get_test_path('test_open.txt')
@@ -53,6 +64,7 @@ class S3FSTest(S3TestBase):
      assert_raises(Exception, self.fs.open, path, mode='w')
      assert_raises(Exception, self.fs.open, path, mode='?r')

+
  def test_read(self):
    path = self.get_test_path('test_read.txt')
    with self.cleaning(path):
@@ -62,12 +74,15 @@ class S3FSTest(S3TestBase):
      eq_('Hel', self.fs.read(path, 0, 3))
      eq_('ell', self.fs.read(path, 1, 3))

+
  def test_isfile(self):
    pass

+
  def test_isdir(self):
    pass

+
  def test_exists(self):
    dir_path = self.get_test_path('test_exists')
    file_path = join(dir_path, 'file')
@@ -85,6 +100,7 @@ class S3FSTest(S3TestBase):
    fake_bucket = 'fake%s' % generate_id(8, string.ascii_lowercase + string.digits)
    assert_false(self.fs.exists('s3://%s' % fake_bucket))

+
  def test_stats(self):
    assert_raises(ValueError, self.fs.stats, 'ftp://archive')
    not_exists = self.get_test_path('does_not_exist')
@@ -97,7 +113,8 @@ class S3FSTest(S3TestBase):
    bucket_stat = self.fs.stats('s3://%s' % self.bucket_name)
    eq_(True, bucket_stat.isDir)
    eq_('s3://%s' % self.bucket_name, bucket_stat.path)
-    
+
+
  def test_copyfile(self):
    src_path = self.get_test_path('test_copy_file_src')
    dst_path = self.get_test_path('test_copy_file_dst')
@@ -110,6 +127,7 @@ class S3FSTest(S3TestBase):
      actual = self.fs.read(dst_path, 0, len(data) + 100)
      eq_(data, actual)

+
  def test_full_copy(self):
    src_path = self.get_test_path('test_full_copy_src')
    dst_path = self.get_test_path('test_full_copy_dst')
@@ -138,6 +156,7 @@ class S3FSTest(S3TestBase):
      # Copy directory to file should fail.
      assert_raises(IOError, self.fs.copy, src_path, dst_file_path, True)

+
  def test_copy_remote_dir(self):
    src_dir = self.get_test_path('test_copy_remote_dir_src')
    dst_dir = self.get_test_path('test_copy_remote_dir_dst')
@@ -159,6 +178,7 @@ class S3FSTest(S3TestBase):
      assert_true(src_names)
      eq_(src_names, dst_names)

+
  def test_copy_from_local(self):
    src_name = 'test_copy_from_local_src'
    src_path = os.path.join(tempfile.gettempdir(), src_name)
@@ -174,6 +194,7 @@ class S3FSTest(S3TestBase):
      actual = self.fs.read(dst_path, 0, len(data) + 100)
      eq_(data, actual)

+
  def test_rename_star(self):
    src_dir = self.get_test_path('test_rename_star_src')
    dst_dir = self.get_test_path('test_rename_star_dst')
@@ -200,6 +221,7 @@ class S3FSTest(S3TestBase):
      assert_true(src_names)
      eq_(src_names, dst_names)

+
  def test_rmtree(self):
    assert_raises(NotImplementedError, self.fs.rmtree, 'universe', skipTrash=False)

@@ -218,13 +240,39 @@ class S3FSTest(S3TestBase):
      assert_false(self.fs.exists(nested_dir))
      assert_false(self.fs.exists(directory))

+
  def test_listing_buckets(self):
    buckets = self.fs.listdir('s3://')
    assert_true(len(buckets) > 0)

+
  def test_mkdir(self):
    dir_path = self.get_test_path('test_mkdir')
    assert_false(self.fs.exists(dir_path))

    self.fs.mkdir(dir_path)
    assert_true(self.fs.exists(dir_path))
+
+
+  def test_upload_file(self):
+    with tempfile.NamedTemporaryFile() as local_file:
+      # Make sure we can upload larger than the UPLOAD chunk size
+      file_size = DEFAULT_WRITE_SIZE * 2
+      local_file.write('0' * file_size)
+      local_file.flush()
+
+      dest_dir = self.get_test_path('test_upload')
+      local_file = local_file.name
+      dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
+
+      # Just upload the current python file
+      resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=file(local_file)))
+      response = json.loads(resp.content)
+
+      assert_equal(0, response['status'], response)
+      stats = self.fs.stats(dest_path)
+
+      f = self.fs.open(dest_path)
+      actual = f.read(file_size)
+      expected = file(local_file).read()
+      assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))

+ 139 - 0
desktop/libs/aws/src/aws/s3/upload.py

@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Classes for a custom upload handler to stream into S3.
+
+See http://docs.djangoproject.com/en/1.9/topics/http/file-uploads/
+"""
+
+import logging
+import StringIO
+
+from django.core.files.uploadedfile import SimpleUploadedFile
+from django.core.files.uploadhandler import FileUploadHandler, SkipFile, StopFutureHandlers, StopUpload
+
+from aws import get_s3fs
+from aws.s3 import parse_uri
+
+
+DEFAULT_WRITE_SIZE = 1024 * 1024 * 50  # TODO: set in configuration (currently 50 MiB)
+
+LOG = logging.getLogger(__name__)
+
+
+class S3FileUploadError(Exception):
+  pass
+
+
+class S3FileUploadHandler(FileUploadHandler):
+  """
+  This handler is triggered by any upload field whose destination path starts with "S3" (case insensitive).
+
+  Streams data chunks directly to S3
+  """
+  def __init__(self, request):
+    super(S3FileUploadHandler, self).__init__(request)
+    self.chunk_size = DEFAULT_WRITE_SIZE
+    self.destination = request.GET.get('dest', None)  # GET param avoids infinite looping
+    self.target_path = None
+    self.file = None
+    self._request = request
+    self._fs = self._get_s3fs(request)
+    self._mp = None
+    self._part_num = 1
+
+    if self._is_s3_upload():
+      self.bucket_name, self.key_name = parse_uri(self.destination)[:2]
+      # Verify that the path exists
+      self._fs._stats(self.destination)
+      self._bucket = self._fs._get_bucket(self.bucket_name)
+
+
+  def new_file(self, field_name, file_name, *args, **kwargs):
+    if self._is_s3_upload():
+      super(S3FileUploadHandler, self).new_file(field_name, file_name, *args, **kwargs)
+
+      LOG.info('Using S3FileUploadHandler to handle file upload.')
+      self.target_path = self._fs.join(self.key_name, file_name)
+
+      # Create a multipart upload request
+      LOG.debug("Initiating S3 multipart upload to target path: %s" % self.target_path)
+      self._mp = self._bucket.initiate_multipart_upload(self.target_path)
+      self.file = SimpleUploadedFile(name=file_name, content='')
+      raise StopFutureHandlers()
+
+
+  def receive_data_chunk(self, raw_data, start):
+    if self._is_s3_upload():
+      try:
+        LOG.debug("S3FileUploadHandler uploading file part: %d" % self._part_num)
+        fp = self._get_file_part(raw_data)
+        self._mp.upload_part_from_file(fp=fp, part_num=self._part_num)
+        self._part_num += 1
+        return None
+      except Exception, e:
+        self._mp.cancel_upload()
+        LOG.exception('Failed to upload file to S3 at %s: %s' % (self.target_path, e))
+        raise StopUpload()
+    else:
+      return raw_data
+
+
+  def file_complete(self, file_size):
+    if self._is_s3_upload():
+      # Finish the upload
+      LOG.info("S3FileUploadHandler has completed file upload to S3, total file size is: %d." % file_size)
+      self._mp.complete_upload()
+      self.file.size = file_size
+      return self.file
+    else:
+      return None
+
+
+  def _get_s3fs(self, request):
+    try:
+      fs = request.fs
+    except AttributeError:
+      fs = get_s3fs()
+
+    if not fs:
+      raise S3FileUploadError("No S3 filesystem found.")
+
+    return fs
+
+
+  def _is_s3_upload(self):
+    return self._get_scheme() and self._get_scheme().startswith('S3')
+
+
+  def _get_scheme(self):
+    if self.destination:
+      dst_parts = self.destination.split('://')
+      if dst_parts > 0:
+        return dst_parts[0].upper()
+      else:
+        raise IOError('Destination does not start with a valid scheme.')
+    else:
+      return None
+
+
+  def _get_file_part(self, raw_data):
+    fp = StringIO.StringIO()
+    fp.write(raw_data)
+    fp.seek(0)
+    return fp
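
The handler above drives boto's multipart-upload API; a standalone sketch of the same call sequence outside Django may make the flow easier to follow (Python 2 / boto 2, with placeholder bucket and key names):

  import StringIO
  import boto

  conn = boto.connect_s3()                       # credentials come from the environment / boto config
  bucket = conn.get_bucket('my-bucket')

  # S3 requires every part except the last to be at least 5 MB, which is why the
  # write size above is set well over that minimum.
  chunks = ['a' * (5 * 1024 * 1024), 'short final part']

  mp = bucket.initiate_multipart_upload('uploads/example.bin')
  try:
    for part_num, chunk in enumerate(chunks, 1):
      mp.upload_part_from_file(fp=StringIO.StringIO(chunk), part_num=part_num)
    mp.complete_upload()                         # S3 assembles the parts into one key
  except Exception:
    mp.cancel_upload()                           # abandon the uploaded parts on failure
    raise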

+ 7 - 4
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -30,12 +30,10 @@ import logging
import time

from django.core.files.uploadhandler import FileUploadHandler, StopFutureHandlers, StopUpload
-from django.utils.translation import ugettext as _

import hadoop.cluster
-
from hadoop.conf import UPLOAD_CHUNK_SIZE
-from hadoop.fs.exceptions import WebHdfsException
+

LOG = logging.getLogger(__name__)

@@ -135,9 +133,12 @@ class HDFSfileUploadHandler(FileUploadHandler):
    # Need to directly modify FileUploadHandler.chunk_size
    FileUploadHandler.chunk_size = UPLOAD_CHUNK_SIZE.get()

+    LOG.debug("Chunk size = %d" % FileUploadHandler.chunk_size)
+
  def new_file(self, field_name, file_name, *args, **kwargs):
    # Detect "HDFS" in the field name.
    if field_name.upper().startswith('HDFS'):
+      LOG.info('Using HDFSfileUploadHandler to handle file upload.')
      try:
        self._file = HDFStemporaryUploadedFile(self.request, file_name, self._destination)
        LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
@@ -150,6 +151,8 @@ class HDFSfileUploadHandler(FileUploadHandler):
      raise StopFutureHandlers()

  def receive_data_chunk(self, raw_data, start):
+    LOG.debug("HDFSfileUploadHandler receive_data_chunk")
+
    if not self._activated:
      if self.request.META.get('PATH_INFO').startswith('/filebrowser') and self.request.META.get('PATH_INFO') != '/filebrowser/upload/archive':
        raise StopUpload()
@@ -176,5 +179,5 @@ class HDFSfileUploadHandler(FileUploadHandler):
      raise

    elapsed = time.time() - self._starttime
-    LOG.debug('Uploaded %s bytes to HDFS in %s seconds' % (file_size, elapsed))
+    LOG.info('Uploaded %s bytes to HDFS in %s seconds' % (file_size, elapsed))
    return self._file

+ 10 - 0
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -771,6 +771,16 @@ class WebHdfs(Hdfs):
      except Exception:
        pass

+  def upload(self, file, path, *args, **kwargs):
+    username = kwargs.get('username')
+    if not username:
+      raise WebHdfsException(_("Failed to upload file. WebHdfs requires a valid username to upload files."))
+
+    dst = self.join(path, file.name)
+    tmp_file = file.get_temp_path()
+
+    self.do_as_user(username, self.rename, tmp_file, dst)
+
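
Taken together with the filebrowser change above, the view no longer needs to know which backend it is writing to; a sketch of the single call it now makes (names taken from the views.py hunk above):

  # uploaded_file is the Django UploadedFile produced by the upload handlers
  request.fs.upload(file=uploaded_file, path=dest, username=request.user.username)
  # ProxyFS routes this to WebHdfs.upload() for HDFS paths (renaming the .tmp file
  # written by HDFSfileUploadHandler into place as the requesting user) and to
  # S3FileSystem.upload() for s3:// paths (a no-op, since S3FileUploadHandler has
  # already streamed the parts directly to S3).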
 
 
class File(object):
  """