
HUE-4163 [aws] Implement S3 mkdir operation

Jenny Kim, 9 years ago
parent commit c741084

+ 7 - 1
apps/filebrowser/src/filebrowser/forms.py

@@ -23,6 +23,7 @@ from django.contrib.auth.models import User, Group
 from django.forms import FileField, CharField, BooleanField, Textarea
 from django.forms.formsets import formset_factory, BaseFormSet
 
+from aws.s3 import S3_ROOT, normpath as s3_normpath
 from desktop.lib import i18n
 from hadoop.fs import normpath
 from filebrowser.lib import rwx
@@ -60,7 +61,12 @@ class PathField(CharField):
     forms.CharField.__init__(self, label=label, help_text=help_text, **kwargs)
 
   def clean(self, value):
-    return normpath(CharField.clean(self, value))
+    cleaned_path = CharField.clean(self, value)
+    if value.lower().startswith(S3_ROOT):
+      cleaned_path = s3_normpath(cleaned_path)
+    else:
+      cleaned_path = normpath(cleaned_path)
+    return cleaned_path
 
 
 class EditorForm(forms.Form):
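
A note on why clean() now branches: hadoop.fs.normpath is posix-style, and a posix normpath collapses the '//' in an S3 URI, so S3 paths need the scheme-aware aws.s3.normpath instead. A quick illustration of the failure mode (posixpath stands in here for the HDFS normalizer):

>>> import posixpath
>>> posixpath.normpath('s3://bucket/dir/')
's3:/bucket/dir'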

+ 5 - 3
apps/filebrowser/src/filebrowser/views.py

@@ -56,6 +56,7 @@ from desktop.lib.fs import splitpath
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.fs.fsutils import do_overwrite_save
+from hadoop.fs.webhdfs import WebHdfs
 
 from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE
 from filebrowser.conf import SHOW_DOWNLOAD_BUTTON
@@ -977,6 +978,7 @@ def generic_op(form_class, request, op, parameter_names, piggyback=None, templat
                 op(*args)
             except (IOError, WebHdfsException), e:
                 msg = _("Cannot perform operation.")
+                # TODO: Only apply this message for HDFS
                 if request.user.is_superuser and not _is_hdfs_superuser(request):
                     msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
                            % {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
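
The WebHdfs import added above suggests one way to resolve this TODO: gate the hint on the filesystem type. A sketch only, not what this commit does:

# Hypothetical follow-up, not part of this commit: only show the
# HDFS-superuser hint when the operation actually ran against WebHdfs.
if isinstance(request.fs, WebHdfs) and request.user.is_superuser and not _is_hdfs_superuser(request):
    msg += hdfs_superuser_hint  # the _('Note: you are a Hue admin ...') message above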
@@ -1014,7 +1016,7 @@ def rename(request):
           raise PopupException(_("Could not rename folder \"%s\" to \"%s\": Hashes are not allowed in filenames." % (src_path, dest_path)))
         if "/" not in dest_path:
             src_dir = os.path.dirname(src_path)
-            dest_path = os.path.join(src_dir, dest_path)
+            dest_path = request.fs.join(src_dir, dest_path)
         request.fs.rename(src_path, dest_path)
 
     return generic_op(RenameForm, request, smart_rename, ["src_path", "dest_path"], None)
@@ -1026,7 +1028,7 @@ def mkdir(request):
         # No absolute directory specification allowed.
         if posixpath.sep in name or "#" in name:
             raise PopupException(_("Could not name folder \"%s\": Slashes or hashes are not allowed in filenames." % name))
-        request.fs.mkdir(os.path.join(path, name))
+        request.fs.mkdir(request.fs.join(path, name))
 
     return generic_op(MkDirForm, request, smart_mkdir, ["path", "name"], "path")
 
@@ -1036,7 +1038,7 @@ def touch(request):
         # No absolute path specification allowed.
         if posixpath.sep in name:
             raise PopupException(_("Could not name file \"%s\": Slashes are not allowed in filenames." % name))
-        request.fs.create(os.path.join(path, name))
+        request.fs.create(request.fs.join(path, name))
 
     return generic_op(TouchForm, request, smart_touch, ["path", "name"], "path")
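
The three os.path.join → request.fs.join swaps are the heart of the views change: os.path.join uses the local OS separator and knows nothing about URI schemes, while the filesystem object can join components the way its backend expects. A minimal sketch of an S3-style join (hypothetical helper, not the actual aws.s3 code):

# Hypothetical: always joins with '/', regardless of the local OS.
def s3_join(first, *components):
    parts = [first.rstrip('/')] + [c.strip('/') for c in components]
    return '/'.join(p for p in parts if p)

s3_join('s3://bucket/dir', 'child')  # -> 's3://bucket/dir/child'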
 

+ 13 - 0
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -55,6 +55,17 @@ class S3FileSystem(object):
       self._bucket_cache[name] = self._s3_connection.get_bucket(name)
     return self._bucket_cache[name]
 
+  def _get_or_create_bucket(self, name):
+    try:
+      bucket = self._get_bucket(name)
+    except S3ResponseError, e:
+      if e.status == 404:
+        bucket = self._s3_connection.create_bucket(name)
+        self._bucket_cache[name] = bucket
+      else:
+        raise e
+    return bucket
+
   def _get_key(self, path, validate=True):
     bucket_name, key_name = s3.parse_uri(path)[:2]
     bucket = self._get_bucket(bucket_name)
@@ -233,6 +244,8 @@ class S3FileSystem(object):
 
     Actually it creates an empty object: s3://[bucket]/[path]/
     """
+    bucket_name, key_name = s3.parse_uri(path)[:2]
+    self._get_or_create_bucket(bucket_name)
     stats = self._stats(path)
     if stats:
       if stats.isDir:
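
mkdir() now does two things: it creates the bucket on demand (the 404 branch above) and, per the docstring, represents the "directory" as an empty object whose key ends in '/'. The tail of mkdir() falls outside this hunk; a sketch of that marker step with boto, using illustrative names:

# Illustrative only -- the actual tail of mkdir() is not shown in this diff.
def _create_dir_marker(bucket, key_name):
    if not key_name.endswith('/'):
        key_name += '/'
    key = bucket.new_key(key_name)    # boto Bucket.new_key
    key.set_contents_from_string('')  # a zero-byte object marks the folder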

+ 6 - 5
desktop/libs/aws/src/aws/s3/s3fs_test.py

@@ -222,8 +222,9 @@ class S3FSTest(S3TestBase):
     buckets = self.fs.listdir('s3://')
     assert_true(len(buckets) > 0)
 
-
-
-
-
-
+  def test_mkdir(self):
+    dir_path = self.get_test_path('test_mkdir')
+    assert_false(self.fs.exists(dir_path))
+    
+    self.fs.mkdir(dir_path)
+    assert_true(self.fs.exists(dir_path))

+ 1 - 0
desktop/libs/hadoop/src/hadoop/fs/__init__.py

@@ -176,6 +176,7 @@ class LocalSubFileSystem(object):
   isfile = _wrap(os.path.isfile)
   isdir = _wrap(os.path.isdir)
   chmod = _wrap(os.chmod)
+  join = _wrap(os.path.join)
   # This could be provided with an error_handler
   rmtree = _wrap(shutil.rmtree)
   chown = _wrap(os.chown, paths=[0], users=[1], groups=[2])
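
With this one-liner, every backend the file browser touches exposes join, so the view helpers above stay filesystem-agnostic:

# Duck typing: the same view code works for HDFS, S3 and the local test
# filesystem, because each fs object supplies its own join().
def smart_mkdir(fs, path, name):
    fs.mkdir(fs.join(path, name))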