
HUE-1027 [core] HDFS trash and restore

Items to be trashed are moved into /user/<current user>/.Trash/Current/<path>.
HDFS handles the eventual purging of that directory, so we do not need to worry
about excess directories accumulating.
HDFS also handles checkpointing.
A timestamp suffix is appended to a path if it already exists in the Current directory.
Any necessary mkdirs are performed along the way.

Restoration looks at /user/<current user>/.Trash/<timestamp>/<path>.
It moves the item out of the trash, directly back to <path>,
performing any necessary mkdirs along the way.

Testing requires the HDFS pseudo cluster to be started with trash enabled;
fs.trash.interval is set to 10 (i.e. checkpoints are kept for 10 minutes).

An option to skip the trash is added to the UI.
Abraham Elmahrek 12 years ago
parent
commit
f514b58698
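
The path scheme described above can be summarized in a short sketch (a hypothetical helper, not part of the patch; it mirrors the _trash() logic added to webhdfs.py below):

  import time

  def trash_destination(fs, path):
      # Re-root the original path under /user/<current user>/.Trash/Current,
      # appending a timestamp suffix while the destination already exists.
      base = fs.join(fs.current_trash_path, path.lstrip('/'))
      destination = base
      while fs.exists(destination):
          destination = base + str(time.time())
      return destination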

+ 12 - 0
apps/filebrowser/src/filebrowser/forms.py

@@ -117,6 +117,18 @@ class BaseRmTreeFormset(FormSet):
 
 RmTreeFormSet = formset_factory(RmTreeForm, formset=BaseRmTreeFormset, extra=0)
 
+class RestoreForm(forms.Form):
+  op = "rmtree"
+  path = PathField(label=_("Path to restore"))
+
+class BaseRestoreFormset(FormSet):
+  op = "restore"
+
+RestoreFormSet = formset_factory(RestoreForm, formset=BaseRestoreFormset, extra=0)
+
+class TrashPurgeForm(forms.Form):
+  op = "purge_trash"
+
 class MkDirForm(forms.Form):
   op = "mkdir"
   path = PathField(label=_("Path in which to create the directory"))

+ 3 - 0
apps/filebrowser/src/filebrowser/templates/fb_components.mako

@@ -29,6 +29,9 @@ from django.utils.translation import ugettext as _
                 </ul>
                 <input id="hueBreadcrumbText" type="text" class="input-xxlarge" style="margin-top:4px;margin-right:4px;display:none" data-bind="value: currentPath" />
             </li>
+            <li class="pull-right">
+                <a href="${url('filebrowser.views.view', path=urlencode(path))}?default_to_trash" style="line-height:18px"><i class="icon-trash"></i> ${_('Trash')}</a>
+            </li>
         </ul>
     % else:
         <ul class="nav nav-pills hueBreadcrumbBar">

+ 23 - 7
apps/filebrowser/src/filebrowser/templates/listdir.mako

@@ -34,21 +34,33 @@ ${ commonheader(_('File Browser'), 'filebrowser', user) | n,unicode }
         </%def>
 
         <%def name="actions()">
-            <button class="btn fileToolbarBtn" title="${_('Rename')}" data-bind="click: renameFile, enable: selectedFiles().length == 1"><i class="icon-font"></i> ${_('Rename')}</button>
+            <button class="btn fileToolbarBtn" title="${_('Rename')}" data-bind="visible: !inTrash(), click: renameFile, enable: selectedFiles().length == 1"><i class="icon-font"></i> ${_('Rename')}</button>
             <button class="btn fileToolbarBtn" title="${_('Move')}" data-bind="click: move, enable: selectedFiles().length > 0"><i class="icon-random"></i> ${_('Move')}</button>
             <button class="btn fileToolbarBtn" title="${_('Copy')}" data-bind="click: copy, enable: selectedFiles().length > 0"><i class="icon-retweet"></i> ${_('Copy')}</button>
             %if is_fs_superuser:
-                <button class="btn fileToolbarBtn" title="${_('Change Owner / Group')}" data-bind="click: changeOwner, enable: selectedFiles().length > 0"><i class="icon-user"></i> ${_('Change Owner / Group')}</button>
+                <button class="btn fileToolbarBtn" title="${_('Change Owner / Group')}" data-bind="visible: !inTrash(), click: changeOwner, enable: selectedFiles().length > 0"><i class="icon-user"></i> ${_('Change Owner / Group')}</button>
             %endif
-            <button class="btn fileToolbarBtn" title="${_('Change Permissions')}" data-bind="click: changePermissions, enable: selectedFiles().length > 0"><i class="icon-list-alt"></i> ${_('Change Permissions')}</button>
-            <button class="btn fileToolbarBtn" title="${_('Download')}" data-bind="click: downloadFile, enable: selectedFiles().length == 1 && selectedFile().type == 'file'"><i class="icon-download-alt"></i> ${_('Download')}</button>
+            <button class="btn fileToolbarBtn" title="${_('Change Permissions')}" data-bind="visible: !inTrash(), click: changePermissions, enable: selectedFiles().length > 0"><i class="icon-list-alt"></i> ${_('Change Permissions')}</button>
+            <button class="btn fileToolbarBtn" title="${_('Download')}" data-bind="visible: !inTrash(), click: downloadFile, enable: selectedFiles().length == 1 && selectedFile().type == 'file'"><i class="icon-download-alt"></i> ${_('Download')}</button>
             &nbsp;&nbsp;
-            <button class="btn fileToolbarBtn" title="${_('Delete')}" data-bind="click: deleteSelected, enable: selectedFiles().length > 0"><i class="icon-trash"></i> ${_('Delete')}</button>
+            <button class="btn fileToolbarBtn" title="${_('Empty trash')}" data-bind="visible: inTrash(), click: purgeTrash"><i class="icon-fire"></i> ${_('Empty')}</button>
+            <button class="btn fileToolbarBtn" title="${_('Restore from trash')}" data-bind="visible: inRestorableTrash(), click: restoreTrashSelected, enable: selectedFiles().length > 0"><i class="icon-cloud-upload"></i> ${_('Restore')}</button>
+
+            <div id="delete-dropdown" class="btn-group" style="display: inline">
+              <a href="#" class="btn delete-link dropdown-toggle" title="${_('Delete')}" data-toggle="dropdown" data-bind="visible: !inTrash()">
+                <i class="icon-remove"></i> ${_('Delete')}
+                <span class="caret"></span>
+              </a>
+              <ul class="dropdown-menu" style="top: auto">
+                <li data-bind="visible: trashEnabled"><a href="#" class="delete-link" title="${_('Move to Trash')}" data-bind="enable: selectedFiles().length > 0, click: trashSelected"><i class="icon-trash"></i> ${_('Move to Trash')}</a></li>
+                <li><a href="#" class="delete-link" title="${_('Delete forever')}" data-bind="enable: selectedFiles().length > 0, click: deleteSelected"><i class="icon-bolt"></i> ${_('Delete forever')}</a></li>
+              </ul>
+            </div>
         </%def>
 
         <%def name="creation()">
             <div id="upload-dropdown" class="btn-group pull-right" style="display: inline-block; margin-top:0">
-              <a href="#" class="btn upload-link dropdown-toggle" title="${_('Upload')}" data-toggle="dropdown">
+              <a href="#" class="btn upload-link dropdown-toggle" title="${_('Upload')}" data-toggle="dropdown" data-bind="visible: !inTrash()">
                 <i class="icon-upload"></i> ${_('Upload')}
                 <span class="caret"></span>
               </a>
@@ -58,7 +70,7 @@ ${ commonheader(_('File Browser'), 'filebrowser', user) | n,unicode }
               </ul>
             </div>
             <div class="btn-group" style="display: inline">
-              <a href="#" data-toggle="dropdown" class="btn dropdown-toggle">
+              <a href="#" data-toggle="dropdown" class="btn dropdown-toggle" data-bind="visible: !inTrash()">
                 <i class="icon-plus-sign"></i> ${_('New')}
                 <span class="caret"></span>
               </a>
@@ -70,6 +82,10 @@ ${ commonheader(_('File Browser'), 'filebrowser', user) | n,unicode }
         </%def>
     </%actionbar:render>
 
+    <div class="alert alert-warn" data-bind="visible: inTrash">
+        ${ _("You are in Hadoop trash. Your files will be under a checkpoint, or timestamp named, directory.") }
+    </div>
+
     % if breadcrumbs:
         ${fb_components.breadcrumbs(path, breadcrumbs, True)}
     %endif

+ 81 - 2
apps/filebrowser/src/filebrowser/templates/listdir_components.mako

@@ -153,6 +153,40 @@ from django.utils.translation import ugettext as _
         </div>
     </div>
 
+    <!-- restore modal -->
+    <div id="restoreTrashModal" class="modal hide fade">
+        <div class="modal-header">
+            <a href="#" class="close" data-dismiss="modal">&times;</a>
+            <h3>${_('Confirm Restore')}</h3>
+        </div>
+        <div class="modal-body">
+            <p>${_('Are you sure you want to restore these files?')}</p>
+        </div>
+        <div class="modal-footer">
+            <form id="restoreTrashForm" action="/filebrowser/trash/restore" method="POST" enctype="multipart/form-data" class="form-stacked">
+                <a class="btn" data-dismiss="modal">${_('No')}</a>
+                <input type="submit" value="${_('Yes')}" class="btn btn-primary" />
+            </form>
+        </div>
+    </div>
+
+    <!-- purge modal -->
+    <div id="purgeTrashModal" class="modal hide fade">
+        <div class="modal-header">
+            <a href="#" class="close" data-dismiss="modal">&times;</a>
+            <h3>${_('Confirm Empty Trash')}</h3>
+        </div>
+        <div class="modal-body">
+            <p>${_('Are you sure you want to permanently delete all your trash?')}</p>
+        </div>
+        <div class="modal-footer">
+            <form id="purgeTrashForm" action="/filebrowser/trash/purge" method="POST" enctype="multipart/form-data" class="form-stacked">
+                <a class="btn" data-dismiss="modal">${_('Cancel')}</a>
+                <input type="submit" value="${_('Delete')}" class="btn btn-primary" />
+            </form>
+        </div>
+    </div>
+
     <!-- rename modal -->
     <div id="renameModal" class="modal hide fade">
         <form id="renameForm" action="/filebrowser/rename?next=${current_request_path}" method="POST" enctype="multipart/form-data" class="form-inline form-padding-fix">
@@ -771,6 +805,7 @@ from django.utils.translation import ugettext as _
       self.recordsPerPage = ko.observable($.cookie("hueFilebrowserRecordsPerPage"));
       self.targetPageNum = ko.observable(1);
       self.targetPath = ko.observable("${current_request_path}");
+      self.trashEnabled = ko.observable(${ trash_enabled and "true" or "false" });
 
       self.sortBy = ko.observable("name");
       self.sortDescending = ko.observable(false);
@@ -822,6 +857,14 @@ from django.utils.translation import ugettext as _
 
       self.currentPath = ko.observable(currentDirPath);
 
+      self.inTrash = ko.computed(function() {
+        return self.currentPath().match(/^\/user\/.+?\/\.Trash/) && self.trashEnabled();
+      });
+
+      self.inRestorableTrash = ko.computed(function() {
+        return self.currentPath().match(/^\/user\/.+?\/\.Trash\/.+?/) && self.trashEnabled();
+      });
+
       self.getStats = function (callback) {
         $.getJSON(self.targetPath() + "?pagesize=1&format=json", callback);
       }
@@ -1019,17 +1062,27 @@ from django.utils.translation import ugettext as _
         }
       };
 
-      self.deleteSelected = function () {
+      var deleteSelected = function(skip_trash) {
         var paths = [];
         $(self.selectedFiles()).each(function (index, file) {
           paths.push(file.path);
         });
         hiddenFields($("#deleteForm"), 'path', paths);
-        $("#deleteForm").attr("action", "/filebrowser/rmtree" + "?next=${url('filebrowser.views.view', path=urlencode('/'))}" + "." + self.currentPath());
+        $("#deleteForm").attr("action", "/filebrowser/rmtree" + "?" +
+            (skip_trash ? "skip_trash=true&" : "") +
+            "next=${url('filebrowser.views.view', path=urlencode('/'))}" + "." + self.currentPath());
         $("#deleteModal").modal({
           keyboard:true,
           show:true
         });
+      }
+
+      self.deleteSelected = function () {
+        deleteSelected(true);
+      };
+
+      self.trashSelected = function () {
+        deleteSelected();
       };
 
       self.createDirectory = function (formElement) {
@@ -1042,6 +1095,32 @@ from django.utils.translation import ugettext as _
         return true;
       };
 
+      self.restoreTrashSelected = function(formElement) {
+        var paths = [];
+        $(self.selectedFiles()).each(function (index, file) {
+          paths.push(file.path);
+        });
+        hiddenFields($("#restoreTrashForm"), 'path', paths);
+        $("#restoreTrashForm").attr("action", "/filebrowser/trash/restore?next=${url('filebrowser.views.view', path=urlencode('/'))}" + "." + self.currentPath());
+        $("#restoreTrashModal").modal({
+          keyboard:true,
+          show:true
+        });
+      };
+
+      self.purgeTrash = function(formElement) {
+        var paths = [];
+        $(self.selectedFiles()).each(function (index, file) {
+          paths.push(file.path);
+        });
+        hiddenFields($("#purgeTrashForm"), 'path', paths);
+        $("#purgeTrashForm").attr("action", "/filebrowser/trash/purge?next=${url('filebrowser.views.view', path=urlencode('/'))}" + "." + self.currentPath());
+        $("#purgeTrashModal").modal({
+          keyboard:true,
+          show:true
+        });
+      };
+
       self.uploadFile = (function () {
         var num_of_pending_uploads = 0;
         var action = "/filebrowser/upload/file";

+ 2 - 0
apps/filebrowser/src/filebrowser/urls.py

@@ -38,6 +38,8 @@ urlpatterns = patterns('filebrowser.views',
   url(r'^save$', 'save_file'),
   url(r'^upload/file$', 'upload_file', name='upload_file'),
   url(r'^upload/archive$', 'upload_archive', name='upload_archive'),
+  url(r'^trash/restore$', 'trash_restore', name='trash_restore'),
+  url(r'^trash/purge$', 'trash_purge', name='trash_purge'),
   url(r'^rename$', 'rename', name='rename'),
   url(r'^mkdir$', 'mkdir', name='mkdir'),
   url(r'^touch$', 'touch', name='touch'),
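
With these routes in place, the new endpoints can be driven the same way the existing rmtree view is; a rough usage sketch with Django's test client (assumes an authenticated client, and that paths are posted as repeated 'path' fields, as the UI forms do):

  # 'client' is a hypothetical authenticated Django test client.
  client.post('/filebrowser/rmtree?skip_trash=true', {'path': ['/user/test/stale_dir']})  # delete forever
  client.post('/filebrowser/rmtree', {'path': ['/user/test/stale_dir']})                  # move to trash
  client.post('/filebrowser/trash/restore', {'path': ['/user/test/.Trash/Current/user/test/stale_dir']})
  client.post('/filebrowser/trash/purge')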

+ 36 - 2
apps/filebrowser/src/filebrowser/views.py

@@ -25,6 +25,7 @@ import logging
 import mimetypes
 import operator
 import posixpath
+import re
 import shutil
 import stat as stat_module
 import os
@@ -57,7 +58,9 @@ from filebrowser.lib.archives import archive_factory
 from filebrowser.lib.rwx import filetype, rwx
 from filebrowser.lib import xxd
 from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
-                              RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet
+                              RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
+                              TrashPurgeForm
+from hadoop.core_site import get_trash_interval
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
 
@@ -134,6 +137,11 @@ def view(request, path):
         if request.fs.isdir(home_dir_path):
             return format_preserving_redirect(request, urlresolvers.reverse(view, kwargs=dict(path=home_dir_path)))
 
+    # default_to_home is set in bootstrap.js
+    if 'default_to_trash' in request.GET:
+        if request.fs.isdir(request.fs.trash_path):
+            return format_preserving_redirect(request, urlresolvers.reverse(view, kwargs=dict(path=request.fs.trash_path)))
+
     try:
         stats = request.fs.stats(path)
         if stats.isDir:
@@ -331,6 +339,8 @@ def listdir(request, path, chooser):
     Implements directory listing (or index).
 
     Intended to be called via view().
+
+    TODO: Remove?
     """
     if not request.fs.isdir(path):
         raise PopupException(_("Not a directory: %(path)s") % {'path': path})
@@ -407,6 +417,8 @@ def listdir_paged(request, path):
     if not request.fs.isdir(path):
         raise PopupException("Not a directory: %s" % (path,))
 
+    trash_enabled = get_trash_interval()
+
     pagenum = int(request.GET.get('pagenum', 1))
     pagesize = int(request.GET.get('pagesize', 30))
 
@@ -458,6 +470,7 @@ def listdir_paged(request, path):
         'page': _massage_page(page),
         'pagesize': pagesize,
         'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
+        'trash_enabled': trash_enabled,
         'sortby': sortby,
         'descending': descending_param,
         # The following should probably be deprecated
@@ -1003,8 +1016,10 @@ def rmtree(request):
     recurring = []
     params = ["path"]
     def bulk_rmtree(*args, **kwargs):
+        original = request.fs.setskiptrash('skip_trash' in request.GET)
         for arg in args:
-            request.fs.rmtree(arg['path'])
+            request.fs.do_as_user(request.user, request.fs.rmtree, arg['path'])
+        request.fs.setskiptrash(original)
     return generic_op(RmTreeFormSet, request, bulk_rmtree, ["path"], None,
                       data_extractor=formset_data_extractor(recurring, params),
                       arg_extractor=formset_arg_extractor,
@@ -1078,6 +1093,24 @@ def chown(request):
                       initial_value_extractor=formset_initial_value_extractor)
 
 
+@require_http_methods(["POST"])
+def trash_restore(request):
+    recurring = []
+    params = ["path"]
+    def bulk_restore(*args, **kwargs):
+        for arg in args:
+            request.fs.do_as_user(request.user, request.fs.restore, arg['path'])
+    return generic_op(RestoreFormSet, request, bulk_restore, ["path"], None,
+                      data_extractor=formset_data_extractor(recurring, params),
+                      arg_extractor=formset_arg_extractor,
+                      initial_value_extractor=formset_initial_value_extractor)
+
+
+@require_http_methods(["POST"])
+def trash_purge(request):
+    return generic_op(TrashPurgeForm, request, request.fs.purge_trash, [], None)
+
+
 def upload_file(request):
     """
     A wrapper around the actual upload view function to clean up the temporary file afterwards.
@@ -1237,6 +1270,7 @@ def _upload_archive(request):
     else:
         raise PopupException(_("Error in upload form: %s") % (form.errors,))
 
+
 def status(request):
     status = request.fs.status()
     data = {
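
The updated rmtree view toggles the filesystem's thread-local skip_trash flag around the bulk delete. A minimal sketch of that pattern (hypothetical fs and paths variables; try/finally is used here so the flag is always restored, which the view above does not strictly guarantee):

  original = fs.setskiptrash('skip_trash' in request.GET)
  try:
      for path in paths:
          fs.do_as_user(request.user, fs.rmtree, path)
  finally:
      fs.setskiptrash(original)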

BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/._setup.py


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/django_auth_ldap/._backend.py


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/django_auth_ldap/._config.py


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/django_auth_ldap/._tests.py


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/docs/._.DS_Store


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/docs/._conf.py


BIN
desktop/core/ext-py/django-auth-ldap-1.0.7/docs/._index.rst


+ 2 - 2
desktop/core/src/desktop/templates/actionbar.mako

@@ -24,7 +24,7 @@
                 ${caller.creation()}
             %endif
         </div>
-        <p>
+        <div style="margin: 0px 0px 10px 0px">
             %if hasattr(caller, "search"):
                 ${caller.search()}
             %else:
@@ -34,6 +34,6 @@
                 &nbsp;&nbsp;&nbsp;&nbsp;
                 ${caller.actions()}
             %endif
-        </p>
+        </div>
     </div>
 </%def>

+ 78 - 0
desktop/libs/hadoop/src/hadoop/core_site.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Helper for reading core-site.xml
+"""
+
+import errno
+import logging
+import os.path
+
+import conf
+import confparse
+
+__all = ['get_conf', 'get_trash_interval']
+
+LOG = logging.getLogger(__name__)
+
+_CORE_SITE_PATH = None                  # Path to core-site.xml
+_CORE_SITE_DICT = None                  # A dictionary of name/value config options
+
+_CNF_TRASH_INTERVAL = 'fs.trash.interval'
+
+def reset():
+  """Reset the cached conf"""
+  global _CORE_SITE_DICT
+  _CORE_SITE_DICT = None
+
+
+def get_conf():
+  """get_conf() ->  ConfParse object for core-site.xml"""
+  if _CORE_SITE_DICT is None:
+    _parse_core_site()
+  return _CORE_SITE_DICT
+
+
+def _parse_core_site():
+  """
+  Parse core-site.xml and store in _CORE_SITE_DICT
+  """
+  global _CORE_SITE_DICT
+  global _CORE_SITE_PATH
+
+  for indentifier in conf.HDFS_CLUSTERS.get():
+    _CORE_SITE_PATH = os.path.join(conf.HDFS_CLUSTERS[indentifier].HADOOP_CONF_DIR.get(), 'core-site.xml')
+    try:
+      data = file(_CORE_SITE_PATH, 'r').read()
+      break
+    except IOError, err:
+      if err.errno != errno.ENOENT:
+        LOG.error('Cannot read from "%s": %s' % (_CORE_SITE_PATH, err))
+        return
+      # Keep going and make an empty ConfParse
+      data = ""
+
+  _CORE_SITE_DICT = confparse.ConfParse(data)
+
+
+def get_trash_interval():
+  """
+  Get trash interval
+
+  Also indicates whether trash is enabled or not.
+  """
+  return get_conf().get(_CNF_TRASH_INTERVAL, None)
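
Since get_trash_interval() returns None when fs.trash.interval is absent, it doubles as the trash-enabled check used by webhdfs.py. A short usage sketch (hypothetical call site, Python 2 as in the rest of the code):

  from hadoop import core_site

  interval = core_site.get_trash_interval()
  if interval is None:
      print 'Trash is disabled; deletes are permanent'
  else:
      print 'Trash is enabled; fs.trash.interval = %s minutes' % interval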

+ 399 - 311
desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py

@@ -24,334 +24,422 @@ import logging
 import posixfile
 import random
 from threading import Thread
+import unittest
 
-from hadoop import pseudo_hdfs4
+from hadoop import conf, pseudo_hdfs4
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.fs.hadoopfs import Hdfs
 
 LOG = logging.getLogger(__name__)
 
-@attr('requires_hadoop')
-def test_webhdfs():
-  """
-  Minimal tests for a few basic file system operations.
-  """
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  f = fs.open("/fortest.txt", "w")
-  try:
-    f.write("hello")
-    f.close()
-    assert_equals("hello", fs.open("/fortest.txt").read())
-    assert_equals(5, fs.stats("/fortest.txt")["size"])
-    assert_true(fs.isfile("/fortest.txt"))
-    assert_false(fs.isfile("/"))
-    assert_true(fs.isdir("/"))
-    assert_false(fs.isdir("/fortest.txt"))
-  finally:
-    fs.remove("/fortest.txt")
-
-@attr('requires_hadoop')
-def test_webhdfs_functions():
-  """
-  Tests advanced file system operations.
-  """
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-
-  # Create home dir
-  fs.create_home_dir("/user/test")
-  assert_true(fs.isdir("/user/test"))
-  fs.remove("/user/test")
-
-@attr('requires_hadoop')
-def test_seek():
-  """Test for DESKTOP-293 - ensure seek works in python2.4"""
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  f = fs.open("/fortest.txt", "w")
-  try:
-    f.write("hello")
-    f.close()
+class WebhdfsTests(unittest.TestCase):
+  requires_hadoop = True
+
+  @classmethod
+  def setup_class(cls):
+    cls.cluster = pseudo_hdfs4.shared_cluster()
+
+  def setUp(self):
+    WebhdfsTests.setup_class()
+    self.cluster.fs.setuser(self.cluster.superuser)
+
+  def tearDown(self):
+    try:
+      self.cluster.fs.purge_trash()
+    except:
+      LOG.error('Could not clean up trash.')
+
+  def test_webhdfs(self):
+    """
+    Minimal tests for a few basic file system operations.
+    """
+    fs = self.cluster.fs
+    f = fs.open("/fortest.txt", "w")
+    try:
+      f.write("hello")
+      f.close()
+      assert_equals("hello", fs.open("/fortest.txt").read())
+      assert_equals(5, fs.stats("/fortest.txt")["size"])
+      assert_true(fs.isfile("/fortest.txt"))
+      assert_false(fs.isfile("/"))
+      assert_true(fs.isdir("/"))
+      assert_false(fs.isdir("/fortest.txt"))
+    finally:
+      fs.remove("/fortest.txt")
+
+  def test_webhdfs_functions(self):
+    """
+    Tests advanced file system operations.
+    """
+    fs = self.cluster.fs
+
+    # Create home dir
+    fs.create_home_dir("/user/test")
+    assert_true(fs.isdir("/user/test"))
+    fs.rmtree("/user/test")
+
+  def test_seek(self):
+    """Test for DESKTOP-293 - ensure seek works in python2.4"""
+    fs = self.cluster.fs
+    f = fs.open("/fortest.txt", "w")
+    try:
+      f.write("hello")
+      f.close()
+
+      f = fs.open("/fortest.txt", "r")
+      f.seek(0, posixfile.SEEK_SET)
+      assert_equals("he", f.read(2))
+      f.seek(1, posixfile.SEEK_SET)
+      assert_equals("el", f.read(2))
+      f.seek(-1, posixfile.SEEK_END)
+      assert_equals("o", f.read())
+      f.seek(0, posixfile.SEEK_SET)
+      f.seek(2, posixfile.SEEK_CUR)
+      assert_equals("ll", f.read(2))
+    finally:
+      fs.remove("/fortest.txt")
+
+  def test_seek_across_blocks(self):
+    """Makes a file with a lot of blocks, seeks around"""
+    fs = self.cluster.fs
+    fs.create("/fortest-blocks.txt", replication=1, blocksize=1024)
+    f = fs.open("/fortest-blocks.txt", "w")
+    try:
+      data = "abcdefghijklmnopqrstuvwxyz" * 3000
+      f.write(data)
+      f.close()
+
+      for i in xrange(1, 10):
+        f = fs.open("/fortest-blocks.txt", "r")
+
+        for j in xrange(1, 100):
+          offset = random.randint(0, len(data) - 1)
+          f.seek(offset, posixfile.SEEK_SET)
+          assert_equals(data[offset:offset+50], f.read(50))
+        f.close()
 
-    f = fs.open("/fortest.txt", "r")
-    f.seek(0, posixfile.SEEK_SET)
-    assert_equals("he", f.read(2))
-    f.seek(1, posixfile.SEEK_SET)
-    assert_equals("el", f.read(2))
-    f.seek(-1, posixfile.SEEK_END)
-    assert_equals("o", f.read())
-    f.seek(0, posixfile.SEEK_SET)
-    f.seek(2, posixfile.SEEK_CUR)
-    assert_equals("ll", f.read(2))
-  finally:
-    fs.remove("/fortest.txt")
-
-@attr('requires_hadoop')
-def test_seek_across_blocks():
-  """Makes a file with a lot of blocks, seeks around"""
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  fs.create("/fortest-blocks.txt", replication=1, blocksize=1024)
-  f = fs.open("/fortest-blocks.txt", "w")
-  try:
-    data = "abcdefghijklmnopqrstuvwxyz" * 3000
-    f.write(data)
+    finally:
+      fs.remove("/fortest-blocks.txt")
+
+  def test_exceptions(self):
+    """
+    Tests that appropriate exceptions are raised.
+    """
+    fs = self.cluster.fs
+    f = fs.open("/for_exception_test.txt", "w")
+    f.write("foo")
     f.close()
+    fs.chmod("/for_exception_test.txt", 0400)
+    fs.setuser("notsuperuser")
+    f = fs.open("/for_exception_test.txt")
+
+    assert_raises(WebHdfsException, f.read)
+    assert_raises(IOError, fs.open, "/test/doesnotexist.txt")
+
+  def test_copy_remote_dir(self):
+    fs = self.cluster.fs
+
+    src_dir = '/copy_remote_dir'
+    fs.mkdir(src_dir)
+    f1 = fs.open("/copy_remote_dir/test_one.txt", "w")
+    f1.write("foo")
+    f1.close()
+    f2 = fs.open("/copy_remote_dir/test_two.txt", "w")
+    f2.write("bar")
+    f2.close()
+
+    new_owner = 'testcopy'
+    new_owner_home = '/user/testcopy'
+    new_owner_dir = new_owner_home + '/test-copy'
+    fs.mkdir(new_owner_home)
+    fs.chown(new_owner_home, new_owner, new_owner)
+
+    fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0755, owner=new_owner)
+
+    dir_stat = fs.stats(new_owner_dir)
+    assert_equals(new_owner, dir_stat.user)
+    assert_equals(new_owner, dir_stat.group)
+    assert_equals('40755', '%o' % dir_stat.mode)
+
+    src_stat = fs.listdir_stats(src_dir)
+    dest_stat = fs.listdir_stats(new_owner_dir)
+
+    src_names = set([stat.name for stat in src_stat])
+    dest_names = set([stat.name for stat in dest_stat])
+    assert_true(src_names)
+    assert_equals(src_names, dest_names)
+
+    for stat in dest_stat:
+      assert_equals('testcopy', stat.user)
+      assert_equals('testcopy', stat.group)
+      assert_equals('100644', '%o' % stat.mode)
+
+  def test_two_files_open(self):
+    """
+    See DESKTOP-510.  There was a bug where you couldn't open two files at
+    the same time.  It boils down to a close_fds=True issue.  If this doesn't
+    hang, all is good.
+    """
+    fs = self.cluster.fs
+    f1 = fs.open("/test_one.txt", "w")
+    f2 = fs.open("/test_two.txt", "w")
+    f1.write("foo")
+    f2.write("bar")
+    f1.close()
+    f2.close()
+    # This should work, not hang, etc.
+
+  def test_urlsplit(self):
+    """Test Hdfs urlsplit"""
+    url = 'hdfs://nn.no.port/foo/bar'
+    assert_equals(('hdfs', 'nn.no.port', '/foo/bar', '', ''), Hdfs.urlsplit(url))
+    url = 'hdfs://nn:8020/foo/bar'
+    assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
+    url = 'hdfs://nn:8020//foo//bar'
+    assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
+    url = 'hdfs://nn:8020'
+    assert_equals(('hdfs', 'nn:8020', '/', '', ''), Hdfs.urlsplit(url))
+    url = '/foo/bar'
+    assert_equals(('hdfs', '', '/foo/bar', '', ''), Hdfs.urlsplit(url))
+    url = 'foo//bar'
+    assert_equals(('hdfs', '', 'foo/bar', '', ''), Hdfs.urlsplit(url))
+
+  def test_i18n_namespace(self):
+    def check_existence(name, parent, present=True):
+      assertion = present and assert_true or assert_false
+      listing = self.cluster.fs.listdir(parent)
+      assertion(name in listing, "%s should be in %s" % (name, listing))
+
+    name = u'''pt-Olá_ch-你好_ko-안녕_ru-Здравствуйте%20,.<>~`!@#$%^&()_-+='"'''
+    prefix = '/tmp/i18n'
+    dir_path = '%s/%s' % (prefix, name)
+    file_path = '%s/%s' % (dir_path, name)
 
-    for i in xrange(1, 10):
-      f = fs.open("/fortest-blocks.txt", "r")
+    try:
+      # Create a directory
+      self.cluster.fs.mkdir(dir_path)
+      # Directory is there
+      check_existence(name, prefix)
+
+      # Create a file (same name) in the directory
+      self.cluster.fs.open(file_path, 'w').close()
+      # File is there
+      check_existence(name, dir_path)
+
+      # Test rename
+      new_file_path = file_path + '.new'
+      self.cluster.fs.rename(file_path, new_file_path)
+      # New file is there
+      check_existence(name + '.new', dir_path)
+
+      # Test remove
+      self.cluster.fs.remove(new_file_path)
+      check_existence(name + '.new', dir_path, present=False)
+
+      # Test rmtree
+      self.cluster.fs.rmtree(dir_path)
+      check_existence(name, prefix, present=False)
+
+      # Test exception can handle non-ascii characters
+      try:
+        self.cluster.fs.rmtree(dir_path)
+      except IOError, ex:
+        LOG.info(unicode('Successfully caught error: %s') % ex)
+    finally:
+      try:
+        self.cluster.fs.rmtree(prefix)
+      except Exception, ex:
+        LOG.error(unicode('Failed to cleanup %s: %s') % (prefix, ex))
+
+  def test_threadedness(self):
+    # Start a second thread to change the user, and
+    # make sure that isn't reflected.
+    fs = self.cluster.fs
+    fs.setuser("alpha")
+    class T(Thread):
+      def run(self):
+        fs.setuser("beta")
+        assert_equals("beta", fs.user)
+    t = T()
+    t.start()
+    t.join()
+    assert_equals("alpha", fs.user)
+    fs.setuser("gamma")
+    assert_equals("gamma", fs.user)
+
+  def test_chmod(self):
+    # Create a test directory with
+    # a subdirectory and a few files.
+    dir1 = '/test'
+    subdir1 = dir1 + '/test1'
+    file1 = subdir1 + '/test1.txt'
+    fs = self.cluster.fs
+    try:
+      fs.mkdir(subdir1)
+      f = fs.open(file1, "w")
+      f.write("hello")
+      f.close()
 
-      for j in xrange(1, 100):
-        offset = random.randint(0, len(data) - 1)
-        f.seek(offset, posixfile.SEEK_SET)
-        assert_equals(data[offset:offset+50], f.read(50))
+      # Check currrent permissions are not 777 (666 for file)
+      fs.chmod(dir1, 01000, recursive=True)
+      assert_equals(041000, fs.stats(dir1).mode)
+      assert_equals(041000, fs.stats(subdir1).mode)
+      assert_equals(0100000, fs.stats(file1).mode)
+
+      # Chmod non-recursive
+      fs.chmod(dir1, 01222, recursive=False)
+      assert_equals(041222, fs.stats(dir1).mode)
+      assert_equals(041000, fs.stats(subdir1).mode)
+      assert_equals(0100000, fs.stats(file1).mode)
+
+      # Chmod recursive
+      fs.chmod(dir1, 01444, recursive=True)
+      assert_equals(041444, fs.stats(dir1).mode)
+      assert_equals(041444, fs.stats(subdir1).mode)
+      assert_equals(0100444, fs.stats(file1).mode)
+    finally:
+      try:
+        fs.rmtree(dir1)
+      finally:
+        pass
+
+  def test_chown(self):
+    # Create a test directory with
+    # a subdirectory and a few files.
+    dir1 = '/test'
+    subdir1 = dir1 + '/test1'
+    file1 = subdir1 + '/test1.txt'
+    fs = self.cluster.fs
+    try:
+      fs.mkdir(subdir1)
+      f = fs.open(file1, "w")
+      f.write("hello")
       f.close()
 
-  finally:
-    fs.remove("/fortest-blocks.txt")
-
-@attr('requires_hadoop')
-def test_exceptions():
-  """
-  Tests that appropriate exceptions are raised.
-  """
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  f = fs.open("/for_exception_test.txt", "w")
-  f.write("foo")
-  f.close()
-  fs.chmod("/for_exception_test.txt", 0400)
-  fs.setuser("notsuperuser")
-  f = fs.open("/for_exception_test.txt")
-
-  assert_raises(WebHdfsException, f.read)
-  assert_raises(IOError, fs.open, "/test/doesnotexist.txt")
-
-@attr('requires_hadoop')
-def test_copy_remote_dir():
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-
-  src_dir = '/copy_remote_dir'
-  fs.mkdir(src_dir)
-  f1 = fs.open("/copy_remote_dir/test_one.txt", "w")
-  f1.write("foo")
-  f1.close()
-  f2 = fs.open("/copy_remote_dir/test_two.txt", "w")
-  f2.write("bar")
-  f2.close()
-
-  new_owner = 'testcopy'
-  new_owner_home = '/user/testcopy'
-  new_owner_dir = new_owner_home + '/test-copy'
-  fs.mkdir(new_owner_home)
-  fs.chown(new_owner_home, new_owner, new_owner)
-
-  fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0755, owner=new_owner)
-
-  dir_stat = fs.stats(new_owner_dir)
-  assert_equals(new_owner, dir_stat.user)
-  assert_equals(new_owner, dir_stat.group)
-  assert_equals('40755', '%o' % dir_stat.mode)
-
-  src_stat = fs.listdir_stats(src_dir)
-  dest_stat = fs.listdir_stats(new_owner_dir)
-
-  src_names = set([stat.name for stat in src_stat])
-  dest_names = set([stat.name for stat in dest_stat])
-  assert_true(src_names)
-  assert_equals(src_names, dest_names)
-
-  for stat in dest_stat:
-    assert_equals('testcopy', stat.user)
-    assert_equals('testcopy', stat.group)
-    assert_equals('100644', '%o' % stat.mode)
-
-@attr('requires_hadoop')
-def test_two_files_open():
-  """
-  See DESKTOP-510.  There was a bug where you couldn't open two files at
-  the same time.  It boils down to a close_fds=True issue.  If this doesn't
-  hang, all is good.
-  """
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  f1 = fs.open("/test_one.txt", "w")
-  f2 = fs.open("/test_two.txt", "w")
-  f1.write("foo")
-  f2.write("bar")
-  f1.close()
-  f2.close()
-  # This should work, not hang, etc.
-
-
-def test_urlsplit():
-  """Test Hdfs urlsplit"""
-  url = 'hdfs://nn.no.port/foo/bar'
-  assert_equals(('hdfs', 'nn.no.port', '/foo/bar', '', ''), Hdfs.urlsplit(url))
-  url = 'hdfs://nn:8020/foo/bar'
-  assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
-  url = 'hdfs://nn:8020//foo//bar'
-  assert_equals(('hdfs', 'nn:8020', '/foo/bar', '', ''), Hdfs.urlsplit(url))
-  url = 'hdfs://nn:8020'
-  assert_equals(('hdfs', 'nn:8020', '/', '', ''), Hdfs.urlsplit(url))
-  url = '/foo/bar'
-  assert_equals(('hdfs', '', '/foo/bar', '', ''), Hdfs.urlsplit(url))
-  url = 'foo//bar'
-  assert_equals(('hdfs', '', 'foo/bar', '', ''), Hdfs.urlsplit(url))
-
-
-@attr('requires_hadoop')
-def test_i18n_namespace():
-  cluster = pseudo_hdfs4.shared_cluster()
-  cluster.fs.setuser(cluster.superuser)
-
-  def check_existence(name, parent, present=True):
-    assertion = present and assert_true or assert_false
-    listing = cluster.fs.listdir(parent)
-    assertion(name in listing, "%s should be in %s" % (name, listing))
-
-  name = u'''pt-Olá_ch-你好_ko-안녕_ru-Здравствуйте%20,.<>~`!@#$%^&()_-+='"'''
-  prefix = '/tmp/i18n'
-  dir_path = '%s/%s' % (prefix, name)
-  file_path = '%s/%s' % (dir_path, name)
-
-  try:
-    # Create a directory
-    cluster.fs.mkdir(dir_path)
-    # Directory is there
-    check_existence(name, prefix)
-
-    # Create a file (same name) in the directory
-    cluster.fs.open(file_path, 'w').close()
-    # File is there
-    check_existence(name, dir_path)
-
-    # Test rename
-    new_file_path = file_path + '.new'
-    cluster.fs.rename(file_path, new_file_path)
-    # New file is there
-    check_existence(name + '.new', dir_path)
-
-    # Test remove
-    cluster.fs.remove(new_file_path)
-    check_existence(name + '.new', dir_path, present=False)
-
-    # Test rmtree
-    cluster.fs.rmtree(dir_path)
-    check_existence(name, prefix, present=False)
-
-    # Test exception can handle non-ascii characters
+      # Check currrent owners are not user test
+      LOG.info(str(fs.stats(dir1).__dict__))
+      assert_not_equals('test', fs.stats(dir1).user)
+      assert_not_equals('test', fs.stats(subdir1).user)
+      assert_not_equals('test', fs.stats(file1).user)
+
+      # Chown non-recursive
+      fs.chown(dir1, 'test', recursive=False)
+      assert_equals('test', fs.stats(dir1).user)
+      assert_not_equals('test', fs.stats(subdir1).user)
+      assert_not_equals('test', fs.stats(file1).user)
+
+      # Chown recursive
+      fs.chown(dir1, 'test', recursive=True)
+      assert_equals('test', fs.stats(dir1).user)
+      assert_equals('test', fs.stats(subdir1).user)
+      assert_equals('test', fs.stats(file1).user)
+    finally:
+      try:
+        fs.rmtree(dir1)
+      finally:
+        pass
+
+  def test_trash_and_restore(self):
+    PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
+
     try:
-      cluster.fs.rmtree(dir_path)
-    except IOError, ex:
-      LOG.info('Successfully caught error: %s' % (ex,))
-  finally:
+      # Trash
+      self.cluster.fs.open(PATH, 'w').close()
+      assert_true(self.cluster.fs.exists(PATH))
+      self.cluster.fs.remove(PATH)
+      assert_false(self.cluster.fs.exists(PATH))
+      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
+      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
+      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
+      exists = map(self.cluster.fs.exists, trash_paths)
+      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
+      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+
+      # Restore
+      self.cluster.fs.restore(trash_path)
+      assert_false(self.cluster.fs.exists(trash_path))
+      assert_true(self.cluster.fs.exists(PATH))
+    finally:
+      try:
+        self.cluster.fs.rmtree(PATH)
+      except Exception, ex:
+        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
+
+  def test_trash_and_purge(self):
+    PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
+
     try:
-      cluster.fs.rmtree(prefix)
-    except Exception, ex:
-      LOG.error('Failed to cleanup %s: %s' % (prefix, ex))
-
-@attr('requires_hadoop')
-def test_threadedness():
-  # Start a second thread to change the user, and
-  # make sure that isn't reflected.
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser("alpha")
-  class T(Thread):
-    def run(self):
-      fs.setuser("beta")
-      assert_equals("beta", fs.user)
-  t = T()
-  t.start()
-  t.join()
-  assert_equals("alpha", fs.user)
-  fs.setuser("gamma")
-  assert_equals("gamma", fs.user)
-
-@attr('requires_hadoop')
-def test_chmod():
-  # Create a test directory with
-  # a subdirectory and a few files.
-  dir1 = '/test'
-  subdir1 = dir1 + '/test1'
-  file1 = subdir1 + '/test1.txt'
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  try:
-    fs.mkdir(subdir1)
-    f = fs.open(file1, "w")
-    f.write("hello")
-    f.close()
+      # Trash
+      self.cluster.fs.open(PATH, 'w').close()
+      assert_true(self.cluster.fs.exists(PATH))
+      self.cluster.fs.remove(PATH)
+      assert_false(self.cluster.fs.exists(PATH))
+      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
+      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
+      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
+      exists = map(self.cluster.fs.exists, trash_paths)
+      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
+      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+
+      # Purge
+      self.cluster.fs.purge_trash()
+      assert_false(self.cluster.fs.exists(trash_path))
+      assert_false(self.cluster.fs.exists(PATH))
+    finally:
+      try:
+        self.cluster.fs.rmtree(PATH)
+      except Exception, ex:
+        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
+
+  def test_restore_error(self):
+    PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
 
-    # Check currrent permissions are not 777 (666 for file)
-    fs.chmod(dir1, 01000, recursive=True)
-    assert_equals(041000, fs.stats(dir1).mode)
-    assert_equals(041000, fs.stats(subdir1).mode)
-    assert_equals(0100000, fs.stats(file1).mode)
-
-    # Chmod non-recursive
-    fs.chmod(dir1, 01222, recursive=False)
-    assert_equals(041222, fs.stats(dir1).mode)
-    assert_equals(041000, fs.stats(subdir1).mode)
-    assert_equals(0100000, fs.stats(file1).mode)
-
-    # Chmod recursive
-    fs.chmod(dir1, 01444, recursive=True)
-    assert_equals(041444, fs.stats(dir1).mode)
-    assert_equals(041444, fs.stats(subdir1).mode)
-    assert_equals(0100444, fs.stats(file1).mode)
-  finally:
     try:
-      fs.rmtree(dir1)
+      # Trash
+      self.cluster.fs.open(PATH, 'w').close()
+      assert_true(self.cluster.fs.exists(PATH))
+      self.cluster.fs.remove(PATH)
+      assert_false(self.cluster.fs.exists(PATH))
+      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
+      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
+      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
+      exists = map(self.cluster.fs.exists, trash_paths)
+      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
+      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+
+      # Purge
+      self.cluster.fs.purge_trash()
+      assert_false(self.cluster.fs.exists(trash_path))
+      assert_false(self.cluster.fs.exists(PATH))
+
+      # Restore fail
+      assert_raises(IOError, self.cluster.fs.restore, trash_path)
     finally:
-      pass
-
-@attr('requires_hadoop')
-def test_chown():
-  # Create a test directory with
-  # a subdirectory and a few files.
-  dir1 = '/test'
-  subdir1 = dir1 + '/test1'
-  file1 = subdir1 + '/test1.txt'
-  cluster = pseudo_hdfs4.shared_cluster()
-  fs = cluster.fs
-  fs.setuser(cluster.superuser)
-  try:
-    fs.mkdir(subdir1)
-    f = fs.open(file1, "w")
-    f.write("hello")
-    f.close()
+      try:
+        self.cluster.fs.rmtree(PATH)
+      except Exception, ex:
+        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
+
+  def test_trash_permissions(self):
+    PATH = self.cluster.fs.join(self.cluster.fs.get_home_dir(), 'trash_test')
 
-    # Check currrent owners are not user test
-    LOG.info(str(fs.stats(dir1).__dict__))
-    assert_not_equals('test', fs.stats(dir1).user)
-    assert_not_equals('test', fs.stats(subdir1).user)
-    assert_not_equals('test', fs.stats(file1).user)
-
-    # Chown non-recursive
-    fs.chown(dir1, 'test', recursive=False)
-    assert_equals('test', fs.stats(dir1).user)
-    assert_not_equals('test', fs.stats(subdir1).user)
-    assert_not_equals('test', fs.stats(file1).user)
-
-    # Chown recursive
-    fs.chown(dir1, 'test', recursive=True)
-    assert_equals('test', fs.stats(dir1).user)
-    assert_equals('test', fs.stats(subdir1).user)
-    assert_equals('test', fs.stats(file1).user)
-  finally:
     try:
-      fs.rmtree(dir1)
+      # Trash
+      self.cluster.fs.open(PATH, 'w').close()
+      assert_true(self.cluster.fs.exists(PATH))
+      self.cluster.fs.remove(PATH)
+      assert_false(self.cluster.fs.exists(PATH))
+      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path))
+      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path)
+      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path, trash_dir, PATH[1:]) for trash_dir in trash_dirs]
+      exists = map(self.cluster.fs.exists, trash_paths)
+      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
+      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+
+      # Restore
+      assert_raises(WebHdfsException, self.cluster.fs.do_as_user, 'nouser', self.cluster.fs.restore, trash_path)
     finally:
-      pass
+      try:
+        self.cluster.fs.rmtree(PATH)
+      except Exception, ex:
+        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
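
The tests locate the trashed copy by mapping exists() over every checkpoint directory and reducing the results. An equivalent, arguably clearer sketch of the same lookup (assuming the same cluster.fs API):

  def find_in_trash(fs, path):
      # Return the first checkpoint copy of 'path' under the user's trash, or None.
      for checkpoint in fs.listdir(fs.trash_path):
          candidate = fs.join(fs.trash_path, checkpoint, path.lstrip('/'))
          if fs.exists(candidate):
              return candidate
      return None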

+ 2 - 0
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -89,7 +89,9 @@ class HDFStemporaryUploadedFile(object):
 
   def remove(self):
     try:
+      original = self._fs.setskiptrash(True)
       self._fs.remove(self._path)
+      self._fs.setskiptrash(original)
       self._do_cleanup = False
     except IOError, ex:
       if ex.errno != errno.ENOENT:

+ 133 - 22
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -25,6 +25,7 @@ import posixpath
 import random
 import stat
 import threading
+import time
 
 from django.utils.encoding import smart_str
 from django.utils.translation import ugettext as _
@@ -36,6 +37,7 @@ from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
 from hadoop.conf import UPLOAD_CHUNK_SIZE
 
 import hadoop.conf
+import hadoop.core_site
 
 
 DEFAULT_HDFS_SUPERUSER = 'hdfs'
@@ -50,6 +52,7 @@ class WebHdfs(Hdfs):
   WebHdfs implements the filesystem interface via the WebHDFS rest protocol.
   """
   DEFAULT_USER = 'hue'        # This should be the user running Hue
+  TRASH_CURRENT = 'Current'
 
   def __init__(self, url,
                fs_defaultfs,
@@ -80,7 +83,7 @@ class WebHdfs(Hdfs):
                temp_dir=hdfs_config.TEMP_DIR.get())
 
   def __str__(self):
-    return "WebHdfs at %s" % (self._url,)
+    return "WebHdfs at %s" % self._url
 
   def _make_client(self, url, security_enabled):
     client = http_client.HttpClient(
@@ -121,6 +124,26 @@ class WebHdfs(Hdfs):
     except AttributeError:
       return WebHdfs.DEFAULT_USER
 
+  @property
+  def trash_path(self):
+    try:
+      return self._thread_local.trash_path
+    except AttributeError:
+      self._thread_local.trash_path = self.join(self.get_home_dir(), '.Trash')
+    return self._thread_local.trash_path
+
+  @property
+  def current_trash_path(self):
+    return self.join(self.trash_path, self.TRASH_CURRENT)
+
+  @property
+  def skip_trash(self):
+    try:
+      return self._thread_local.skip_trash
+    except AttributeError:
+      self._thread_local.skip_trash = False
+    return self._thread_local.skip_trash
+
   def _getparams(self):
     return {
       "user.name" : WebHdfs.DEFAULT_USER,
@@ -133,6 +156,10 @@ class WebHdfs(Hdfs):
     self._thread_local.user = user
     return curr
 
+  def setskiptrash(self, skip_trash):
+    curr = self.skip_trash
+    self._thread_local.skip_trash = skip_trash
+    return curr
 
   def listdir_stats(self, path, glob=None):
     """
@@ -189,7 +216,7 @@ class WebHdfs(Hdfs):
     res = self._stats(path)
     if res is not None:
       return res
-    raise IOError(errno.ENOENT, "File %s not found" % (smart_str(path),))
+    raise IOError(errno.ENOENT, _("File %s not found") % path)
 
   def exists(self, path):
     return self._stats(path) is not None
@@ -206,6 +233,41 @@ class WebHdfs(Hdfs):
       return False
     return not sb.isDir
 
+  def _ensure_current_trash_directory(self):
+    """Create trash directory for a user if it doesn't exist."""
+    if not self.exists(self.current_trash_path):
+      self.mkdir(self.current_trash_path)
+    return self.current_trash_path
+
+  def _trash(self, path, recursive=False):
+    """
+    _trash(path, recursive=False)
+
+    Move a file or directory to trash.
+    Will create a timestamped directory underneath /user/<username>/.Trash.
+
+    Trash must be enabled for this to work.
+    """
+    if not self.exists(path):
+      raise IOError(errno.ENOENT, _("File %s not found") % path)
+
+    if not recursive and self.isdir(path):
+      raise IOError(errno.EISDIR, _("File %s is a directory") % path)
+
+    if path.startswith(self.trash_path):
+      raise IOError(errno.EPERM, _("File %s is already trashed") % path)
+
+    # Make path (with timestamp suffix if necessary)
+    base_trash_path = self.join(self._ensure_current_trash_directory(), path[1:])
+    trash_path = base_trash_path
+    while self.exists(trash_path):
+      trash_path = base_trash_path + str(time.time())
+
+    # Move path to trash path
+    self.mkdir(self.dirname(trash_path))
+    self.rename(path, trash_path)
+
+
   def _delete(self, path, recursive=False):
     """
     _delete(path, recursive=False)
@@ -220,19 +282,68 @@ class WebHdfs(Hdfs):
     # This part of the API is nonsense.
     # The lack of exception should indicate success.
     if not result['boolean']:
-      raise IOError('Delete failed: %s' % (smart_str(path),))
+      raise IOError(_('Delete failed: %s') % path)
 
   def remove(self, path):
     """Delete a file."""
-    self._delete(path, recursive=False)
+    if hadoop.core_site.get_trash_interval() is None or self.skip_trash:
+      self._delete(path, recursive=False)
+    else:
+      self._trash(path, recursive=False)
 
   def rmdir(self, path):
-    """Delete a file."""
-    self._delete(path, recursive=False)
+    """Delete a directory."""
+    self.remove(path)
 
   def rmtree(self, path):
     """Delete a tree recursively."""
-    self._delete(path, recursive=True)
+    if hadoop.core_site.get_trash_interval() is None or self.skip_trash:
+      self._delete(path, recursive=True)
+    else:
+      self._trash(path, recursive=True)
+
+  def restore(self, path):
+    """
+    restore(path)
+
+    The root of ``path`` will be /users/<current user>/.Trash/<timestamp>.
+    Removing the root from ``path`` will provide the original path.
+    Ensure parent directories exist and rename path.
+    """
+    if hadoop.core_site.get_trash_interval() is None:
+      raise IOError(errno.EPERM, _("Trash is not enabled."))
+
+    if not path.startswith(self.trash_path):
+      raise IOError(errno.EPERM, _("File %s is not in trash") % path)
+
+    # Build original path
+    original_path = []
+    split_path = self.split(path)
+    while split_path[0] != self.trash_path:
+      original_path.append(split_path[1])
+      split_path = self.split(split_path[0])
+    original_path.reverse()
+    original_path = self.join(posixpath.sep, *original_path)
+
+    # move to original path
+    # the path could have been expunged.
+    if self.exists(original_path):
+      raise IOError(errno.EEXIST, _("Path %s already exists.") % str(smart_str(original_path)))
+    self.rename(path, original_path)
+
+  def purge_trash(self):
+    """
+    purge_trash()
+
+    Purge all trash in users ``trash_path``
+    """
+    if hadoop.core_site.get_trash_interval() is None:
+      raise IOError(errno.EPERM, _("Trash is not enabled."))
+
+    original = self.setskiptrash(True)
+    for timestamped_directory in self.listdir(self.trash_path):
+      self.rmtree(self.join(self.trash_path, timestamped_directory))
+    self.setskiptrash(original)
 
   def mkdir(self, path, mode=None):
     """
@@ -247,7 +358,7 @@ class WebHdfs(Hdfs):
       params['permission'] = safe_octal(mode)
     success = self._root.put(path, params)
     if not success:
-      raise IOError("Mkdir failed: %s" % (smart_str(path),))
+      raise IOError(_("Mkdir failed: %s") % path)
 
   def rename(self, old, new):
     """rename(old, new)"""
@@ -261,17 +372,17 @@ class WebHdfs(Hdfs):
     params['destination'] = smart_str(new)
     result = self._root.put(old, params)
     if not result['boolean']:
-      raise IOError("Rename failed: %s -> %s" %
-                    (smart_str(old), smart_str(new)))
+      raise IOError(_("Rename failed: %s -> %s") %
+                    (str(smart_str(old)), str(smart_str(new))))
 
   def rename_star(self, old_dir, new_dir):
     """Equivalent to `mv old_dir/* new"""
     if not self.isdir(old_dir):
-      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (old_dir,))
+      raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % old_dir)
     if not self.exists(new_dir):
       self.mkdir(new_dir)
     elif not self.isdir(new_dir):
-      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (new_dir,))
+      raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % new_dir)
     ls = self.listdir(old_dir)
     for dirent in ls:
       self.rename(Hdfs.join(old_dir, dirent), Hdfs.join(new_dir, dirent))
@@ -385,11 +496,11 @@ class WebHdfs(Hdfs):
   def copyfile(self, src, dst):
     sb = self._stats(src)
     if sb is None:
-      raise IOError(errno.ENOENT, "Copy src '%s' does not exist" % (src,))
+      raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
     if sb.isDir:
-      raise IOError(errno.INVAL, "Copy src '%s' is a directory" % (src,))
+      raise IOError(errno.INVAL, _("Copy src '%s' is a directory") % src)
     if self.isdir(dst):
-      raise IOError(errno.INVAL, "Copy dst '%s' is a directory" % (dst,))
+      raise IOError(errno.INVAL, _("Copy dst '%s' is a directory") % dst)
 
     offset = 0
 
@@ -511,7 +622,7 @@ class WebHdfs(Hdfs):
 
     if next_url is None:
       raise WebHdfsException(
-        "Failed to create '%s'. HDFS did not return a redirect" % (path,))
+        _("Failed to create '%s'. HDFS did not return a redirect") % path)
 
     # Now talk to the real thing. The redirect url already includes the params.
     client = self._make_client(next_url, self.security_enabled)
@@ -528,7 +639,7 @@ class WebHdfs(Hdfs):
         raise webhdfs_ex
 
       if http_error.code not in (301, 302, 303, 307):
-        LOG.error("Response is not a redirect: %s" % (webhdfs_ex,))
+        LOG.error("Response is not a redirect: %s" % webhdfs_ex)
         raise webhdfs_ex
       return http_error.headers.getheader('location')
     except Exception, ex:
@@ -579,7 +690,7 @@ class File(object):
     try:
       self._stat = fs.stats(path)
       if self._stat.isDir:
-        raise IOError(errno.EISDIR, "Is a directory: '%s'" % (smart_str(path),))
+        raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path)
     except IOError, ex:
       if ex.errno == errno.ENOENT and 'w' in self._mode:
         self._fs.create(self._path)
@@ -597,7 +708,7 @@ class File(object):
       self.stat()
       self._pos = self._fs.stats(self._path).size + offset
     else:
-      raise IOError(errno.EINVAL, "Invalid argument to seek for whence")
+      raise IOError(errno.EINVAL, _("Invalid argument to seek for whence"))
 
   def stat(self):
     self._stat = self._fs.stats(self._path)
@@ -617,7 +728,7 @@ class File(object):
 
   def append(self, data):
     if 'w' not in self._mode:
-      raise IOError(errno.EINVAL, "File not open for writing")
+      raise IOError(errno.EINVAL, _("File not open for writing"))
     self._fs.append(self._path, data=data)
 
   def flush(self):
@@ -673,7 +784,7 @@ def test_fs_configuration(fs_config):
   except Exception, ex:
     LOG.info("%s -- Validation error: %s" % (fs, ex))
     return [(fs_config.WEBHDFS_URL,
-            'Failed to create temporary file "%s"' % (tmpname,))]
+            _('Failed to create temporary file "%s"') % tmpname)]
 
   # Check superuser has super power
   try:  # Finally: delete tmpname
@@ -690,6 +801,6 @@ def test_fs_configuration(fs_config):
     except Exception, ex:
       LOG.error("Failed to remove '%s': %s" % (tmpname, ex))
       return [(fs_config.WEBHDFS_URL,
-              'Failed to remove temporary file "%s"' % (tmpname,))]
+              _('Failed to remove temporary file "%s"') % tmpname)]
 
   return [ ]
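
restore() rebuilds the original location by peeling path components off the trashed path until it reaches the trash root, which naturally drops the .Trash/<checkpoint> prefix. A compact, standalone version of that reconstruction (posixpath stands in for the Hdfs path helpers; illustration only):

  import posixpath

  def original_path(trash_root, trashed):
      # e.g. ('/user/bob/.Trash', '/user/bob/.Trash/Current/user/bob/data') -> '/user/bob/data'
      parts = []
      head, tail = posixpath.split(trashed)
      while head != trash_root:
          parts.append(tail)
          head, tail = posixpath.split(head)
      # The final 'tail' is the checkpoint directory (e.g. Current) and is discarded.
      parts.reverse()
      return posixpath.join(posixpath.sep, *parts)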

+ 5 - 2
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -201,10 +201,12 @@ class PseudoHdfs4(object):
 
     # This is where we prepare our Hadoop configuration
     conf_dir = self._tmppath('conf')
-    os.mkdir(conf_dir)
+    if not os.path.exists(conf_dir):
+      os.mkdir(conf_dir)
 
     self._log_dir = self._tmppath('logs')
-    os.mkdir(self._log_dir)
+    if not os.path.exists(self._log_dir):
+      os.mkdir(self._log_dir)
 
     # Write out the Hadoop conf files
     self._write_hadoop_metrics_conf(conf_dir)
@@ -410,6 +412,7 @@ class PseudoHdfs4(object):
       'dfs.datanode.ipc.address': '%s:0' % self._fqdn,
       'dfs.replication': 1,
       'dfs.safemode.min.datanodes': 1,
+      'fs.trash.interval': 10
     }
     write_config(hdfs_configs, self._tmppath('conf/hdfs-site.xml'))