
[s3] Improve S3 recursive delete to handle large directories and virtual directory scenarios

- Refactors the recursive deletion logic to delete large numbers of S3 objects efficiently by chunking bulk delete requests to the S3 API's 1000-key limit (a standalone sketch of the chunking appears below, after the changed-file list).
- Fixes deletion of virtual directories: a key such as s3a://bucket/child/inner.txt implicitly creates a 'child' directory, and deleting that directory previously failed.
- Adds detailed error handling, logging, and validation for invalid URIs and bucket access failures. Enhances robustness when deleting directories and single objects, and clarifies the method's behavior and limitations.
Harsh Gupta, 5 months ago
commit d2892ee387
2 files changed, 311 insertions and 186 deletions
  1. desktop/libs/aws/src/aws/s3/s3fs.py  (+85, -30)
  2. desktop/libs/aws/src/aws/s3/s3fs_test.py  (+226, -156)
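
As a rough illustration of the chunked bulk delete and the virtual-directory cleanup described above, here is a minimal standalone sketch against a boto 2-style bucket (bucket.list, bucket.get_key and bucket.delete_keys are boto's API; the helper name delete_prefix_in_chunks is invented for this example and is not part of the commit):

# Minimal sketch, assuming a boto 2-style `bucket` exposing list()/get_key()/delete_keys().
S3A_DELETE_CHUNK_SIZE = 1000  # S3 bulk delete accepts at most 1000 keys per request


def delete_prefix_in_chunks(bucket, prefix):
  """Delete every key under `prefix`, batching requests to the 1000-key limit."""
  # Listing by prefix also covers "virtual" directories that have no marker object.
  keys_to_delete = list(bucket.list(prefix=prefix))

  # Include the zero-byte directory marker object, if one exists and was not listed.
  marker = bucket.get_key(prefix)
  if marker is not None and marker not in keys_to_delete:
    keys_to_delete.append(marker)

  errors = []
  for i in range(0, len(keys_to_delete), S3A_DELETE_CHUNK_SIZE):
    chunk = keys_to_delete[i:i + S3A_DELETE_CHUNK_SIZE]
    result = bucket.delete_keys(chunk)  # one bulk request per chunk
    errors.extend(result.errors)
  return errors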

desktop/libs/aws/src/aws/s3/s3fs.py  (+85, -30)

@@ -14,14 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import os
+import posixpath
 import re
 import time
-import logging
-import itertools
-import posixpath
-import urllib.error
-import urllib.request
 from builtins import object, str
 from urllib.parse import urlparse as lib_urlparse
 
@@ -33,14 +30,15 @@ from django.http.multipartparser import MultiPartParser
 from django.utils.translation import gettext as _
 
 from aws import s3
-from aws.conf import AWS_ACCOUNTS, PERMISSION_ACTION_S3, get_default_region, get_locations, is_raz_s3
-from aws.s3 import S3A_ROOT, normpath, s3file, translate_s3_error
+from aws.conf import AWS_ACCOUNTS, get_default_region, get_locations, is_raz_s3, PERMISSION_ACTION_S3
+from aws.s3 import normpath, S3A_ROOT, s3file, translate_s3_error
 from aws.s3.s3stat import S3Stat
 from filebrowser.conf import REMOTE_STORAGE_HOME
 
 DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
 BUCKET_NAME_PATTERN = re.compile(
   r"^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9_\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9_\-]*[A-Za-z0-9]))$")
+S3A_DELETE_CHUNK_SIZE = 1000  # S3 API limit for bulk delete operations
 
 LOG = logging.getLogger()
 
@@ -353,38 +351,95 @@ class S3FileSystem(object):
   @translate_s3_error
   @auth_error_handler
   def rmtree(self, path, skipTrash=True):
+    """
+    Recursively deletes objects from an S3 path.
+
+    This method can delete a single object, all objects under a given prefix (a "directory"),
+    or an entire bucket. Bulk delete requests are issued in chunks to respect S3's
+    1000-key limit per request.
+
+    Args:
+      path (str): The S3 URI to delete (e.g., 's3a://test-bucket/test-folder/').
+      skipTrash (bool): If False, this operation is not supported and will fail.
+
+    Raises:
+      NotImplementedError: If `skipTrash` is set to False.
+      S3FileSystemException: If any errors occur during the deletion process.
+      ValueError: If the provided path is not a valid S3 URI.
+    """
     if not skipTrash:
-      raise NotImplementedError('Moving to trash is not implemented for S3')
+      raise NotImplementedError("Moving to trash is not implemented for S3.")
+
+    try:
+      bucket_name, key_name = s3.parse_uri(path)[:2]
+    except Exception:
+      raise ValueError(f"Invalid S3 URI provided: {path}")
+
+    LOG.info(f"Attempting to recursively delete path: {path}")
+    LOG.debug(f"Parsed bucket: '{bucket_name}', key: '{key_name}'")
 
-    bucket_name, key_name = s3.parse_uri(path)[:2]
     if bucket_name and not key_name:
-      self._delete_bucket(bucket_name)
-    else:
-      if self.isdir(path):
-        path = self._append_separator(path)  # Really need to make sure we end with a '/'
+      return self._delete_bucket(bucket_name)
+
+    # Ensure directory-like paths end with a '/' to be used as a prefix
+    if self.isdir(path):
+      path = self._append_separator(path)
+      key_name = self._append_separator(key_name)
 
+    is_directory_key = key_name and key_name.endswith("/")
+
+    try:
       key = self._get_key(path, validate=False)
+      bucket = key.bucket
+    except Exception as e:
+      # Handle cases where the bucket might not exist or connection fails
+      LOG.error(f"Failed to connect to bucket '{bucket_name}'. Error: {e}")
+      raise S3FileSystemException(f"Could not access bucket '{bucket_name}'.") from e
 
-      if key.exists():
-        dir_keys = []
+    if key.exists() or is_directory_key:  # Virtual dirs may have no marker object, so also check the directory prefix
+      keys_to_delete = []
 
-        if self.isdir(path):
-          _, dir_key_name = s3.parse_uri(path)[:2]
-          dir_keys = key.bucket.list(prefix=dir_key_name)
+      if is_directory_key:
+        for k in bucket.list(prefix=key_name):
+          keys_to_delete.append(k)
 
-        if not dir_keys:
-          # Avoid Raz bulk delete issue
-          deleted_key = key.delete()
-          if deleted_key.exists():
-            raise S3FileSystemException('Could not delete key %s' % deleted_key)
-        else:
-          result = key.bucket.delete_keys(list(dir_keys))
+        # Explicitly add the current directory marker (empty object) if it exists but wasn't included
+        dir_marker = bucket.get_key(key_name)
+        if dir_marker is not None and dir_marker not in keys_to_delete:
+          keys_to_delete.append(dir_marker)
+      else:
+        # Add the single key object
+        keys_to_delete.append(key)
+
+      LOG.info(f"Found {len(keys_to_delete)} S3 object(s) to delete under prefix '{key_name}'.")
+
+      # Calculate total chunks using integer ceiling division.
+      total_chunks = (len(keys_to_delete) + S3A_DELETE_CHUNK_SIZE - 1) // S3A_DELETE_CHUNK_SIZE
+      all_errors = []
+
+      # Process keys in chunks of 1000 (S3 API limit)
+      for i in range(0, len(keys_to_delete), S3A_DELETE_CHUNK_SIZE):
+        chunk = keys_to_delete[i : i + S3A_DELETE_CHUNK_SIZE]
+
+        LOG.debug(f"Deleting chunk {i // S3A_DELETE_CHUNK_SIZE + 1} of {total_chunks} (size: {len(chunk)} keys).")
+        try:
+          result = bucket.delete_keys(chunk)
           if result.errors:
-            msg = "%d errors occurred while attempting to delete the following S3 paths:\n%s" % (
-              len(result.errors), '\n'.join(['%s: %s' % (error.key, error.message) for error in result.errors])
-            )
-            LOG.error(msg)
-            raise S3FileSystemException(msg)
+            LOG.warning(f"Encountered {len(result.errors)} errors in this deletion chunk.")
+            all_errors.extend(result.errors)
+        except S3ResponseError as e:
+          # Catch potential connection errors or access denied on the delete call itself
+          LOG.error(f"An S3 API error occurred during key deletion: {e}")
+          raise S3FileSystemException(f"Failed to delete objects: {e.message}") from e
+
+      # After deleting all keys, handle any accumulated errors
+      if all_errors:
+        error_details = "\n".join([f"- {err.key}: {err.message}" for err in all_errors])
+        msg = f"{len(all_errors)} errors occurred while deleting objects from '{path}':\n{error_details}"
+        LOG.error(msg)
+        raise S3FileSystemException(msg)
+
+      LOG.info(f"Successfully deleted {len(keys_to_delete)} object(s) from path: {path}")
 
   @translate_s3_error
   @auth_error_handler

desktop/libs/aws/src/aws/s3/s3fs_test.py  (+226, -156)

@@ -15,18 +15,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import json
+import os
 import string
 import tempfile
-from unittest.mock import Mock, patch
+from unittest.mock import MagicMock, Mock
 
 import pytest
 
 from aws.conf import AWS_ACCOUNTS
 from aws.s3 import join, parse_uri
-from aws.s3.s3fs import S3FileSystem, S3FileSystemException, get_s3_home_directory
-from aws.s3.s3test_utils import S3TestBase, generate_id
+from aws.s3.s3fs import get_s3_home_directory, S3A_DELETE_CHUNK_SIZE, S3FileSystem, S3FileSystemException
+from aws.s3.s3test_utils import generate_id, S3TestBase
 from aws.s3.upload import DEFAULT_WRITE_SIZE
 from desktop.conf import RAZ
 from desktop.lib.django_test_util import make_logged_in_client
@@ -37,21 +37,21 @@ from useradmin.models import User
 
 @pytest.mark.django_db
 def test_get_s3_home_directory():
-  client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
+  make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
   user = User.objects.get(username="test")
 
-  client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
+  make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
   user_not_me = User.objects.get(username="test_not_me")
 
   # When REMOTE_STORAGE_HOME ends with /user in RAZ S3 environment.
-  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user')]
+  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing("s3a://gethue-bucket/user")]
 
   try:
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://gethue-bucket/user/test'
+    assert default_s3_home_path == "s3a://gethue-bucket/user/test"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://gethue-bucket/user/test_not_me'
+    assert default_s3_home_path == "s3a://gethue-bucket/user/test_not_me"
   finally:
     for reset in resets:
       reset()
@@ -60,16 +60,16 @@ def test_get_s3_home_directory():
   resets = [
     RAZ.IS_ENABLED.set_for_testing(True),
     AWS_ACCOUNTS.set_for_testing(
-      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/user'}}
+      {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "default_home_path": "s3a://gethue-other-bucket/user"}}
     ),
   ]
 
   try:
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/user/test'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/user/test"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/user/test_not_me'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/user/test_not_me"
   finally:
     for reset in resets:
       reset()
@@ -78,16 +78,16 @@ def test_get_s3_home_directory():
   resets = [
     RAZ.IS_ENABLED.set_for_testing(False),
     AWS_ACCOUNTS.set_for_testing(
-      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/test-dir'}}
+      {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "default_home_path": "s3a://gethue-other-bucket/test-dir"}}
     ),
   ]
 
   try:
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/test-dir'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/test-dir"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/test-dir'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/test-dir"
   finally:
     for reset in resets:
       reset()
@@ -95,19 +95,19 @@ def test_get_s3_home_directory():
   # When both REMOTE_STORAGE_HOME and S3 filesystem's DEFAULT_HOME_PATH are set in RAZ S3 environment.
   resets = [
     RAZ.IS_ENABLED.set_for_testing(True),
-    REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user'),
+    REMOTE_STORAGE_HOME.set_for_testing("s3a://gethue-bucket/user"),
     AWS_ACCOUNTS.set_for_testing(
-      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/user'}}
+      {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "default_home_path": "s3a://gethue-other-bucket/user"}}
     ),
   ]
 
   try:
     # Gives preference to REMOTE_STORAGE_HOME for backward compatibility.
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://gethue-bucket/user/test'
+    assert default_s3_home_path == "s3a://gethue-bucket/user/test"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://gethue-bucket/user/test_not_me'
+    assert default_s3_home_path == "s3a://gethue-bucket/user/test_not_me"
   finally:
     for reset in resets:
       reset()
@@ -116,16 +116,16 @@ def test_get_s3_home_directory():
   resets = [
     RAZ.IS_ENABLED.set_for_testing(True),
     AWS_ACCOUNTS.set_for_testing(
-      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 's3a://gethue-other-bucket/dir'}}
+      {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "default_home_path": "s3a://gethue-other-bucket/dir"}}
     ),
   ]
 
   try:
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/dir'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/dir"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://gethue-other-bucket/dir'
+    assert default_s3_home_path == "s3a://gethue-other-bucket/dir"
   finally:
     for reset in resets:
       reset()
@@ -133,90 +133,160 @@ def test_get_s3_home_directory():
   # When some different path is set in both RAZ and non-RAZ S3 environment.
   resets = [
     RAZ.IS_ENABLED.set_for_testing(True),
-    REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user'),
+    REMOTE_STORAGE_HOME.set_for_testing("abfs://gethue-container/user"),
     AWS_ACCOUNTS.set_for_testing(
-      {'default': {'region': 'us-west-2', 'host': 's3-us-west-2.amazonaws.com', 'default_home_path': 'abfs://gethue-other-container/dir'}}
+      {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "default_home_path": "abfs://gethue-other-container/dir"}}
     ),
   ]
 
   try:
     default_s3_home_path = get_s3_home_directory(user)
-    assert default_s3_home_path == 's3a://'
+    assert default_s3_home_path == "s3a://"
 
     default_s3_home_path = get_s3_home_directory(user_not_me)
-    assert default_s3_home_path == 's3a://'
+    assert default_s3_home_path == "s3a://"
   finally:
     for reset in resets:
       reset()
 
 
-class TestS3FileSystem:
-  def test_rmtree_bucket(self):
-    with patch('aws.s3.s3fs.S3FileSystem._delete_bucket') as _delete_bucket:
-      fs = S3FileSystem(s3_connection=Mock())
+class TestS3FileSystemRmtree:
+  def test_rmtree_skip_trash_false_raises_error(self):
+    fs = S3FileSystem(s3_connection=Mock())
+
+    with pytest.raises(NotImplementedError, match="Moving to trash is not implemented"):
+      fs.rmtree("s3a://test-bucket/test-path", skipTrash=False)
+
+  @pytest.mark.parametrize("invalid_path", ["/local/path", "http://a.com", "s3:/oops"])
+  def test_rmtree_invalid_uri_raises_value_error(self, invalid_path):
+    fs = S3FileSystem(s3_connection=Mock())
+
+    with pytest.raises(ValueError, match=f"Invalid S3 URI provided: {invalid_path}"):
+      fs.rmtree(invalid_path)
+
+  def test_rmtree_bucket_connection_fails_raises_s3_exception(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(side_effect=Exception("Connection timeout"))
+    fs.isdir = Mock(return_value=True)
+
+    with pytest.raises(S3FileSystemException, match="Could not access bucket 'test-bucket'"):
+      fs.rmtree("s3a://test-bucket/test-path")
+
+  def test_rmtree_deletes_single_file(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(exists=Mock(return_value=True), bucket=Mock(delete_keys=Mock(return_value=Mock(errors=[])))))
+    fs.isdir = Mock(return_value=False)
+
+    fs.rmtree("s3a://test-bucket/file.txt")
+
+    fs._get_key.assert_called_once_with("s3a://test-bucket/file.txt", validate=False)
+    fs._get_key.return_value.exists.assert_called_once()
+    fs._get_key.return_value.bucket.delete_keys.assert_called_once_with([fs._get_key.return_value])
+
+  def test_rmtree_deletes_directory_with_files(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(exists=Mock(return_value=True), bucket=Mock(delete_keys=Mock(return_value=Mock(errors=[])))))
+    fs.isdir = Mock(return_value=True)
+
+    # Simulate bucket.list() returning a list of mock keys
+    keys_in_dir = [
+      Mock(name="folder/file1.txt", bucket=fs._get_key.return_value.bucket),
+      Mock(name="folder/file2.txt", bucket=fs._get_key.return_value.bucket),
+    ]
+    fs._get_key.return_value.bucket.list.return_value = keys_in_dir
+
+    # The explicit directory marker doesn't exist
+    fs._get_key.return_value.bucket.get_key.return_value = None
+
+    fs.rmtree("s3a://test-bucket/folder")
+
+    fs._get_key.return_value.bucket.list.assert_called_once_with(prefix="folder/")
+    fs._get_key.return_value.bucket.delete_keys.assert_called_once_with(keys_in_dir)
+
+  def test_rmtree_deletes_directory_with_explicit_marker(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(exists=Mock(return_value=True), bucket=Mock(delete_keys=Mock(return_value=Mock(errors=[])))))
+    fs.isdir = Mock(return_value=True)
+
+    keys_in_dir = [Mock(name="folder/file1.txt", bucket=fs._get_key.return_value.bucket)]
+    dir_marker_key = Mock(name="folder/", bucket=fs._get_key.return_value.bucket)
+
+    fs._get_key.return_value.bucket.list.return_value = keys_in_dir
+    fs._get_key.return_value.bucket.get_key.return_value = dir_marker_key  # The marker exists
+
+    fs.rmtree("s3a://test-bucket/folder")
+
+    fs._get_key.return_value.bucket.get_key.assert_called_once_with("folder/")
+    # Check that both the file and the directory marker itself are deleted
+    fs._get_key.return_value.bucket.delete_keys.assert_called_once_with([keys_in_dir[0], dir_marker_key])
+
+  def test_rmtree_does_not_delete_for_non_existent_path(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(bucket=Mock(delete_keys=Mock())))
+    fs.isdir = Mock(return_value=False)
+
+    fs._get_key.return_value.exists.return_value = False  # The key doesn't exist
+    fs.isdir.return_value = False  # It's not a directory path
+
+    fs.rmtree("s3a://test-bucket/not-real")
+
+    fs._get_key.return_value.bucket.delete_keys.assert_not_called()
 
-      fs.rmtree(path='s3a://gethue')
+  def test_rmtree_deletes_entire_bucket(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._delete_bucket = Mock()
 
-      _delete_bucket.assert_called()
+    fs.rmtree("s3a://test-bucket")
 
-  def test_rmtree_key(self):
-    with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
-      with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-        key = Mock(
-          name='data',
-          exists=Mock(return_value=True),
-          bucket=Mock(list=Mock(return_value=[]), delete_key=Mock()),
-          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
-        )
-        _get_key.return_value = key
-        isdir.return_value = False
+    fs._delete_bucket.assert_called_once_with("test-bucket")
 
-        fs = S3FileSystem(s3_connection=Mock())
+  def test_rmtree_handles_large_directory_with_chunking(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(bucket=Mock(list=Mock(), delete_keys=Mock(return_value=Mock(errors=[]))), get_key=Mock()))
+    fs.isdir = Mock(return_value=True)
 
-        fs.rmtree(path='s3a://gethue/data')
+    # Create more keys than the chunk size
+    num_keys = S3A_DELETE_CHUNK_SIZE + 27
 
-        key.delete.assert_called()
-        key.bucket.delete_keys.assert_not_called()
+    large_key_list = [Mock(name=f"big-folder/file{i}.txt", bucket=fs._get_key.return_value.bucket) for i in range(num_keys)]
+    fs._get_key.return_value.bucket.list.return_value = large_key_list
+    fs._get_key.return_value.bucket.get_key.return_value = None
 
-  def test_rmtree_empty_dir(self):
-    with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
-      with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-        key = Mock(
-          name='data',
-          exists=Mock(return_value=True),
-          bucket=Mock(list=Mock(return_value=[]), delete_key=Mock()),
-          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
-        )
-        _get_key.return_value = key
-        isdir.return_value = True
+    fs.rmtree("s3a://test-bucket/big-folder/")
 
-        fs = S3FileSystem(s3_connection=Mock())
+    # It should have been called twice: once for the first 1000, then for the remaining 27
+    assert fs._get_key.return_value.bucket.delete_keys.call_count == 2
+    # Check the contents of the calls
+    first_call_args, _ = fs._get_key.return_value.bucket.delete_keys.call_args_list[0]
+    second_call_args, _ = fs._get_key.return_value.bucket.delete_keys.call_args_list[1]
+    assert len(first_call_args[0]) == S3A_DELETE_CHUNK_SIZE
+    assert len(second_call_args[0]) == 27
+    assert first_call_args[0] == large_key_list[:S3A_DELETE_CHUNK_SIZE]
+    assert second_call_args[0] == large_key_list[S3A_DELETE_CHUNK_SIZE:]
 
-        fs.rmtree(path='s3a://gethue/data')
+  def test_rmtree_handles_partial_deletion_failure(self):
+    fs = S3FileSystem(s3_connection=Mock())
+    fs._get_key = Mock(return_value=Mock(bucket=Mock(list=Mock(), delete_keys=Mock()), get_key=Mock()))
+    fs.isdir = Mock(return_value=True)
 
-        key.delete.assert_called()
-        key.bucket.list.assert_called_with(prefix='data/')
-        key.bucket.delete_keys.assert_not_called()
+    fs._get_key.return_value.bucket.list.return_value = [Mock(name="folder/locked-file.txt", bucket=fs._get_key.return_value.bucket)]
+    fs._get_key.return_value.bucket.get_key.return_value = None
 
-  def test_rmtree_non_empty_dir(self):
-    with patch('aws.s3.s3fs.S3FileSystem._get_key') as _get_key:
-      with patch('aws.s3.s3fs.S3FileSystem.isdir') as isdir:
-        key = Mock(
-          name='data',
-          exists=Mock(return_value=True),
-          bucket=Mock(list=Mock(return_value=['data/1', 'data/2']), delete_keys=Mock(return_value=Mock(errors=[]))),
-          delete=Mock(return_value=Mock(exists=Mock(return_value=False))),
-        )
-        _get_key.return_value = key
-        isdir.return_value = True
+    # Simulate a key that fails to delete
+    error_key = MagicMock()
+    error_key.key = "folder/locked-file.txt"
+    error_key.message = "Access Denied"
 
-        fs = S3FileSystem(s3_connection=Mock())
+    mock_delete_result = MagicMock()
+    mock_delete_result.errors = [error_key]  # Simulate one error
+    fs._get_key.return_value.bucket.delete_keys.return_value = mock_delete_result
 
-        fs.rmtree(path='s3a://gethue/data')
+    with pytest.raises(S3FileSystemException) as excinfo:
+      fs.rmtree("s3a://test-bucket/folder/")
 
-        key.delete.assert_not_called()
-        key.bucket.list.assert_called_with(prefix='data/')
-        key.bucket.delete_keys.assert_called()
+    # Check that the exception message is informative
+    assert "1 errors occurred" in str(excinfo.value)
+    assert "folder/locked-file.txt: Access Denied" in str(excinfo.value)
 
 
 class S3FSTest(S3TestBase):
@@ -226,42 +296,42 @@ class S3FSTest(S3TestBase):
     if not cls.shouldSkip():
       cls.fs = S3FileSystem(cls.s3_connection)
 
-      cls.c = make_logged_in_client(username='test', is_superuser=False)
-      grant_access('test', 'test', 'filebrowser')
-      add_to_group('test')
+      cls.c = make_logged_in_client(username="test", is_superuser=False)
+      grant_access("test", "test", "filebrowser")
+      add_to_group("test")
       cls.user = User.objects.get(username="test")
 
   def test_open(self):
-    path = self.get_test_path('test_open.txt')
+    path = self.get_test_path("test_open.txt")
 
     with self.cleaning(path):
       with pytest.raises(S3FileSystemException):
         self.fs.open(path)
 
       key = self.get_key(path)
-      key.set_contents_from_string('Hello')
+      key.set_contents_from_string("Hello")
 
       fh1 = self.fs.open(path)
-      assert 'He' == fh1.read(length=2)
+      assert "He" == fh1.read(length=2)
 
-      fh2 = self.fs.open(path, mode='r')
-      assert 'Hello' == fh2.read()
+      fh2 = self.fs.open(path, mode="r")
+      assert "Hello" == fh2.read()
 
-      assert 'llo' == fh1.read()
+      assert "llo" == fh1.read()
 
       with pytest.raises(Exception):
-        self.fs.open(path, mode='w')
+        self.fs.open(path, mode="w")
       with pytest.raises(Exception):
-        self.fs.open(path, mode='?r')
+        self.fs.open(path, mode="?r")
 
   def test_read(self):
-    path = self.get_test_path('test_read.txt')
+    path = self.get_test_path("test_read.txt")
     with self.cleaning(path):
       key = self.get_key(path)
-      key.set_contents_from_string('Hello')
+      key.set_contents_from_string("Hello")
 
-      assert 'Hel' == self.fs.read(path, 0, 3)
-      assert 'ell' == self.fs.read(path, 1, 3)
+      assert "Hel" == self.fs.read(path, 0, 3)
+      assert "ell" == self.fs.read(path, 1, 3)
 
   def test_isfile(self):
     pass
@@ -270,8 +340,8 @@ class S3FSTest(S3TestBase):
     pass
 
   def test_exists(self):
-    dir_path = self.get_test_path('test_exists')
-    file_path = join(dir_path, 'file')
+    dir_path = self.get_test_path("test_exists")
+    file_path = join(dir_path, "file")
 
     assert not self.fs.exists(dir_path)
     assert not self.fs.exists(file_path)
@@ -281,29 +351,29 @@ class S3FSTest(S3TestBase):
     assert self.fs.exists(dir_path)
     assert self.fs.exists(file_path)
 
-    assert self.fs.exists('s3a://%s' % self.bucket_name)
-    assert self.fs.exists('s3a://')
-    fake_bucket = 'fake%s' % generate_id(8, string.ascii_lowercase + string.digits)
-    assert not self.fs.exists('s3a://%s' % fake_bucket)
+    assert self.fs.exists("s3a://%s" % self.bucket_name)
+    assert self.fs.exists("s3a://")
+    fake_bucket = "fake%s" % generate_id(8, string.ascii_lowercase + string.digits)
+    assert not self.fs.exists("s3a://%s" % fake_bucket)
 
   def test_stats(self):
     with pytest.raises(ValueError):
-      self.fs.stats('ftp://archive')
-    not_exists = self.get_test_path('does_not_exist')
+      self.fs.stats("ftp://archive")
+    not_exists = self.get_test_path("does_not_exist")
     with pytest.raises(S3FileSystemException):
       self.fs.stats(not_exists)
 
-    root_stat = self.fs.stats('s3a://')
+    root_stat = self.fs.stats("s3a://")
     assert True is root_stat.isDir
-    assert 's3a://' == root_stat.path
+    assert "s3a://" == root_stat.path
 
-    bucket_stat = self.fs.stats('s3a://%s' % self.bucket_name)
+    bucket_stat = self.fs.stats("s3a://%s" % self.bucket_name)
     assert True is bucket_stat.isDir
-    assert 's3a://%s' % self.bucket_name == bucket_stat.path
+    assert "s3a://%s" % self.bucket_name == bucket_stat.path
 
   def test_copyfile(self):
-    src_path = self.get_test_path('test_copy_file_src')
-    dst_path = self.get_test_path('test_copy_file_dst')
+    src_path = self.get_test_path("test_copy_file_src")
+    dst_path = self.get_test_path("test_copy_file_dst")
     with self.cleaning(src_path, dst_path):
       data = "To boldly go where no one has gone before\n" * 2000
       self.fs.create(src_path, data=data)
@@ -314,11 +384,11 @@ class S3FSTest(S3TestBase):
       assert data == actual
 
   def test_full_copy(self):
-    src_path = self.get_test_path('test_full_copy_src')
-    dst_path = self.get_test_path('test_full_copy_dst')
+    src_path = self.get_test_path("test_full_copy_src")
+    dst_path = self.get_test_path("test_full_copy_dst")
 
-    src_file_path = join(src_path, 'file.txt')
-    dst_file_path = join(dst_path, 'file.txt')
+    src_file_path = join(src_path, "file.txt")
+    dst_file_path = join(dst_path, "file.txt")
 
     with self.cleaning(src_path, dst_path):
       self.fs.mkdir(src_path)
@@ -336,21 +406,21 @@ class S3FSTest(S3TestBase):
       base_name = parse_uri(src_path)[2]
       dst_folder_path = join(dst_path, base_name)
       assert self.fs.exists(dst_folder_path)
-      assert self.fs.exists(join(dst_folder_path, 'file.txt'))
+      assert self.fs.exists(join(dst_folder_path, "file.txt"))
 
       # Copy directory to file should fail.
       with pytest.raises(S3FileSystemException):
         self.fs.copy(src_path, dst_file_path, True)
 
   def test_copy_remote_dir(self):
-    src_dir = self.get_test_path('test_copy_remote_dir_src')
-    dst_dir = self.get_test_path('test_copy_remote_dir_dst')
+    src_dir = self.get_test_path("test_copy_remote_dir_src")
+    dst_dir = self.get_test_path("test_copy_remote_dir_dst")
 
     with self.cleaning(src_dir, dst_dir):
       self.fs.mkdir(src_dir)
 
-      self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
-      self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
+      self.fs.create(join(src_dir, "file_one.txt"), data="foo")
+      self.fs.create(join(src_dir, "file_two.txt"), data="bar")
 
       self.fs.mkdir(dst_dir)
       self.fs.copy_remote_dir(src_dir, dst_dir)
@@ -364,12 +434,12 @@ class S3FSTest(S3TestBase):
       assert src_names == dst_names
 
   def test_copy_from_local(self):
-    src_name = 'test_copy_from_local_src'
+    src_name = "test_copy_from_local_src"
     src_path = os.path.join(tempfile.gettempdir(), src_name)
-    dst_path = self.get_test_path('test_copy_from_local_dst')
+    dst_path = self.get_test_path("test_copy_from_local_dst")
 
     data = "To boldly go where no one has gone before\n" * 2000
-    f = open(src_path, 'w')
+    f = open(src_path, "w")
     f.write(data)
     f.close()
 
@@ -379,18 +449,18 @@ class S3FSTest(S3TestBase):
       assert data == actual
 
   def test_rename_dir(self):
-    src_dir = self.get_test_path('test_rename_dir_src')
-    dst_dir = self.get_test_path('test_rename_dir_dst')
+    src_dir = self.get_test_path("test_rename_dir_src")
+    dst_dir = self.get_test_path("test_rename_dir_dst")
 
     with self.cleaning(src_dir, dst_dir):
       self.fs.mkdir(src_dir)
-      self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
-      self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
+      self.fs.create(join(src_dir, "file_one.txt"), data="foo")
+      self.fs.create(join(src_dir, "file_two.txt"), data="bar")
 
       src_ls = self.fs.listdir(src_dir)
       assert 2 == len(src_ls)
-      assert 'file_one.txt' in src_ls
-      assert 'file_two.txt' in src_ls
+      assert "file_one.txt" in src_ls
+      assert "file_two.txt" in src_ls
 
       # Assert that no directories with dst_dir name exist yet
       assert not self.fs.exists(dst_dir)
@@ -402,30 +472,30 @@ class S3FSTest(S3TestBase):
 
       dst_ls = self.fs.listdir(dst_dir)
       assert 2 == len(dst_ls)
-      assert 'file_one.txt' in dst_ls
-      assert 'file_two.txt' in dst_ls
+      assert "file_one.txt" in dst_ls
+      assert "file_two.txt" in dst_ls
 
       # Assert that the children files are not duplicated at top-level destination
       bucket_ls = self.bucket.list()
-      assert 'file_one.txt' not in bucket_ls
-      assert 'file_two.txt' not in bucket_ls
+      assert "file_one.txt" not in bucket_ls
+      assert "file_two.txt" not in bucket_ls
 
       # Assert that only the renamed directory, and not an empty file, exists
-      assert 1 == len([key for key in bucket_ls if key.name.strip('/') == self.get_key(dst_dir).name.strip('/')])
+      assert 1 == len([key for key in bucket_ls if key.name.strip("/") == self.get_key(dst_dir).name.strip("/")])
 
   def test_rename_star(self):
-    src_dir = self.get_test_path('test_rename_star_src')
-    dst_dir = self.get_test_path('test_rename_star_dst')
+    src_dir = self.get_test_path("test_rename_star_src")
+    dst_dir = self.get_test_path("test_rename_star_dst")
 
     with self.cleaning(src_dir, dst_dir):
       self.fs.mkdir(src_dir)
-      self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
-      self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
+      self.fs.create(join(src_dir, "file_one.txt"), data="foo")
+      self.fs.create(join(src_dir, "file_two.txt"), data="bar")
 
       src_ls = self.fs.listdir(src_dir)
       assert 2 == len(src_ls)
-      assert 'file_one.txt' in src_ls
-      assert 'file_two.txt' in src_ls
+      assert "file_one.txt" in src_ls
+      assert "file_two.txt" in src_ls
 
       src_stat = self.fs.listdir_stats(src_dir)
 
@@ -441,16 +511,16 @@ class S3FSTest(S3TestBase):
 
   def test_rmtree(self):
     with pytest.raises(NotImplementedError):
-      self.fs.rmtree('universe', skipTrash=False)
+      self.fs.rmtree("universe", skipTrash=False)
 
-    directory = self.get_test_path('test_rmtree')
+    directory = self.get_test_path("test_rmtree")
     with self.cleaning(directory):
       self.fs.mkdir(directory)
-      nested_dir = join(directory, 'nested_dir')
+      nested_dir = join(directory, "nested_dir")
       self.fs.mkdir(nested_dir)
-      file_path = join(nested_dir, 'file')
+      file_path = join(nested_dir, "file")
       key = self.get_key(file_path)
-      key.set_contents_from_string('Some content')
+      key.set_contents_from_string("Some content")
 
       self.fs.rmtree(directory, skipTrash=True)
 
@@ -459,11 +529,11 @@ class S3FSTest(S3TestBase):
       assert not self.fs.exists(directory)
 
   def test_listing_buckets(self):
-    buckets = self.fs.listdir('s3a://')
+    buckets = self.fs.listdir("s3a://")
     assert len(buckets) > 0
 
   def test_mkdir(self):
-    dir_path = self.get_test_path('test_mkdir')
+    dir_path = self.get_test_path("test_mkdir")
     assert not self.fs.exists(dir_path)
 
     self.fs.mkdir(dir_path)
@@ -473,31 +543,31 @@ class S3FSTest(S3TestBase):
     with tempfile.NamedTemporaryFile() as local_file:
       # Make sure we can upload larger than the UPLOAD chunk size
       file_size = DEFAULT_WRITE_SIZE * 2
-      local_file.write('0' * file_size)
+      local_file.write("0" * file_size)
       local_file.flush()
 
-      dest_dir = self.get_test_path('test_upload')
+      dest_dir = self.get_test_path("test_upload")
       local_file = local_file.name
-      dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
+      dest_path = "%s/%s" % (dest_dir, os.path.basename(local_file))
 
-      add_permission(self.user.username, 'has_s3', permname='s3_access', appname='filebrowser')
+      add_permission(self.user.username, "has_s3", permname="s3_access", appname="filebrowser")
       try:
         # Just upload the current python file
-        resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=file(local_file)))
+        resp = self.c.post("/filebrowser/upload/file?dest=%s" % dest_dir, dict(dest=dest_dir, hdfs_file=open(local_file)))
         response = json.loads(resp.content)
       finally:
-        remove_from_group(self.user.username, 'has_s3')
+        remove_from_group(self.user.username, "has_s3")
 
-      assert 0 == response['status'], response
-      stats = self.fs.stats(dest_path)
+      assert 0 == response["status"], response
+      self.fs.stats(dest_path)
 
       f = self.fs.open(dest_path)
       actual = f.read(file_size)
-      expected = file(local_file).read()
-      assert actual == expected, 'files do not match: %s != %s' % (len(actual), len(expected))
+      expected = open(local_file).read()
+      assert actual == expected, "files do not match: %s != %s" % (len(actual), len(expected))
 
   def test_check_access(self):
-    dir_path = self.get_test_path('test_check_access')
+    dir_path = self.get_test_path("test_check_access")
     self.fs.mkdir(dir_path)
 
-    assert self.fs.check_access(dir_path, permission='WRITE')
+    assert self.fs.check_access(dir_path, permission="WRITE")