[core][s3] Add support for S3-compatible object stores with Boto3 migration (#4236)

Harsh Gupta 4 months ago
commit 5b8f53ba0d
36 changed files with 3744 additions and 76 deletions
  1. apps/filebrowser/src/filebrowser/views.py (+7, -2)
  2. apps/filebrowser/src/filebrowser/views_test.py (+19, -15)
  3. apps/hive/src/hive/conf.py (+10, -4)
  4. desktop/conf.dist/gunicorn_log.conf (+11, -1)
  5. desktop/conf.dist/hue.ini (+55, -0)
  6. desktop/conf.dist/log.conf (+11, -1)
  7. desktop/conf/dev_log.conf (+11, -1)
  8. desktop/conf/gunicorn_log.conf (+11, -1)
  9. desktop/conf/log.conf (+11, -1)
  10. desktop/conf/pseudo-distributed.ini.tmpl (+55, -0)
  11. desktop/core/generate_requirements.py (+1, -0)
  12. desktop/core/src/desktop/conf.py (+88, -0)
  13. desktop/core/src/desktop/lib/fs/proxyfs.py (+11, -3)
  14. desktop/core/src/desktop/lib/fs/s3/__init__.py (+16, -0)
  15. desktop/core/src/desktop/lib/fs/s3/clients/__init__.py (+16, -0)
  16. desktop/core/src/desktop/lib/fs/s3/clients/auth/__init__.py (+16, -0)
  17. desktop/core/src/desktop/lib/fs/s3/clients/auth/iam.py (+110, -0)
  18. desktop/core/src/desktop/lib/fs/s3/clients/auth/idbroker.py (+111, -0)
  19. desktop/core/src/desktop/lib/fs/s3/clients/auth/key.py (+59, -0)
  20. desktop/core/src/desktop/lib/fs/s3/clients/auth/raz.py (+143, -0)
  21. desktop/core/src/desktop/lib/fs/s3/clients/aws.py (+131, -0)
  22. desktop/core/src/desktop/lib/fs/s3/clients/base.py (+128, -0)
  23. desktop/core/src/desktop/lib/fs/s3/clients/factory.py (+73, -0)
  24. desktop/core/src/desktop/lib/fs/s3/clients/generic.py (+147, -0)
  25. desktop/core/src/desktop/lib/fs/s3/conf_utils.py (+724, -0)
  26. desktop/core/src/desktop/lib/fs/s3/constants.py (+54, -0)
  27. desktop/core/src/desktop/lib/fs/s3/core/__init__.py (+16, -0)
  28. desktop/core/src/desktop/lib/fs/s3/core/file.py (+243, -0)
  29. desktop/core/src/desktop/lib/fs/s3/core/path.py (+184, -0)
  30. desktop/core/src/desktop/lib/fs/s3/core/s3fs.py (+765, -0)
  31. desktop/core/src/desktop/lib/fs/s3/core/stat.py (+223, -0)
  32. desktop/core/src/desktop/lib/fs/s3/core/upload.py (+214, -0)
  33. desktop/core/src/desktop/lib/fsmanager.py (+15, -11)
  34. desktop/core/src/desktop/models.py (+7, -1)
  35. desktop/core/src/desktop/settings.py (+8, -3)
  36. desktop/libs/aws/src/aws/tests.py (+40, -32)

+ 7 - 2
apps/filebrowser/src/filebrowser/views.py

@@ -46,12 +46,12 @@ from django.utils.translation import gettext as _
 from django.views.decorators.http import require_http_methods
 from django.views.static import was_modified_since
 
-from aws.s3.s3fs import get_s3_home_directory, S3FileSystemException, S3ListAllBucketsException
+from aws.s3.s3fs import S3FileSystemException, S3ListAllBucketsException
 from aws.s3.upload import S3FineUploaderChunkedUpload
 from azure.abfs.upload import ABFSFineUploaderChunkedUpload
 from desktop import appmanager
 from desktop.auth.backend import is_admin
-from desktop.conf import RAZ, TASK_SERVER_V2
+from desktop.conf import RAZ, TASK_SERVER_V2, USE_STORAGE_CONNECTORS
 from desktop.lib import i18n
 from desktop.lib.conf import coerce_bool
 from desktop.lib.django_util import format_preserving_redirect, JsonResponse, render
@@ -268,6 +268,11 @@ def view(request, path):
       )
 
   if 'default_s3_home' in request.GET:
+    if USE_STORAGE_CONNECTORS.get():
+      from desktop.lib.fs.s3.conf_utils import get_s3_home_directory
+    else:
+      from aws.s3.s3fs import get_s3_home_directory
+
     home_dir_path = get_s3_home_directory(request.user)
     if request.fs.isdir(home_dir_path):
       return format_preserving_redirect(

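Note: the hunk above gates the get_s3_home_directory import on the new USE_STORAGE_CONNECTORS flag instead of swapping modules globally, so the legacy aws.s3 path stays importable. A minimal sketch of that dispatch pattern, using a hypothetical resolve_s3_home_directory wrapper (the view inlines this logic rather than defining such a helper):

from desktop.conf import USE_STORAGE_CONNECTORS

def resolve_s3_home_directory(user):
  # Hypothetical wrapper for illustration; mirrors the conditional import in the view above.
  if USE_STORAGE_CONNECTORS.get():
    from desktop.lib.fs.s3.conf_utils import get_s3_home_directory
  else:
    from aws.s3.s3fs import get_s3_home_directory
  return get_s3_home_directory(user)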
+ 19 - 15
apps/filebrowser/src/filebrowser/views_test.py

@@ -15,11 +15,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
+import logging
 import os
 import re
-import json
 import stat
-import logging
 import tempfile
 import urllib.error
 import urllib.parse
@@ -30,20 +30,18 @@ from unittest.mock import Mock, patch
 from urllib.parse import unquote as urllib_unquote
 
 import pandas as pd
-import pytest
 import pyarrow as pa
 import pyarrow.parquet as pq
+import pytest
 from avro import datafile, io, schema
 from django.http import HttpResponse
 from django.test import TestCase
 from django.urls import reverse
-from django.utils.translation import gettext_lazy as _
 
 from aws.conf import AWS_ACCOUNTS
-from aws.s3.s3fs import S3FileSystemException
 from aws.s3.s3test_utils import get_test_bucket
 from azure.conf import ABFS_CLUSTERS, is_abfs_enabled, is_adls_enabled
-from desktop.conf import OZONE, RAZ, is_ofs_enabled, is_oozie_enabled
+from desktop.conf import is_ofs_enabled, is_oozie_enabled, OZONE, RAZ, USE_STORAGE_CONNECTORS
 from desktop.lib.django_test_util import make_logged_in_client
 from desktop.lib.test_utils import add_permission, add_to_group, grant_access, remove_from_group
 from desktop.lib.view_util import location_to_url
@@ -465,13 +463,13 @@ class TestFileBrowserWithHadoop(object):
     kwargs.update(permissions_dict)
 
     # Set 1777, then check permissions of dirs
-    response = self.c.post("/filebrowser/chmod", kwargs)
+    self.c.post("/filebrowser/chmod", kwargs)
     assert 0o41777 == int(self.cluster.fs.stats(PATH)["mode"])
 
     # Now do the above recursively
     assert 0o41777 != int(self.cluster.fs.stats(SUBPATH)["mode"])
     kwargs['recursive'] = True
-    response = self.c.post("/filebrowser/chmod", kwargs)
+    self.c.post("/filebrowser/chmod", kwargs)
     assert 0o41777 == int(self.cluster.fs.stats(SUBPATH)["mode"])
 
     # Test bulk chmod
@@ -515,13 +513,13 @@ class TestFileBrowserWithHadoop(object):
     kwargs.update(permissions_dict)
 
     # Set sticky bit, then check sticky bit is on in hdfs
-    response = self.c.post("/filebrowser/chmod", kwargs)
+    self.c.post("/filebrowser/chmod", kwargs)
     mode = expand_mode(int(self.cluster.fs.stats(PATH)["mode"]))
     assert True is mode[-1]
 
     # Unset sticky bit, then check sticky bit is off in hdfs
     del kwargs['sticky']
-    response = self.c.post("/filebrowser/chmod", kwargs)
+    self.c.post("/filebrowser/chmod", kwargs)
     mode = expand_mode(int(self.cluster.fs.stats(PATH)["mode"]))
     assert False is mode[-1]
 
@@ -573,7 +571,6 @@ class TestFileBrowserWithHadoop(object):
     NAME = "test-rename-before"
     NEW_NAME = "test-rename-after"
     self.cluster.fs.mkdir(PREFIX + NAME)
-    op = "rename"
     # test for full path rename
     self.c.post("/filebrowser/rename", dict(src_path=PREFIX + NAME, dest_path=PREFIX + NEW_NAME))
     assert self.cluster.fs.exists(PREFIX + NEW_NAME)
@@ -775,7 +772,6 @@ class TestFileBrowserWithHadoop(object):
   def test_view_snappy_compressed_avro(self):
     if not snappy_installed():
       pytest.skip("Skipping Test")
-    import snappy
 
     finish = []
     try:
@@ -1505,7 +1501,8 @@ class TestADLSAccessPermissions(object):
     assert 500 == response.status_code
 
     # 500 for real currently
-    assert_raises(IOError, self.client.get, '/filebrowser/edit=ADL://hue-test-01')
+    with pytest.raises(IOError):
+      self.client.get('/filebrowser/edit=ADL://hue-test-01')
 
     # 500 for real currently
 
@@ -1714,7 +1711,11 @@ class TestFileChooserRedirect(object):
           reset()
 
       # S3A - default_s3_home
-      resets = [REMOTE_STORAGE_HOME.set_for_testing(None), AWS_ACCOUNTS.set_for_testing({'default': {'default_home_path': None}})]
+      resets = [
+        REMOTE_STORAGE_HOME.set_for_testing(None),
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        AWS_ACCOUNTS.set_for_testing({"default": {"default_home_path": None}}),
+      ]
       try:
         response = self.client.get('/filebrowser/view=%2F?default_s3_home')
 
@@ -1726,6 +1727,7 @@ class TestFileChooserRedirect(object):
 
       resets = [
         REMOTE_STORAGE_HOME.set_for_testing(None),
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
         AWS_ACCOUNTS.set_for_testing({'default': {'default_home_path': 's3a://my_bucket'}}),
       ]
       try:
@@ -1739,6 +1741,7 @@ class TestFileChooserRedirect(object):
       resets = [
         RAZ.IS_ENABLED.set_for_testing(True),
         REMOTE_STORAGE_HOME.set_for_testing(None),
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
         AWS_ACCOUNTS.set_for_testing({'default': {'default_home_path': 's3a://my_bucket'}}),
       ]
       try:
@@ -1753,6 +1756,7 @@ class TestFileChooserRedirect(object):
       resets = [
         RAZ.IS_ENABLED.set_for_testing(True),
         REMOTE_STORAGE_HOME.set_for_testing(None),
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
         AWS_ACCOUNTS.set_for_testing({'default': {'default_home_path': 's3a://my_bucket/user'}}),
       ]
       try:
@@ -1771,7 +1775,7 @@ class TestFileChooserRedirect(object):
           stats.isDir.return_value = True
           listdir_paged.return_value = HttpResponse()
 
-          response = self.client.get('/filebrowser/view=')
+          self.client.get('/filebrowser/view=')
 
           _normalize_path.assert_called_with('/')
 

+ 10 - 4
apps/hive/src/hive/conf.py

@@ -15,10 +15,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import logging
+import sys
 
-from django.utils.translation import gettext as _, gettext_lazy as _t
+from django.utils.translation import gettext as _
 
 import beeswax.hive_site
 from beeswax.settings import NICE_NAME
@@ -63,15 +63,21 @@ def config_validator(user):
         res.append((NICE_NAME, _(msg)))
       else:
         raise e
-  except Exception as e:
+  except Exception:
     msg = "The application won't work without a running HiveServer2."
     LOG.exception(msg)
     res.append((NICE_NAME, _(msg)))
 
   try:
-    from aws.conf import is_enabled as is_s3_enabled
     from azure.conf import is_abfs_enabled
+    from desktop.conf import USE_STORAGE_CONNECTORS
     from desktop.lib.fsmanager import get_filesystem
+
+    if USE_STORAGE_CONNECTORS.get():
+      from desktop.lib.fs.s3.conf_utils import is_s3_enabled
+    else:
+      from aws.conf import is_s3_enabled
+
     warehouse = beeswax.hive_site.get_metastore_warehouse_dir()
     fs = get_filesystem()
     fs_scheme = fs._get_scheme(warehouse)

+ 11 - 1
desktop/conf.dist/gunicorn_log.conf

@@ -48,6 +48,16 @@ level=NOTSET
 handlers=errorlog
 qualname=boto
 
+[logger_boto3]
+level=NOTSET
+handlers=errorlog
+qualname=boto3
+
+[logger_botocore]
+level=NOTSET
+handlers=errorlog
+qualname=botocore
+
 [handler_stderr]
 class=StreamHandler
 formatter=default
@@ -90,7 +100,7 @@ datefmt=%d/%b/%Y %H:%M:%S %z
 ########################################
 
 [loggers]
-keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto,boto3,botocore
 
 [handlers]
 keys=stderr,logfile,accesslog,errorlog

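The new [logger_boto3] and [logger_botocore] sections mirror the existing boto logger so the Boto3 SDK does not flood the error log. A minimal sketch of the equivalent programmatic setup, assuming logging is configured in code rather than through these .conf files (ERROR matches the log.conf variants; the gunicorn configs use NOTSET so the root level applies instead):

import logging

# Quiet the Boto3 SDK loggers the same way the new .conf sections do.
for name in ("boto3", "botocore"):
  logging.getLogger(name).setLevel(logging.ERROR)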
+ 55 - 0
desktop/conf.dist/hue.ini

@@ -264,6 +264,11 @@ http_500_debug_mode=false
 # Enable saved default configurations for Hive, Impala, Spark, and Oozie.
 ## use_default_configuration=false
 
+# Use storage connector system for multi-cloud object storage access.
+# Default: true. When enabled, replaces legacy AWS_ACCOUNTS with modern storage connector configuration.
+# Set to false to use legacy AWS_ACCOUNTS instead.
+## use_storage_connectors=true
+
 # The directory where to store the auditing logs. Auditing is disable if the value is empty.
 # e.g. /var/log/hue/audit.log
 ## audit_event_log_dir=
@@ -1080,6 +1085,56 @@ tls=no
 # Provide a comma-separated list of extensions including the dot (e.g., ".exe, .zip, .rar, .tar, .gz").
 ## restrict_local_file_extensions=.exe, .zip, .rar, .tar, .gz
 
+## Storage Connector Configuration (Modern Multi-Cloud Object Storage)
+# ------------------------------------------------------------------------
+[[storage_connectors]]
+# Modern replacement for AWS_ACCOUNTS that supports multiple cloud providers.
+# Enabled by default (use_storage_connectors=true). Uncomment examples below to configure.
+
+# Example: AWS S3 with Access Keys
+## [[[aws_production]]]
+  ## provider=aws
+  ## auth_type=key
+  ## region=us-west-2
+  ## access_key_id=AKIA...
+  ## secret_key=your-secret-key
+  ## bucket_configs={"production-data": {"default_home_path": "user/", "region": "us-west-2"}, "backup-data": {"default_home_path": "/"}}
+
+# Example: AWS S3 with IAM Role
+## [[[aws_iam]]]
+  ## provider=aws
+  ## auth_type=iam
+  ## region=us-east-1
+  ## iam_role=arn:aws:iam::123456789012:role/HueS3AccessRole
+
+# Example: Dell ECS with RAZ Authentication  
+## [[[dell_ecs]]]
+  ## provider=dell
+  ## auth_type=raz
+  ## endpoint=https://ecs.company.com:9021
+  ## region=us-east-1
+  ## bucket_configs={"enterprise-data": {"default_home_path": "user/"}, "shared-data": {"default_home_path": "/"}}
+
+# Example: NetApp StorageGRID
+## [[[netapp_grid]]]
+  ## provider=netapp
+  ## auth_type=key
+  ## endpoint=https://storagegrid.company.com
+  ## access_key_id=your-access-key
+  ## secret_key=your-secret-key
+  ## bucket_configs={"analytics": {"default_home_path": "users/"}}
+  ## options={"ssl_verify": false}
+
+# Example: Generic S3-Compatible Storage (MinIO)
+## [[[minio_dev]]]
+  ## provider=generic
+  ## auth_type=key
+  ## endpoint=http://minio.local:9000
+  ## access_key_id=minioadmin
+  ## secret_key=minioadmin
+  ## bucket_configs={"dev-bucket": {"default_home_path": "workspace/"}}
+  ## options={"signature_version": "s3v4"}
+
 ###########################################################################
 # Settings to configure the snippets available in the Notebook
 ###########################################################################

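For orientation, these [[storage_connectors]] sections map onto the STORAGE_CONNECTORS config added in desktop/core/src/desktop/conf.py later in this diff. A hedged sketch of reading one entry through Hue's config API, assuming the aws_production name from the example above and the usual UnspecifiedConfigSection access pattern:

from desktop.conf import STORAGE_CONNECTORS

for name in STORAGE_CONNECTORS.keys():       # e.g. 'aws_production'
  connector = STORAGE_CONNECTORS[name]
  provider = connector.PROVIDER.get()        # 'aws', 'netapp', 'dell' or 'generic'
  auth_type = connector.AUTH_TYPE.get()      # 'key', 'iam', 'raz' or 'idbroker'
  buckets = connector.BUCKET_CONFIGS.get()   # dict parsed from the JSON value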
+ 11 - 1
desktop/conf.dist/log.conf

@@ -46,6 +46,16 @@ level=ERROR
 handlers=errorlog
 qualname=boto
 
+[logger_boto3]
+level=ERROR
+handlers=errorlog
+qualname=boto3
+
+[logger_botocore]
+level=ERROR
+handlers=errorlog
+qualname=botocore
+
 [handler_stderr]
 class=StreamHandler
 formatter=default
@@ -86,7 +96,7 @@ datefmt=%d/%b/%Y %H:%M:%S %z
 ########################################
 
 [loggers]
-keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto,boto3,botocore
 
 [handlers]
 keys=stderr,logfile,accesslog,errorlog

+ 11 - 1
desktop/conf/dev_log.conf

@@ -47,6 +47,16 @@ level=ERROR
 handlers=errorlog
 qualname=boto
 
+[logger_boto3]
+level=ERROR
+handlers=errorlog
+qualname=boto3
+
+[logger_botocore]
+level=ERROR
+handlers=errorlog
+qualname=botocore
+
 [handler_stderr]
 class=StreamHandler
 formatter=default
@@ -88,7 +98,7 @@ datefmt=%d/%b/%Y %H:%M:%S %z
 ########################################
 
 [loggers]
-keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto,boto3,botocore
 
 [handlers]
 keys=stderr,logfile,accesslog,errorlog

+ 11 - 1
desktop/conf/gunicorn_log.conf

@@ -48,6 +48,16 @@ level=NOTSET
 handlers=errorlog
 qualname=boto
 
+[logger_boto3]
+level=NOTSET
+handlers=errorlog
+qualname=boto3
+
+[logger_botocore]
+level=NOTSET
+handlers=errorlog
+qualname=botocore
+
 [handler_stderr]
 class=StreamHandler
 formatter=default
@@ -90,7 +100,7 @@ datefmt=%d/%b/%Y %H:%M:%S %z
 ########################################
 
 [loggers]
-keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto,boto3,botocore
 
 [handlers]
 keys=stderr,logfile,accesslog,errorlog

+ 11 - 1
desktop/conf/log.conf

@@ -46,6 +46,16 @@ level=ERROR
 handlers=errorlog
 qualname=boto
 
+[logger_boto3]
+level=ERROR
+handlers=errorlog
+qualname=boto3
+
+[logger_botocore]
+level=ERROR
+handlers=errorlog
+qualname=botocore
+
 [handler_stderr]
 class=StreamHandler
 formatter=default
@@ -86,7 +96,7 @@ datefmt=%d/%b/%Y %H:%M:%S %z
 ########################################
 
 [loggers]
-keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto
+keys=root,access,django_auth_ldap,kazoo_client,requests_packages_urllib3_connectionpool,djangosaml2,django_db,boto,boto3,botocore
 
 [handlers]
 keys=stderr,logfile,accesslog,errorlog

+ 55 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -258,6 +258,11 @@
   # Choose whether to use new charting library across the whole Hue.
   ## use_new_charts=false
 
+  # Use storage connector system for multi-cloud object storage access.
+  # Default: true. When enabled, replaces legacy AWS_ACCOUNTS with modern storage connector configuration.
+  # Set to false to use legacy AWS_ACCOUNTS instead.
+  ## use_storage_connectors=true
+
   # Choose whether to allow multi tenancy or not.
   ## enable_organizations=false
 
@@ -1065,6 +1070,56 @@
   # Provide a comma-separated list of extensions including the dot (e.g., ".exe, .zip, .rar, .tar, .gz").
   ## restrict_local_file_extensions=.exe, .zip, .rar, .tar, .gz
 
+  ## Storage Connector Configuration (Modern Multi-Cloud Object Storage)
+  # ------------------------------------------------------------------------
+  [[storage_connectors]]
+  # Modern replacement for AWS_ACCOUNTS that supports multiple cloud providers.
+  # Enabled by default (use_storage_connectors=true). Uncomment examples below to configure.
+
+  # Example: AWS S3 with Access Keys
+  ## [[[aws_production]]]
+    ## provider=aws
+    ## auth_type=key
+    ## region=us-west-2
+    ## access_key_id=AKIA...
+    ## secret_key=your-secret-key
+    ## bucket_configs={"production-data": {"default_home_path": "user/", "region": "us-west-2"}, "backup-data": {"default_home_path": "/"}}
+
+  # Example: AWS S3 with IAM Role
+  ## [[[aws_iam]]]
+    ## provider=aws
+    ## auth_type=iam
+    ## region=us-east-1
+    ## iam_role=arn:aws:iam::123456789012:role/HueS3AccessRole
+
+  # Example: Dell ECS with RAZ Authentication  
+  ## [[[dell_ecs]]]
+    ## provider=dell
+    ## auth_type=raz
+    ## endpoint=https://ecs.company.com:9021
+    ## region=us-east-1
+    ## bucket_configs={"enterprise-data": {"default_home_path": "user/"}, "shared-data": {"default_home_path": "/"}}
+
+  # Example: NetApp StorageGRID
+  ## [[[netapp_grid]]]
+    ## provider=netapp
+    ## auth_type=key
+    ## endpoint=https://storagegrid.company.com
+    ## access_key_id=your-access-key
+    ## secret_key=your-secret-key
+    ## bucket_configs={"analytics": {"default_home_path": "users/"}}
+    ## options={"ssl_verify": false}
+
+  # Example: Generic S3-Compatible Storage (MinIO)
+  ## [[[minio_dev]]]
+    ## provider=generic
+    ## auth_type=key
+    ## endpoint=http://minio.local:9000
+    ## access_key_id=minioadmin
+    ## secret_key=minioadmin
+    ## bucket_configs={"dev-bucket": {"default_home_path": "workspace/"}}
+    ## options={"signature_version": "s3v4"}
+
 ###########################################################################
 # Settings to configure the snippets available in the Notebook
 ###########################################################################

+ 1 - 0
desktop/core/generate_requirements.py

@@ -48,6 +48,7 @@ class RequirementsGenerator:
       "asn1crypto==0.24.0",
       "avro-python3==1.8.2",
       "Babel==2.9.1",
+      "boto3==1.37.38",
       "celery[redis]==5.4.0",
       "cffi==1.15.0",
       "channels==4.2.2",

+ 88 - 0
desktop/core/src/desktop/conf.py

@@ -2803,6 +2803,19 @@ def config_validator(user):
   if 'use_new_editor' in USE_NEW_EDITOR.bind_to:
     res.append(('[desktop] use_new_editor', str(_('This configuration flag has been deprecated.'))))
 
+  # Validate S3 configuration
+  if USE_STORAGE_CONNECTORS.get():
+    try:
+      from desktop.lib.fs.s3.conf_utils import validate_s3_configuration
+
+      # Validate storage connector configuration structure
+      s3_errors = validate_s3_configuration()
+      for error in s3_errors:
+        res.append(("STORAGE_CONNECTOR_CONFIGURATION", error))
+
+    except Exception as e:
+      res.append(("STORAGE_CONNECTOR_CONFIGURATION", f"Failed to validate storage connector configuration: {e}"))
+
   return res
 
 
@@ -2909,6 +2922,81 @@ def is_gs_enabled():
     conf_idbroker.is_idbroker_enabled('gs')
 
 
+PERMISSION_ACTION_S3 = "s3_access"
+
+USE_STORAGE_CONNECTORS = Config(
+    key='use_storage_connectors',
+    type=coerce_bool,
+    default=True,
+    help=_('Use storage connector system for multi-cloud object storage access')
+)
+
+STORAGE_CONNECTORS = UnspecifiedConfigSection(
+  'storage_connectors',
+  help=_('Storage connector definitions with bucket-specific configurations'),
+  each=ConfigSection(
+    help=_('Configuration for a single S3-compatible storage connector'),
+    members=dict(
+      PROVIDER=Config(
+        key='provider',
+        type=str,
+        default='aws',  # aws, netapp, dell, generic
+        help=_('Storage provider type (aws, netapp, dell, generic)')
+      ),
+      AUTH_TYPE=Config(
+        key='auth_type',
+        type=str,
+        default='key',  # key, iam, raz, idbroker
+        help=_('Authentication method (key, iam, raz, idbroker)')
+      ),
+      REGION=Config(
+        key='region',
+        type=str,
+        default=None,
+        help=_('Default AWS region (required for AWS provider)')
+      ),
+      ENDPOINT=Config(
+        key='endpoint',
+        type=str,
+        default=None,
+        help=_('Custom endpoint URL (required for non-AWS providers)')
+      ),
+      ACCESS_KEY_ID=Config(
+        key='access_key_id',
+        type=str,
+        default=None,
+        help=_('Access key ID (required for key auth)')
+      ),
+      SECRET_KEY=Config(
+        key='secret_key',
+        type=str,
+        private=True,
+        default=None,
+        help=_('Secret access key (required for key auth)')
+      ),
+      IAM_ROLE=Config(
+        key='iam_role',
+        type=str,
+        default=None,
+        help=_('IAM role ARN to assume (for iam auth)')
+      ),
+      BUCKET_CONFIGS=Config(
+        key='bucket_configs',
+        type=coerce_json_dict,
+        default='{}',
+        help=_('Per-bucket configuration: {"bucket-name": {"default_home_path": "/path/", "region": "us-east-1"}}')
+      ),
+      OPTIONS=Config(
+        key='options',
+        type=coerce_json_dict,
+        default='{}',
+        help=_('Provider-specific configuration options as JSON')
+      )
+    )
+  )
+)
+
+
 def has_gs_access(user):
   from desktop.auth.backend import is_admin
   return user.is_authenticated and user.is_active and (

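The config_validator hook above delegates to validate_s3_configuration in desktop/lib/fs/s3/conf_utils.py, which this commit adds but this excerpt does not show. Purely as an illustration of the kind of structural checks such a validator could perform against STORAGE_CONNECTORS (not the actual implementation):

from desktop.conf import STORAGE_CONNECTORS

def sketch_validate_storage_connectors():
  # Illustrative only; checks follow from the Config help text above.
  errors = []
  for name in STORAGE_CONNECTORS.keys():
    connector = STORAGE_CONNECTORS[name]
    if connector.PROVIDER.get() != 'aws' and not connector.ENDPOINT.get():
      errors.append("Connector '%s': endpoint is required for non-AWS providers" % name)
    if connector.AUTH_TYPE.get() == 'key' and not (connector.ACCESS_KEY_ID.get() and connector.SECRET_KEY.get()):
      errors.append("Connector '%s': access_key_id and secret_key are required for key auth" % name)
  return errors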
+ 11 - 3
desktop/core/src/desktop/lib/fs/proxyfs.py

@@ -19,15 +19,18 @@ from urllib.parse import urlparse as lib_urlparse
 
 from crequest.middleware import CrequestMiddleware
 
-from aws.conf import is_raz_s3
-from aws.s3.s3fs import get_s3_home_directory
 from azure.abfs.__init__ import get_abfs_home_directory
 from azure.conf import is_raz_abfs
 from desktop.auth.backend import is_admin
-from desktop.conf import DEFAULT_USER, is_ofs_enabled, is_raz_gs
+from desktop.conf import DEFAULT_USER, is_ofs_enabled, is_raz_gs, USE_STORAGE_CONNECTORS
 from desktop.lib.fs.gc.gs import get_gs_home_directory
 from useradmin.models import User
 
+if USE_STORAGE_CONNECTORS.get():
+  from desktop.lib.fs.s3.conf_utils import is_raz_s3
+else:
+  from aws.conf import is_raz_s3
+
 LOG = logging.getLogger()
 DEFAULT_USER = DEFAULT_USER.get()
 
@@ -220,6 +223,11 @@ class ProxyFS(object):
 
     # Get the new home_path for S3/ABFS/GS when RAZ is enabled.
     if is_raz_s3():
+      if USE_STORAGE_CONNECTORS.get():
+        from desktop.lib.fs.s3.conf_utils import get_s3_home_directory
+      else:
+        from aws.s3.s3fs import get_s3_home_directory
+
       home_path = get_s3_home_directory(User.objects.get(username=self.getuser()))
     elif is_raz_abfs():
       home_path = get_abfs_home_directory(User.objects.get(username=self.getuser()))

+ 16 - 0
desktop/core/src/desktop/lib/fs/s3/__init__.py

@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 16 - 0
desktop/core/src/desktop/lib/fs/s3/clients/__init__.py

@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 16 - 0
desktop/core/src/desktop/lib/fs/s3/clients/auth/__init__.py

@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 110 - 0
desktop/core/src/desktop/lib/fs/s3/clients/auth/iam.py

@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from datetime import datetime
+from typing import Any, Dict, TYPE_CHECKING
+
+import boto3
+
+from desktop.lib.fs.s3.clients.base import S3AuthProvider
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+LOG = logging.getLogger()
+
+
+class IAMAuthProvider(S3AuthProvider):
+  """
+  Authentication provider using AWS IAM roles.
+  Supports:
+  1. EC2 instance profiles
+  2. ECS task roles
+  3. Explicit IAM role assumption
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+    self._credentials = None
+    self._sts_client = None
+    self._role_arn = connector_config.iam_role
+    self._session_name = f"hue-{user}-session"
+    self._init_sts()
+
+  def _init_sts(self) -> None:
+    """Initialize STS client for role assumption if needed"""
+    try:
+      # Create STS client using instance/task credentials
+      self._sts_client = boto3.client("sts")
+
+      if self._role_arn:
+        # Assume the specified role
+        response = self._sts_client.assume_role(RoleArn=self._role_arn, RoleSessionName=self._session_name)
+        self._credentials = {
+          "access_key_id": response["Credentials"]["AccessKeyId"],
+          "secret_access_key": response["Credentials"]["SecretAccessKey"],
+          "session_token": response["Credentials"]["SessionToken"],
+          "expiration": response["Credentials"]["Expiration"],
+        }
+      else:
+        # Use instance/task credentials directly
+        instance_creds = boto3.Session().get_credentials()
+        if instance_creds:
+          self._credentials = {
+            "access_key_id": instance_creds.access_key,
+            "secret_access_key": instance_creds.secret_key,
+            "session_token": instance_creds.token,
+            "expiration": getattr(instance_creds, "_expiry_time", None),
+          }
+        else:
+          raise Exception("No instance credentials found")
+    except Exception as e:
+      LOG.error(f"Failed to initialize IAM credentials: {e}")
+      raise
+
+  def get_credentials(self) -> Dict[str, Any]:
+    """Get current IAM credentials"""
+    if self._should_refresh():
+      self.refresh()
+    return self._credentials
+
+  def get_session_kwargs(self) -> Dict[str, Any]:
+    """Get kwargs for creating boto3 session"""
+    creds = self.get_credentials()
+    return {
+      "aws_access_key_id": creds["access_key_id"],
+      "aws_secret_access_key": creds["secret_access_key"],
+      "aws_session_token": creds.get("session_token"),
+      "region_name": self.connector_config.region,
+    }
+
+  def _should_refresh(self) -> bool:
+    """Check if credentials need refresh"""
+    if not self._credentials:
+      return True
+    if "expiration" not in self._credentials:
+      return False
+
+    # Refresh if less than 5 minutes remaining
+    now = datetime.utcnow()
+    expiry = self._credentials["expiration"]
+    return (expiry - now).total_seconds() < 300
+
+  def refresh(self) -> None:
+    """Refresh IAM credentials"""
+    self._init_sts()

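A hedged usage sketch for IAMAuthProvider: get_session_kwargs() returns aws_access_key_id, aws_secret_access_key, aws_session_token and region_name, so it feeds boto3.Session directly. The connector config below is a stand-in (the real ConnectorConfig comes from conf_utils.py, which is not shown in this excerpt) stubbing only the attributes this provider reads:

from types import SimpleNamespace

import boto3

from desktop.lib.fs.s3.clients.auth.iam import IAMAuthProvider

# Stand-in connector config; iam_role and region values echo the hue.ini examples.
connector_config = SimpleNamespace(
  iam_role="arn:aws:iam::123456789012:role/HueS3AccessRole",
  region="us-east-1",
)

provider = IAMAuthProvider(connector_config, user="hue")
session = boto3.Session(**provider.get_session_kwargs())
s3 = session.client("s3")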
+ 111 - 0
desktop/core/src/desktop/lib/fs/s3/clients/auth/idbroker.py

@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from datetime import datetime
+from typing import Any, Dict, TYPE_CHECKING
+
+from desktop.lib.fs.s3.clients.base import S3AuthProvider
+from desktop.lib.idbroker import conf as conf_idbroker
+from desktop.lib.idbroker.client import IDBroker
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+LOG = logging.getLogger()
+
+
+class IDBrokerAuthProvider(S3AuthProvider):
+  """
+  Authentication provider using IDBroker service.
+  IDBroker provides temporary credentials for S3 access.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+    self._credentials = None
+    self._idbroker = None
+    self._init_idbroker()
+
+  def _init_idbroker(self) -> None:
+    """Initialize IDBroker client using existing global configuration"""
+    try:
+      # Use existing IDBroker configuration from core-site
+      self._idbroker = IDBroker.from_core_site("s3a", self.user)
+      self._load_credentials()
+    except Exception as e:
+      LOG.error(f"Failed to initialize IDBroker: {e}")
+      raise
+
+  def _load_credentials(self) -> None:
+    """Load credentials from IDBroker"""
+    try:
+      cab = self._idbroker.get_cab()
+      if not cab or "Credentials" not in cab:
+        raise Exception("No credentials in IDBroker response")
+
+      creds = cab["Credentials"]
+      self._credentials = {
+        "access_key_id": creds.get("AccessKeyId"),
+        "secret_access_key": creds.get("SecretAccessKey"),
+        "session_token": creds.get("SessionToken"),
+        "expiration": creds.get("Expiration"),
+      }
+
+      if not self._credentials["access_key_id"] or not self._credentials["secret_access_key"]:
+        raise Exception("Missing required credentials from IDBroker")
+
+    except Exception as e:
+      LOG.error(f"Failed to load credentials from IDBroker: {e}")
+      raise
+
+  def get_credentials(self) -> Dict[str, Any]:
+    """Get current credentials"""
+    if self._should_refresh():
+      self.refresh()
+    return self._credentials
+
+  def get_session_kwargs(self) -> Dict[str, Any]:
+    """Get kwargs for creating boto3 session"""
+    creds = self.get_credentials()
+    return {
+      "aws_access_key_id": creds["access_key_id"],
+      "aws_secret_access_key": creds["secret_access_key"],
+      "aws_session_token": creds.get("session_token"),
+      "region_name": self.connector_config.region,
+    }
+
+  def _should_refresh(self) -> bool:
+    """Check if credentials need refresh"""
+    if not self._credentials:
+      return True
+    if "expiration" not in self._credentials:
+      return False
+
+    # Refresh if less than 5 minutes remaining
+    now = datetime.utcnow()
+    expiry = self._credentials["expiration"]
+    return (expiry - now).total_seconds() < 300
+
+  def refresh(self) -> None:
+    """Refresh credentials from IDBroker"""
+    self._load_credentials()
+
+  @classmethod
+  def is_enabled(cls) -> bool:
+    """Check if IDBroker is enabled"""
+    return conf_idbroker.is_idbroker_enabled("s3a")

+ 59 - 0
desktop/core/src/desktop/lib/fs/s3/clients/auth/key.py

@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, TYPE_CHECKING
+
+from desktop.lib.fs.s3.clients.base import S3AuthProvider
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+
+class KeyAuthProvider(S3AuthProvider):
+  """
+  Authentication provider using access key and secret key.
+  Simple static credentials without refresh.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+    self._credentials = None
+    self._load_credentials()
+
+  def _load_credentials(self) -> None:
+    """Load credentials from connector config"""
+    connector = self.connector_config
+    self._credentials = {"access_key_id": connector.access_key_id, "secret_access_key": connector.secret_key}
+
+    if not self._credentials["access_key_id"] or not self._credentials["secret_access_key"]:
+      raise ValueError(f"Missing access key or secret key for connector {connector.id}")
+
+  def get_credentials(self) -> Dict[str, Any]:
+    """Get static credentials"""
+    return self._credentials
+
+  def get_session_kwargs(self) -> Dict[str, Any]:
+    """Get kwargs for creating boto3 session"""
+    return {
+      "aws_access_key_id": self._credentials["access_key_id"],
+      "aws_secret_access_key": self._credentials["secret_access_key"],
+      "region_name": self.connector_config.region,
+    }
+
+  def refresh(self) -> None:
+    """No refresh needed for static credentials"""
+    pass

+ 143 - 0
desktop/core/src/desktop/lib/fs/s3/clients/auth/raz.py

@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, TYPE_CHECKING
+from urllib.parse import urlencode, urlparse, urlunparse
+
+import boto3
+from botocore.awsrequest import AWSRequest
+
+from desktop.lib.fs.s3.clients.base import S3AuthProvider
+from desktop.lib.raz.clients import S3RazClient
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+LOG = logging.getLogger()
+
+
+class RazEventHandler:
+  """
+  Handles RAZ integration with boto3's event system.
+  Intercepts requests before they're signed and sent to S3.
+  """
+
+  def __init__(self, user: str):
+    self.user = user
+    # Use existing S3RazClient which has the correct get_url() interface
+    self.raz_client = S3RazClient(username=user)
+
+  def _handle_before_sign(self, request: AWSRequest, **kwargs) -> None:
+    """
+    Handle before-sign event.
+    This is called before boto3 would normally sign the request.
+    We intercept here to get RAZ to sign instead.
+    """
+    try:
+      # Get request details
+      url = self._get_request_url(request)
+      method = request.method
+      headers = dict(request.headers)
+      data = request.body
+
+      # Get RAZ signed headers
+      raz_headers = self.raz_client.get_url(action=method, url=url, headers=headers, data=data)
+
+      if not raz_headers:
+        raise Exception("RAZ returned no signed headers")
+
+      # Update request headers with RAZ signed headers
+      request.headers.update(raz_headers)
+
+      # Mark request as pre-signed to skip boto3 signing
+      request.context["pre_signed"] = True
+
+    except Exception as e:
+      LOG.error(f"Failed to sign request with RAZ: {e}")
+      raise
+
+  def _handle_before_send(self, request: AWSRequest, **kwargs) -> None:
+    """
+    Handle before-send event.
+    This is called after signing but before sending.
+    We clean up any leftover AWS headers here.
+    """
+    # TODO: Is this needed?
+    # Remove any AWS specific headers that RAZ doesn't need
+    aws_headers = ["X-Amz-Security-Token", "X-Amz-Date", "X-Amz-Content-SHA256", "Authorization"]
+
+    for header in aws_headers:
+      request.headers.pop(header, None)
+
+  def _get_request_url(self, request: AWSRequest) -> str:
+    """
+    Get full request URL including query parameters.
+    Handles virtual hosted and path style URLs.
+    """
+    url_parts = list(urlparse(request.url))
+
+    # Add query parameters
+    if request.params:
+      query = urlencode(request.params)
+      url_parts[4] = query
+
+    # Handle virtual hosted style URLs
+    if "s3." in url_parts[1] and request.context.get("bucket_name"):
+      bucket = request.context["bucket_name"]
+      url_parts[1] = f"{bucket}.{url_parts[1]}"
+      # Remove bucket from path
+      url_parts[2] = url_parts[2].replace(f"/{bucket}", "", 1)
+
+    return urlunparse(url_parts)
+
+
+class RazAuthProvider(S3AuthProvider):
+  """
+  Authentication provider using RAZ for request signing.
+  Uses boto3's event system to intercept and sign requests.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+
+    # Create RAZ event handler (uses global RAZ configuration internally)
+    self.raz_event_handler = RazEventHandler(user=user)
+
+    # Store boto3 session with RAZ event handlers
+    self.session = boto3.Session(aws_access_key_id="dummy", aws_secret_access_key="dummy", region_name=connector_config.region)
+
+    # Register RAZ handlers with the session's event system
+    self.session.events.register("before-sign.*.*", self.raz_event_handler._handle_before_sign)
+
+  def get_credentials(self) -> Dict[str, Any]:
+    """
+    Return dummy credentials.
+    Real signing is done via event handlers.
+    """
+    return {"access_key_id": "dummy", "secret_access_key": "dummy"}
+
+  def get_session_kwargs(self) -> Dict[str, Any]:
+    """
+    Get kwargs for creating boto3 session.
+    Returns pre-configured session with RAZ event handlers.
+    """
+    return {"aws_access_key_id": "dummy", "aws_secret_access_key": "dummy", "region_name": self.connector_config.region}
+
+  def refresh(self) -> None:
+    """No refresh needed as we sign per-request"""
+    pass

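For context on the event hook above: boto3 sessions expose the botocore event emitter, so a RAZ-style before-sign handler can be attached to a session and every request is rewritten with RAZ-signed headers before it is sent. A minimal sketch under that assumption, reusing the RazEventHandler from this file and the endpoint from the Dell ECS example in hue.ini (not part of the commit):

import boto3

from desktop.lib.fs.s3.clients.auth.raz import RazEventHandler

handler = RazEventHandler(user="hue")

# Dummy static credentials; the effective signature comes from the RAZ headers
# injected by _handle_before_sign, not from SigV4.
session = boto3.Session(aws_access_key_id="dummy", aws_secret_access_key="dummy", region_name="us-east-1")
session.events.register("before-sign.*.*", handler._handle_before_sign)

s3 = session.client("s3", endpoint_url="https://ecs.company.com:9021")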
+ 131 - 0
desktop/core/src/desktop/lib/fs/s3/clients/aws.py

@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Optional, TYPE_CHECKING
+
+import boto3
+from boto3.session import Session
+from botocore.credentials import Credentials
+from botocore.exceptions import ClientError
+
+from desktop.conf import RAZ
+from desktop.lib.fs.s3.clients.auth.iam import IAMAuthProvider
+from desktop.lib.fs.s3.clients.auth.idbroker import IDBrokerAuthProvider
+from desktop.lib.fs.s3.clients.auth.key import KeyAuthProvider
+from desktop.lib.fs.s3.clients.auth.raz import RazAuthProvider
+from desktop.lib.fs.s3.clients.base import S3AuthProvider, S3ClientInterface
+from desktop.lib.fs.s3.constants import DEFAULT_REGION
+from desktop.lib.idbroker import conf as conf_idbroker
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+LOG = logging.getLogger()
+
+
+class AWSS3Client(S3ClientInterface):
+  """
+  AWS S3 client implementation.
+  Handles AWS-specific features and optimizations.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+
+    # AWS specific config
+    self.client_config.signature_version = "s3v4"
+
+    options = connector_config.options or {}
+    self.client_config.s3.update(
+      {
+        "payload_signing_enabled": True,
+        "use_accelerate_endpoint": options.get("use_accelerate", False),
+        "use_dualstack_endpoint": options.get("use_dualstack", False),
+      }
+    )
+
+  def _create_auth_provider(self) -> S3AuthProvider:
+    """Create appropriate auth provider based on connector config"""
+    connector = self.connector_config
+
+    # Priority-based auth provider selection
+    if RAZ.IS_ENABLED.get() and connector.auth_type == "raz":
+      return RazAuthProvider(connector, self.user)
+    elif conf_idbroker.is_idbroker_enabled("s3a") and connector.auth_type == "idbroker":
+      return IDBrokerAuthProvider(connector, self.user)
+    elif connector.auth_type == "iam" and connector.iam_role:
+      return IAMAuthProvider(connector, self.user)
+    else:
+      return KeyAuthProvider(connector, self.user)
+
+  def _create_session(self) -> Session:
+    """Create boto3 session with credentials from auth provider"""
+    session_kwargs = self.auth_provider.get_session_kwargs()
+    return boto3.Session(**session_kwargs)
+
+  def _create_client(self) -> Any:
+    """Create boto3 S3 client"""
+    return self.session.client("s3", config=self.client_config, endpoint_url=self.connector_config.endpoint)
+
+  def _create_resource(self) -> Any:
+    """Create boto3 S3 resource"""
+    return self.session.resource("s3", config=self.client_config, endpoint_url=self.connector_config.endpoint)
+
+  def get_credentials(self) -> Optional[Credentials]:
+    """Get current credentials"""
+    return self.session.get_credentials()
+
+  def get_delegation_token(self) -> Optional[str]:
+    """Get delegation token (not supported for AWS)"""
+    return None
+
+  def get_region(self, bucket: str) -> str:
+    """
+    Get region for a bucket with smart bucket config support.
+    Checks:
+    1. Bucket-specific region from bucket_configs
+    2. Bucket location constraint (AWS API)
+    3. Connector default region
+    4. System default region
+    """
+    # Check bucket-specific region from bucket_configs first
+    if bucket and self.connector_config.bucket_configs:
+      bucket_config = self.connector_config.bucket_configs.get(bucket)
+      if bucket_config and bucket_config.region:
+        LOG.debug(f"Using bucket-specific region '{bucket_config.region}' for bucket '{bucket}'")
+        return bucket_config.region
+
+    try:
+      # Try to get region from AWS API bucket location
+      response = self.s3_client.get_bucket_location(Bucket=bucket)
+      region = response.get("LocationConstraint")
+
+      # Handle special cases
+      if region is None:
+        # US East 1 returns None
+        return "us-east-1"
+      elif region == "EU":
+        # Legacy EU region
+        return "eu-west-1"
+
+      LOG.debug(f"Detected bucket '{bucket}' in region '{region}' via AWS API")
+      return region
+    except ClientError as e:
+      LOG.debug(f"Could not detect region for bucket {bucket} via AWS API: {e}")
+      # Fall back to connector default or system default
+      return self.connector_config.region or DEFAULT_REGION

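A hedged usage sketch for AWSS3Client with a stubbed connector config (again standing in for the ConnectorConfig defined in conf_utils.py); the values echo the aws_production example from hue.ini:

from types import SimpleNamespace

from desktop.lib.fs.s3.clients.aws import AWSS3Client

# Stub only the attributes AWSS3Client and KeyAuthProvider read.
connector_config = SimpleNamespace(
  id="aws_production",
  provider="aws",
  auth_type="key",
  region="us-west-2",
  endpoint=None,
  access_key_id="AKIA...",
  secret_key="...",
  iam_role=None,
  bucket_configs={},
  options={},
)

client = AWSS3Client(connector_config, user="hue")
# Resolution order: bucket_configs region, then the bucket location API,
# then the connector default, then the system default.
region = client.get_region("production-data")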
+ 128 - 0
desktop/core/src/desktop/lib/fs/s3/clients/base.py

@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Optional, TYPE_CHECKING
+
+from boto3.session import Session
+from botocore.client import Config
+from botocore.credentials import Credentials
+
+from desktop.lib.fs.s3.constants import CLIENT_CONFIG, TRANSFER_CONFIG
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+
+class S3AuthProvider(ABC):
+  """
+  Base interface for S3 authentication providers.
+  Each auth method (key, IAM, RAZ) must implement this interface.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    self.connector_config = connector_config
+    self.user = user
+
+  @abstractmethod
+  def get_credentials(self) -> Dict[str, Any]:
+    """
+    Get credentials for S3 access.
+    Returns dict with keys:
+    - access_key_id
+    - secret_access_key
+    - session_token (optional)
+    - expiration (optional)
+    """
+    pass
+
+  @abstractmethod
+  def get_session_kwargs(self) -> Dict[str, Any]:
+    """Get kwargs for creating boto3 session"""
+    pass
+
+  @abstractmethod
+  def refresh(self) -> None:
+    """Refresh credentials if needed"""
+    pass
+
+
+class S3ClientInterface(ABC):
+  """
+  Base interface for S3 clients. All provider-specific clients must implement this interface.
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    self.connector_config = connector_config
+    self.user = user
+
+    # Initialize auth provider
+    self.auth_provider = self._create_auth_provider()
+
+    # Initialize boto3 client config
+    self.client_config = Config(
+      **{
+        **CLIENT_CONFIG,
+        "region_name": connector_config.region,
+        "s3": {
+          "addressing_style": "path"  # Use path style for compatibility
+        },
+      }
+    )
+
+    # Initialize transfer config
+    self.transfer_config = TRANSFER_CONFIG.copy()
+
+    # Initialize session and clients
+    self.session = self._create_session()
+    self.s3_client = self._create_client()
+    self.s3_resource = self._create_resource()
+
+  @abstractmethod
+  def _create_auth_provider(self) -> S3AuthProvider:
+    """Create and return appropriate auth provider"""
+    pass
+
+  @abstractmethod
+  def _create_session(self) -> Session:
+    """Create and return a boto3 session with appropriate credentials"""
+    pass
+
+  @abstractmethod
+  def _create_client(self) -> Any:
+    """Create and return a boto3 S3 client"""
+    pass
+
+  @abstractmethod
+  def _create_resource(self) -> Any:
+    """Create and return a boto3 S3 resource"""
+    pass
+
+  @abstractmethod
+  def get_credentials(self) -> Optional[Credentials]:
+    """Get credentials for the client"""
+    pass
+
+  @abstractmethod
+  def get_delegation_token(self) -> Optional[str]:
+    """Get delegation token if supported"""
+    pass
+
+  @abstractmethod
+  def get_region(self, bucket: str) -> str:
+    """Get region for a bucket"""
+    pass
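
The two abstract base classes above define the contract each provider plugs into: an auth provider that yields boto3 session kwargs, and a client that wires config, session, client and resource together. Below is a hedged sketch of a static-key auth provider; the class name is hypothetical, and the shipped KeyAuthProvider (clients/auth/key.py) may differ in detail.

from typing import Any, Dict

from desktop.lib.fs.s3.clients.base import S3AuthProvider


class StaticKeyAuthProvider(S3AuthProvider):  # hypothetical example, not the shipped KeyAuthProvider
  """Resolves credentials directly from the connector config; nothing to refresh."""

  def get_credentials(self) -> Dict[str, Any]:
    return {
      "access_key_id": self.connector_config.access_key_id,
      "secret_access_key": self.connector_config.secret_key,
    }

  def get_session_kwargs(self) -> Dict[str, Any]:
    creds = self.get_credentials()
    return {
      "aws_access_key_id": creds["access_key_id"],
      "aws_secret_access_key": creds["secret_access_key"],
      "region_name": self.connector_config.region,
    }

  def refresh(self) -> None:
    pass  # static keys never expire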

+ 73 - 0
desktop/core/src/desktop/lib/fs/s3/clients/factory.py

@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from desktop.lib.fs.s3.clients.aws import AWSS3Client
+from desktop.lib.fs.s3.clients.base import S3ClientInterface
+from desktop.lib.fs.s3.clients.generic import GenericS3Client
+from desktop.lib.fs.s3.conf_utils import ConnectorConfig, get_all_connectors, get_connector
+
+LOG = logging.getLogger()
+
+
+class S3ClientFactory:
+  """Simplified factory for creating S3 clients using connector architecture"""
+
+  @classmethod
+  def get_client_for_connector(cls, connector_id: str, user: str) -> S3ClientInterface:
+    """
+    Get S3 client instance for a specific connector.
+
+    Args:
+      connector_id: ID of the connector
+      user: Username for the client
+
+    Returns:
+      S3ClientInterface instance configured for the connector
+
+    Raises:
+      ValueError: If connector not found
+    """
+    try:
+      connector = get_connector(connector_id)
+
+      if not connector:
+        available = list(get_all_connectors().keys())
+        raise ValueError(f"Unknown connector ID: {connector_id}. Available connectors: {available}")
+
+      return cls._create_client_for_connector(connector, user)
+
+    except Exception as e:
+      LOG.error(f"Failed to create S3 client for connector '{connector_id}': {e}")
+      raise
+
+  @classmethod
+  def _create_client_for_connector(cls, connector: ConnectorConfig, user: str) -> S3ClientInterface:
+    """Create appropriate client based on connector provider type"""
+    provider_type = connector.provider.lower()
+
+    try:
+      if provider_type == "aws":
+        return AWSS3Client(connector, user)
+      elif provider_type in ("generic", "netapp", "dell"):
+        return GenericS3Client(connector, user)
+      else:
+        raise ValueError(f"Unknown provider type: {provider_type}")
+    except Exception as e:
+      LOG.error(f"Failed to create {provider_type} client: {e}")
+      raise
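
For reviewers, the expected call pattern for the factory is roughly the sketch below; the connector ID "default", the username and the bucket name are placeholders, and a real call needs that connector to exist in configuration.

from desktop.lib.fs.s3.clients.factory import S3ClientFactory

client = S3ClientFactory.get_client_for_connector("default", "hue_user")

region = client.get_region("my-bucket")  # resolved per the client's region logic
creds = client.get_credentials()         # botocore Credentials, or None
token = client.get_delegation_token()    # None for AWS and generic providers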

+ 147 - 0
desktop/core/src/desktop/lib/fs/s3/clients/generic.py

@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, Optional, TYPE_CHECKING
+
+import boto3
+from boto3.session import Session
+from botocore.credentials import Credentials
+
+from desktop.conf import RAZ
+from desktop.lib.fs.s3.clients.auth.key import KeyAuthProvider
+from desktop.lib.fs.s3.clients.auth.raz import RazAuthProvider
+from desktop.lib.fs.s3.clients.base import S3AuthProvider, S3ClientInterface
+from desktop.lib.fs.s3.constants import DEFAULT_REGION
+
+if TYPE_CHECKING:
+  from desktop.lib.fs.s3.conf_utils import ConnectorConfig
+
+LOG = logging.getLogger()
+
+
+class GenericS3Client(S3ClientInterface):
+  """
+  Generic S3 client for S3-compatible storage systems.
+  Supports any storage that implements the S3 API (NetApp, Dell ECS, etc.).
+  """
+
+  def __init__(self, connector_config: "ConnectorConfig", user: str):
+    super().__init__(connector_config, user)
+
+    # Override client config for generic providers
+    self.client_config.signature_version = self._get_signature_version()
+    self.client_config.s3.update(
+      {
+        "addressing_style": "path",  # Always use path style
+        "payload_signing_enabled": True,
+      }
+    )
+
+    # Apply provider-specific settings
+    self._setup_provider_specific_config()
+
+  def _create_auth_provider(self) -> S3AuthProvider:
+    """Create appropriate auth provider for generic providers"""
+    connector = self.connector_config
+
+    # Support RAZ authentication for generic providers
+    if RAZ.IS_ENABLED.get() and connector.auth_type == "raz":
+      return RazAuthProvider(connector, self.user)
+    else:
+      # Default to key auth for generic providers
+      return KeyAuthProvider(connector, self.user)
+
+  def _get_signature_version(self) -> str:
+    """Get signature version with option to override via configuration"""
+    options = self.connector_config.options or {}
+
+    # Allow override via options for legacy systems
+    return options.get("signature_version", "s3v4")
+
+  def _setup_provider_specific_config(self) -> None:
+    """Setup provider-specific configurations"""
+    provider = self.connector_config.provider.lower()
+    options = self.connector_config.options or {}
+
+    if provider == "netapp":
+      self._setup_netapp_config(options)
+    elif provider == "dell":
+      self._setup_dell_config(options)
+    # Add more providers as needed
+
+  def _setup_netapp_config(self, options: Dict[str, Any]) -> None:
+    """Setup Netapp StorageGRID specific config"""
+    # SSL verification
+    if "ssl_verify" in options:
+      self.client_config.verify = options["ssl_verify"]
+
+    # Custom headers
+    if "custom_headers" in options:
+      self.client_config.s3["custom_headers"] = options["custom_headers"]
+
+  def _setup_dell_config(self, options: Dict[str, Any]) -> None:
+    """Setup Dell ECS specific config"""
+    # Namespace support
+    if "namespace" in options:
+      self.client_config.s3["namespace"] = options["namespace"]
+
+    # Multipart settings
+    if "multipart_threshold" in options:
+      self.transfer_config["multipart_threshold"] = options["multipart_threshold"]
+    if "multipart_chunksize" in options:
+      self.transfer_config["multipart_chunksize"] = options["multipart_chunksize"]
+
+  def _create_session(self) -> Session:
+    """Create boto3 session"""
+    session_kwargs = self.auth_provider.get_session_kwargs()
+    return boto3.Session(**session_kwargs)
+
+  def _create_client(self) -> Any:
+    """Create boto3 S3 client"""
+    return self.session.client(
+      "s3", config=self.client_config, endpoint_url=self.connector_config.endpoint, verify=getattr(self.client_config, "verify", None)
+    )
+
+  def _create_resource(self) -> Any:
+    """Create boto3 S3 resource"""
+    return self.session.resource(
+      "s3", config=self.client_config, endpoint_url=self.connector_config.endpoint, verify=getattr(self.client_config, "verify", None)
+    )
+
+  def get_credentials(self) -> Optional[Credentials]:
+    """Get current credentials"""
+    return self.session.get_credentials()
+
+  def get_delegation_token(self) -> Optional[str]:
+    """Get delegation token (not supported for generic providers)"""
+    return None
+
+  def get_region(self, bucket: str) -> str:
+    """
+    Resolve the region for a bucket. A bucket-specific region from bucket_configs
+    takes precedence; most S3-compatible systems don't use regions, so otherwise
+    return the connector's configured region or the system default.
+    """
+    # Check bucket-specific region from bucket_configs first
+    if bucket and self.connector_config.bucket_configs:
+      bucket_config = self.connector_config.bucket_configs.get(bucket)
+      if bucket_config and bucket_config.region:
+        return bucket_config.region
+
+    # Fall back to connector default region
+    return self.connector_config.region or DEFAULT_REGION
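
Only a handful of option keys are consumed by the generic client (signature_version in _get_signature_version, ssl_verify/custom_headers for NetApp, namespace and multipart_* for Dell ECS). A hedged configuration sketch with hypothetical endpoint and values:

from desktop.lib.fs.s3.conf_utils import ConnectorConfig

ecs_connector = ConnectorConfig(
  id="ecs-prod",
  provider="dell",
  auth_type="key",
  endpoint="https://ecs.example.com:9021",
  access_key_id="...",
  secret_key="...",
  options={
    "signature_version": "s3",                # override for legacy signing; default is "s3v4"
    "namespace": "analytics-ns",              # Dell ECS namespace
    "multipart_threshold": 64 * 1024 * 1024,
    "multipart_chunksize": 16 * 1024 * 1024,
  },
)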

+ 724 - 0
desktop/core/src/desktop/lib/fs/s3/conf_utils.py

@@ -0,0 +1,724 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Configuration utilities for the simplified S3 connector system.
+Uses bucket-embedded configuration for clean, simple setup.
+"""
+
+import logging
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+from urllib.parse import urlparse
+
+from aws.conf import AWS_ACCOUNTS, is_raz_s3 as legacy_is_raz_s3
+from desktop.conf import RAZ, STORAGE_CONNECTORS, USE_STORAGE_CONNECTORS
+from desktop.lib.idbroker import conf as conf_idbroker
+from filebrowser.conf import REMOTE_STORAGE_HOME
+
+LOG = logging.getLogger()
+
+
+class ConfigurationError(Exception):
+  """Raised when configuration validation fails"""
+
+  pass
+
+
+def _load_legacy_aws_accounts_as_connectors() -> Dict[str, "ConnectorConfig"]:
+  """
+  Convert legacy AWS_ACCOUNTS configurations to ConnectorConfig format.
+  This provides backward compatibility for existing AWS configurations.
+
+  Returns:
+    Dict mapping connector IDs to ConnectorConfig objects
+  """
+  try:
+    connectors = {}
+
+    for account_id in AWS_ACCOUNTS.keys():
+      aws_config = AWS_ACCOUNTS[account_id]
+
+      # Skip accounts without basic configuration
+      if not aws_config.get_raw():
+        continue
+
+      try:
+        connector = ConnectorConfig(
+          id=account_id,
+          provider=_detect_provider_from_aws_config(aws_config),
+          auth_type=_detect_auth_type_from_aws_config(aws_config),
+          region=aws_config.REGION.get(),
+          endpoint=_convert_aws_endpoint_to_new_format(aws_config),
+          access_key_id=aws_config.ACCESS_KEY_ID.get(),
+          secret_key=aws_config.SECRET_ACCESS_KEY.get(),
+          bucket_configs=_extract_bucket_configs_from_aws(aws_config),
+          options=_convert_aws_options_to_new_format(aws_config),
+        )
+
+        connectors[account_id] = connector
+        LOG.debug(
+          f"Converted legacy AWS account '{account_id}' to storage connector: provider={connector.provider}, auth={connector.auth_type}"
+        )
+
+      except Exception as e:
+        LOG.warning(f"Failed to convert legacy AWS account '{account_id}': {e}")
+        continue
+
+    if connectors:
+      LOG.info(f"Auto-converted {len(connectors)} legacy AWS accounts to storage connectors")
+
+    return connectors
+  except Exception as e:
+    LOG.error(f"Failed to load legacy AWS accounts: {e}")
+    return {}
+
+
+def _detect_provider_from_aws_config(aws_config) -> str:
+  """Detect provider type from AWS config"""
+  # Check if it's a standard AWS endpoint
+  host = aws_config.HOST.get()
+  if not host or "amazonaws.com" in host:
+    return "aws"
+
+  # Custom endpoint means generic S3-compatible provider
+  return "generic"
+
+
+def _detect_auth_type_from_aws_config(aws_config) -> str:
+  """
+  Detect authentication type from AWS config using priority order:
+  1. RAZ (highest priority - global setting)
+  2. IDBroker (global setting)
+  3. IAM (if environment credentials allowed)
+  4. Key (static keys - default)
+  """
+  try:
+    # Priority 1: RAZ authentication
+    if legacy_is_raz_s3():
+      return "raz"
+
+    # Priority 2: IDBroker authentication
+    if conf_idbroker.is_idbroker_enabled("s3a"):
+      return "idbroker"
+
+    # Priority 3: IAM roles (if environment credentials are allowed)
+    if aws_config.ALLOW_ENVIRONMENT_CREDENTIALS.get():
+      return "iam"
+
+    # Priority 4: Static key authentication (default)
+    return "key"
+
+  except Exception as e:
+    LOG.warning(f"Failed to detect auth type, defaulting to 'key': {e}")
+    return "key"
+
+
+def _convert_aws_endpoint_to_new_format(aws_config) -> Optional[str]:
+  """Convert AWS HOST config to new endpoint format"""
+  host = aws_config.HOST.get()
+  if not host:
+    return None
+
+  # Ensure proper URL format
+  if not host.startswith(("http://", "https://")):
+    # Default to HTTPS for security, but respect IS_SECURE setting
+    is_secure = aws_config.IS_SECURE.get() if hasattr(aws_config, "IS_SECURE") else True
+    protocol = "https" if is_secure else "http"
+    host = f"{protocol}://{host}"
+
+  return host
+
+
+def _extract_bucket_configs_from_aws(aws_config) -> Dict[str, "BucketConfig"]:
+  """
+  Extract bucket configurations from AWS config following old priority logic:
+  1. REMOTE_STORAGE_HOME (global, highest priority)
+  2. DEFAULT_HOME_PATH (per AWS account)
+  """
+  # Priority 1: Check REMOTE_STORAGE_HOME (global override, same logic as old get_s3_home_directory)
+  home_path = None
+  try:
+    if hasattr(REMOTE_STORAGE_HOME, "get") and REMOTE_STORAGE_HOME.get():
+      remote_home = REMOTE_STORAGE_HOME.get()
+      if remote_home.startswith("s3a://"):
+        home_path = remote_home
+        LOG.debug(f"Using REMOTE_STORAGE_HOME for bucket config: {home_path}")
+  except Exception as e:
+    LOG.warning(f"Failed to check REMOTE_STORAGE_HOME: {e}")
+
+  # Priority 2: Fall back to DEFAULT_HOME_PATH (per AWS account)
+  if not home_path:
+    default_home = aws_config.DEFAULT_HOME_PATH.get()
+    if default_home and default_home.startswith("s3a://"):
+      home_path = default_home
+      LOG.debug(f"Using DEFAULT_HOME_PATH for bucket config: {home_path}")
+
+  # If still no home path configured, return empty dict
+  if not home_path:
+    return {}
+
+  try:
+    # Parse s3a://bucket-name/path/to/home/
+    bucket_name = extract_bucket_from_path(home_path)
+    if not bucket_name:
+      return {}
+
+    # Extract the path part and make it relative
+    if home_path.startswith(("s3a://", "s3://")):
+      # Remove s3a://bucket-name/ prefix to get relative path
+      scheme_and_bucket = f"s3a://{bucket_name}/"
+      if home_path.startswith(scheme_and_bucket):
+        relative_path = home_path[len(scheme_and_bucket) :]
+      else:
+        relative_path = None
+    else:
+      relative_path = home_path
+
+    return {
+      bucket_name: BucketConfig(
+        name=bucket_name,
+        default_home_path=relative_path if relative_path else None,
+        region=None,  # Will be inherited from connector
+        options=None,
+      )
+    }
+
+  except Exception as e:
+    LOG.warning(f"Failed to extract bucket config from home path '{home_path}': {e}")
+    return {}
+
+
+def _convert_aws_options_to_new_format(aws_config) -> Dict[str, Any]:
+  """Convert AWS-specific options to new options format"""
+  options = {}
+
+  try:
+    # Proxy settings
+    if aws_config.PROXY_ADDRESS.get():
+      options["proxy_address"] = aws_config.PROXY_ADDRESS.get()
+    if aws_config.PROXY_PORT.get():
+      options["proxy_port"] = aws_config.PROXY_PORT.get()
+    if aws_config.PROXY_USER.get():
+      options["proxy_user"] = aws_config.PROXY_USER.get()
+    if aws_config.PROXY_PASS.get():
+      options["proxy_pass"] = aws_config.PROXY_PASS.get()
+
+    # SSL/Security settings
+    if hasattr(aws_config, "IS_SECURE"):
+      options["is_secure"] = aws_config.IS_SECURE.get()
+
+    # Calling format (for compatibility)
+    if hasattr(aws_config, "CALLING_FORMAT"):
+      calling_format = aws_config.CALLING_FORMAT.get()
+      if calling_format and calling_format != "boto.s3.connection.OrdinaryCallingFormat":
+        options["calling_format"] = calling_format
+
+    # Session token (if available)
+    if aws_config.SECURITY_TOKEN.get():
+      options["security_token"] = aws_config.SECURITY_TOKEN.get()
+
+    # Environment credentials setting
+    if aws_config.ALLOW_ENVIRONMENT_CREDENTIALS.get() is not None:
+      options["allow_environment_credentials"] = aws_config.ALLOW_ENVIRONMENT_CREDENTIALS.get()
+
+  except Exception as e:
+    LOG.warning(f"Failed to convert some AWS options: {e}")
+
+  return options if options else {}
+
+
+@dataclass
+class BucketConfig:
+  """Configuration for a specific bucket within a connector"""
+
+  name: str
+  default_home_path: Optional[str] = None
+  region: Optional[str] = None
+  options: Optional[Dict[str, Any]] = None
+
+  def get_effective_home_path(self, user: Optional[str] = None) -> str:
+    """
+    Get effective home path for this bucket, handling relative paths and user context.
+
+    Args:
+      user: Username for RAZ user directory handling
+
+    Returns:
+      Absolute S3 path
+    """
+    if not self.default_home_path:
+      # Default to bucket root
+      path = f"s3a://{self.name}/"
+    elif self.default_home_path.startswith("s3a://"):
+      # Already absolute
+      path = self.default_home_path
+    else:
+      # Relative path - make it absolute
+      path = f"s3a://{self.name}/{self.default_home_path.lstrip('/')}"
+      if not path.endswith("/"):
+        path += "/"
+
+    # Handle RAZ user directory logic
+    if user and RAZ.IS_ENABLED.get():
+      from desktop.models import _handle_user_dir_raz
+
+      path = _handle_user_dir_raz(user, path)
+
+    return path
+
+
+@dataclass
+class ConnectorConfig:
+  """Simplified S3 connector configuration"""
+
+  id: str
+  provider: str
+  auth_type: str
+  region: Optional[str] = None
+  endpoint: Optional[str] = None
+  access_key_id: Optional[str] = None
+  secret_key: Optional[str] = None
+  iam_role: Optional[str] = None
+  bucket_configs: Optional[Dict[str, BucketConfig]] = None
+  options: Optional[Dict[str, Any]] = None
+
+  def get_bucket_config(self, bucket_name: str) -> BucketConfig:
+    """Get configuration for a specific bucket, creating default if not found"""
+    if self.bucket_configs and bucket_name in self.bucket_configs:
+      return self.bucket_configs[bucket_name]
+
+    # Return default bucket config
+    return BucketConfig(name=bucket_name)
+
+
+class S3ConfigManager:
+  """
+  Simplified S3 connector configuration manager.
+  No more sources, just connectors with embedded bucket configs.
+  """
+
+  def __init__(self):
+    self._connectors: Dict[str, ConnectorConfig] = {}
+    self._loaded = False
+
+  def load_configurations(self) -> None:
+    """Load and validate connector configurations"""
+    if self._loaded:
+      return
+
+    try:
+      self._load_connectors()
+      self._validate_configurations()
+
+      self._loaded = True
+      LOG.info(f"Successfully loaded {len(self._connectors)} S3 connectors")
+
+    except Exception as e:
+      LOG.error(f"Failed to load S3 configurations: {e}")
+      raise ConfigurationError(f"Configuration loading failed: {e}")
+
+  def _load_connectors(self) -> None:
+    """Load connector configurations from STORAGE_CONNECTORS and legacy AWS_ACCOUNTS"""
+
+    # First, load new STORAGE_CONNECTORS (if any)
+    if STORAGE_CONNECTORS.keys():
+      for connector_id in STORAGE_CONNECTORS.keys():
+        connector_conf = STORAGE_CONNECTORS[connector_id]
+        try:
+          # Parse bucket configurations
+          bucket_configs = {}
+          bucket_configs_raw = connector_conf.BUCKET_CONFIGS.get()
+
+          for bucket_name, bucket_conf_dict in bucket_configs_raw.items():
+            bucket_configs[bucket_name] = BucketConfig(
+              name=bucket_name,
+              default_home_path=bucket_conf_dict.get("default_home_path"),
+              region=bucket_conf_dict.get("region"),
+              options=bucket_conf_dict.get("options"),
+            )
+
+          connector = ConnectorConfig(
+            id=connector_id,
+            provider=connector_conf.PROVIDER.get(),
+            auth_type=connector_conf.AUTH_TYPE.get(),
+            region=connector_conf.REGION.get(),
+            endpoint=connector_conf.ENDPOINT.get(),
+            access_key_id=connector_conf.ACCESS_KEY_ID.get(),
+            secret_key=connector_conf.SECRET_KEY.get(),
+            iam_role=connector_conf.IAM_ROLE.get(),
+            bucket_configs=bucket_configs,
+            options=connector_conf.OPTIONS.get(),
+          )
+
+          self._connectors[connector_id] = connector
+
+          bucket_info = f"with {len(bucket_configs)} buckets" if bucket_configs else "no bucket configs"
+          LOG.debug(f"Loaded connector: {connector_id} ({connector.provider}, {connector.auth_type}) {bucket_info}")
+
+        except Exception as e:
+          raise ConfigurationError(f"Failed to load connector '{connector_id}': {e}")
+    else:
+      LOG.debug("No STORAGE_CONNECTORS configuration found")
+
+    # Second, auto-convert legacy AWS_ACCOUNTS (only when storage connectors feature is enabled)
+    if USE_STORAGE_CONNECTORS.get():
+      legacy_connectors = _load_legacy_aws_accounts_as_connectors()
+
+      for connector_id, connector in legacy_connectors.items():
+        # Don't override new STORAGE_CONNECTORS configs
+        if connector_id not in self._connectors:
+          self._connectors[connector_id] = connector
+        else:
+          LOG.debug(f"Skipping legacy AWS account '{connector_id}' - already configured in STORAGE_CONNECTORS")
+
+  def _validate_configurations(self) -> None:
+    """Validate connector configurations"""
+    errors = []
+
+    for connector_id, connector in self._connectors.items():
+      try:
+        self._validate_connector(connector)
+      except ConfigurationError as e:
+        errors.append(f"Connector '{connector_id}': {e}")
+
+    if errors:
+      raise ConfigurationError("Configuration validation failed:\n" + "\n".join(f"  - {err}" for err in errors))
+
+  def _validate_connector(self, connector: ConnectorConfig) -> None:
+    """Validate a single connector configuration"""
+    # Rule: AWS provider requires region
+    if connector.provider == "aws" and not connector.region:
+      raise ConfigurationError("AWS provider requires 'region' to be specified")
+
+    # Rule: Non-AWS providers require endpoint
+    if connector.provider != "aws" and not connector.endpoint:
+      raise ConfigurationError(f"Provider '{connector.provider}' requires 'endpoint' to be specified")
+
+    # Rule: Key auth requires access_key_id and secret_key
+    if connector.auth_type == "key":
+      if not connector.access_key_id:
+        raise ConfigurationError("Key authentication requires 'access_key_id'")
+      if not connector.secret_key:
+        raise ConfigurationError("Key authentication requires 'secret_key'")
+
+    # Rule: RAZ auth requires global RAZ configuration
+    if connector.auth_type == "raz":
+      if not RAZ.IS_ENABLED.get():
+        raise ConfigurationError("RAZ authentication requires global [desktop] [[raz]] configuration to be enabled")
+
+    # Rule: IDBroker auth requires global IDBroker configuration
+    if connector.auth_type == "idbroker":
+      if not conf_idbroker.is_idbroker_enabled("s3a"):
+        raise ConfigurationError("IDBroker authentication requires global IDBroker configuration in core-site.xml")
+
+  def get_connector(self, connector_id: str) -> Optional[ConnectorConfig]:
+    """Get connector by ID"""
+    self.load_configurations()
+    return self._connectors.get(connector_id)
+
+  def get_all_connectors(self) -> Dict[str, ConnectorConfig]:
+    """Get all connectors"""
+    self.load_configurations()
+    return self._connectors.copy()
+
+
+def get_all_connectors() -> Dict[str, ConnectorConfig]:
+  """Get all configured S3 connectors"""
+  return S3ConfigManager().get_all_connectors()
+
+
+def get_connector(connector_id: str) -> Optional[ConnectorConfig]:
+  """Get specific connector by ID"""
+  return S3ConfigManager().get_connector(connector_id)
+
+
+def validate_s3_configuration() -> List[str]:
+  """
+  Validate S3 configuration and return list of validation errors.
+  Used by Hue's configuration validation system.
+  """
+  try:
+    S3ConfigManager().load_configurations()
+    return []
+  except ConfigurationError as e:
+    return [str(e)]
+  except Exception as e:
+    return [f"Unexpected error validating S3 configuration: {e}"]
+
+
+def get_s3_home_directory(user: Optional[str] = None, connector_id: Optional[str] = None, bucket_name: Optional[str] = None) -> str:
+  """
+  Get S3 home directory with smart defaulting logic.
+  Priority is already handled during config loading (REMOTE_STORAGE_HOME vs DEFAULT_HOME_PATH).
+
+  Args:
+    user: Optional username for RAZ handling
+    connector_id: Optional connector ID (defaults to 'default' or first available)
+    bucket_name: Optional bucket name (uses smart bucket selection if not provided)
+
+  Returns:
+    S3 home directory path
+  """
+  try:
+    # Smart connector defaulting
+    if not connector_id:
+      connector_id = get_default_connector()
+
+    connector = get_connector(connector_id)
+    if not connector:
+      LOG.error(f"Connector '{connector_id}' not found, defaulting to s3a://")
+      return "s3a://"
+
+    # Smart bucket defaulting
+    if not bucket_name:
+      bucket_name = _get_default_bucket_for_connector(connector)
+
+    if bucket_name:
+      bucket_config = connector.get_bucket_config(bucket_name)
+      return bucket_config.get_effective_home_path(user)
+
+    # No bucket available, return generic path
+    return "s3a://"
+
+  except Exception as e:
+    LOG.error(f"Failed to get S3 home directory, defaulting to s3a://: {e}")
+    return "s3a://"
+
+
+def get_default_connector() -> str:
+  """
+  Get the default connector ID using smart selection logic.
+
+  Logic:
+  1. Prefer 'default' if it exists
+  2. Otherwise return first available
+  3. Fall back to 'default' string if no connectors (for error handling)
+  """
+  try:
+    connectors = get_all_connectors()
+
+    # Prefer 'default' if it exists
+    if "default" in connectors:
+      LOG.debug("Using 'default' connector")
+      return "default"
+
+    # Otherwise return first available
+    if connectors:
+      first_id = next(iter(connectors.keys()))
+      LOG.debug(f"No 'default' connector found, using first available: '{first_id}'")
+      return first_id
+
+    # No connectors configured - return 'default' for error handling
+    LOG.warning("No connectors configured, returning 'default' (will likely fail)")
+    return "default"
+
+  except Exception as e:
+    LOG.warning(f"Failed to get default connector: {e}")
+    return "default"
+
+
+def _get_default_bucket_for_connector(connector: ConnectorConfig) -> Optional[str]:
+  """
+  Get default bucket for a connector using smart selection logic.
+
+  Logic:
+  - Multiple buckets: Return None (let caller handle bucket-specific logic)
+  - Single bucket: Return that bucket name
+  - No buckets: Return None
+  """
+  if not connector.bucket_configs:
+    LOG.debug(f"Connector '{connector.id}' has no bucket configs")
+    return None
+
+  bucket_names = list(connector.bucket_configs.keys())
+
+  if len(bucket_names) == 1:
+    # Single bucket - return it as default
+    default_bucket = bucket_names[0]
+    LOG.debug(f"Connector '{connector.id}' has single bucket '{default_bucket}', using as default")
+    return default_bucket
+  elif len(bucket_names) > 1:
+    # Multiple buckets - no clear default
+    LOG.debug(f"Connector '{connector.id}' has {len(bucket_names)} buckets, no clear default")
+    return None
+
+  return None
+
+
+def extract_bucket_from_path(path: str) -> Optional[str]:
+  """Extract bucket name from S3 path"""
+  try:
+    if not path or not path.startswith(("s3a://", "s3://")):
+      return None
+
+    # Parse s3a://bucket-name/path/to/file
+    parsed = urlparse(path)
+    if parsed.netloc:
+      return parsed.netloc
+    elif parsed.path:
+      # Handle s3a:///bucket-name/path format
+      path_parts = parsed.path.strip("/").split("/", 1)
+      return path_parts[0] if path_parts[0] else None
+
+    return None
+  except Exception as e:
+    LOG.error(f"Failed to extract bucket from path: {e}")
+    return None
+
+
+def get_default_bucket_home_path(connector_id: Optional[str] = None, user: Optional[str] = None) -> str:
+  """
+  Get default home path when no specific bucket is provided.
+
+  Smart Logic:
+  - Single bucket configured: Use that bucket's home path
+  - Multiple buckets: Return s3a:// (generic)
+  - No buckets: Return s3a:// (generic)
+  """
+  try:
+    if not connector_id:
+      connector_id = get_default_connector()
+
+    connector = get_connector(connector_id)
+    if not connector:
+      return "s3a://"
+
+    # Check if connector has buckets configured
+    if not connector.bucket_configs:
+      LOG.debug(f"Connector '{connector_id}' has no bucket configs, using s3a://")
+      return "s3a://"
+
+    bucket_names = list(connector.bucket_configs.keys())
+
+    if len(bucket_names) == 1:
+      # Single bucket - use its home path as default
+      bucket_name = bucket_names[0]
+      bucket_config = connector.get_bucket_config(bucket_name)
+      home_path = bucket_config.get_effective_home_path(user)
+      LOG.debug(f"Using single bucket '{bucket_name}' home path: {home_path}")
+      return home_path
+
+    elif len(bucket_names) > 1:
+      # Multiple buckets - no clear default, use generic
+      LOG.debug(f"Connector '{connector_id}' has {len(bucket_names)} buckets, using generic s3a://")
+      return "s3a://"
+
+    return "s3a://"
+
+  except Exception as e:
+    LOG.error(f"Failed to get default bucket home path: {e}")
+    return "s3a://"
+
+
+def is_enabled() -> bool:
+  """
+  Check if Storage Connector S3 system is enabled.
+  Equivalent to aws.conf.is_enabled() for the new system.
+
+  Returns:
+    True if Storage Connectors are available via:
+    - STORAGE_CONNECTORS configuration
+    - Legacy AWS_ACCOUNTS (when feature flag enabled)
+    - Global RAZ/IDBroker systems
+  """
+  try:
+    # Check if Storage Connectors are directly configured
+    if STORAGE_CONNECTORS.keys():
+      return True
+
+    # Check if legacy AWS accounts can be auto-converted
+    if USE_STORAGE_CONNECTORS.get():
+      legacy_connectors = _load_legacy_aws_accounts_as_connectors()
+      if legacy_connectors:
+        return True
+
+    # Check if global RAZ or IDBroker provide S3 access (same pattern as legacy)
+    if is_raz_s3():
+      return True
+
+    if conf_idbroker.is_idbroker_enabled("s3a"):
+      return True
+
+    return False
+
+  except Exception as e:
+    LOG.warning(f"Failed to check Storage Connector S3 availability: {e}")
+    return False
+
+
+def has_s3_access(user) -> bool:
+  """
+  Check if user has access to Storage Connector S3 system.
+  Equivalent to aws.conf.has_s3_access() for the new system.
+
+  Args:
+    user: User object to check permissions for
+
+  Returns:
+    True if user has S3 access via Storage Connectors
+  """
+  try:
+    # Same user validation logic as legacy
+    if not (user.is_authenticated and user.is_active):
+      return False
+
+    from desktop.auth.backend import is_admin
+
+    # Admin always has access
+    if is_admin(user):
+      return True
+
+    # Check if user has S3 permission
+    if user.has_hue_permission(action="s3_access", app="filebrowser"):
+      return True
+
+    # RAZ users get access if system is enabled
+    if is_raz_s3():
+      return True
+
+    return False
+
+  except Exception as e:
+    LOG.warning(f"Failed to check Storage Connector S3 access for user {user}: {e}")
+    return False
+
+
+def is_raz_s3() -> bool:
+  """
+  Check if RAZ S3 is enabled for Storage Connector system.
+  Equivalent to aws.conf.is_raz_s3() for the new system.
+
+  Returns:
+    True if RAZ is enabled AND at least one storage connector exists
+  """
+  try:
+    # Must have RAZ enabled globally
+    if not RAZ.IS_ENABLED.get():
+      return False
+
+    # Must have at least one storage connector (like legacy AWS_ACCOUNTS check)
+    connectors = get_all_connectors()
+    if not connectors:
+      return False
+
+    # RAZ can work with any connector, so RAZ enabled plus at least one connector means RAZ S3 is enabled
+    return True
+
+  except Exception as e:
+    LOG.warning(f"Failed to check Storage Connector RAZ S3 status: {e}")
+    return False
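
A short sketch of the path helpers above, using a hypothetical bucket and assuming RAZ is disabled so no user-directory rewriting happens:

from desktop.lib.fs.s3.conf_utils import BucketConfig, extract_bucket_from_path, get_s3_home_directory

bucket = BucketConfig(name="team-data", default_home_path="user/home")
bucket.get_effective_home_path()                        # -> "s3a://team-data/user/home/"

extract_bucket_from_path("s3a://team-data/user/home/")  # -> "team-data"

get_s3_home_directory(user="hue_user")                  # -> single-bucket home path, or "s3a://" if nothing is configured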

+ 54 - 0
desktop/core/src/desktop/lib/fs/s3/constants.py

@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+# Multipart upload settings
+DEFAULT_CHUNK_SIZE = 1024 * 1024 * 8  # 8MiB
+MULTIPART_THRESHOLD = DEFAULT_CHUNK_SIZE  # Start multipart upload if file size > threshold
+MAX_POOL_CONNECTIONS = 10
+MAX_RETRIES = 3
+
+# Timeouts (in seconds)
+CONNECT_TIMEOUT = 120
+READ_TIMEOUT = 120
+
+# S3 specific constants
+S3_DELIMITER = "/"
+DEFAULT_REGION = "us-east-1"
+
+# Error retry settings
+RETRY_EXCEPTIONS = ("RequestTimeout", "ConnectionError", "HTTPClientError")
+
+# Client config defaults
+CLIENT_CONFIG = {
+  "max_pool_connections": MAX_POOL_CONNECTIONS,
+  "connect_timeout": CONNECT_TIMEOUT,
+  "read_timeout": READ_TIMEOUT,
+  "retries": {
+    "max_attempts": MAX_RETRIES,
+    "mode": "standard",  # standard/adaptive
+  },
+}
+
+# Transfer config defaults
+TRANSFER_CONFIG = {
+  "multipart_threshold": MULTIPART_THRESHOLD,
+  "multipart_chunksize": DEFAULT_CHUNK_SIZE,
+  "max_concurrency": MAX_POOL_CONNECTIONS,
+  "use_threads": True,
+}
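
The two dicts above are plain kwargs containers: S3ClientInterface expands CLIENT_CONFIG into a botocore Config, and the transfer settings can be passed to boto3's TransferConfig the same way. How upload.py actually consumes TRANSFER_CONFIG is outside this excerpt, so the TransferConfig line below is an assumption.

from boto3.s3.transfer import TransferConfig
from botocore.client import Config

from desktop.lib.fs.s3.constants import CLIENT_CONFIG, TRANSFER_CONFIG

client_config = Config(**CLIENT_CONFIG)               # pool size, timeouts, standard retry mode
transfer_config = TransferConfig(**TRANSFER_CONFIG)   # 8 MiB threshold/chunks, threaded transfers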

+ 16 - 0
desktop/core/src/desktop/lib/fs/s3/core/__init__.py

@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+ 243 - 0
desktop/core/src/desktop/lib/fs/s3/core/file.py

@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import logging
+import os
+from typing import Union
+
+from botocore.exceptions import ClientError
+
+from desktop.lib.fs.s3.constants import DEFAULT_CHUNK_SIZE
+from desktop.lib.fs.s3.core.path import S3Path
+
+LOG = logging.getLogger()
+
+
+class S3File:
+  """
+  File-like object for reading from S3.
+  Provides buffered read access with seek support.
+  Only supports read modes ('r' or 'rb').
+  """
+
+  def __init__(self, fs, path: Union[str, S3Path], mode: str = "rb"):
+    """
+    Initialize S3File for reading.
+
+    Args:
+      fs: Parent S3FileSystem instance
+      path: File path
+      mode: File mode ('r' or 'rb' only)
+
+    Raises:
+      ValueError: If mode is not supported
+    """
+    # Only allow read modes
+    if mode not in ("r", "rb"):
+      raise ValueError(f"Unsupported file mode: {mode}. Only 'r' and 'rb' modes are supported")
+
+    self.fs = fs
+    self.path = S3Path.from_path(path) if isinstance(path, str) else path
+    self.mode = mode
+    self.closed = False
+
+    # Initialize read buffer and position tracking
+    self._read_buffer = io.BytesIO()
+    self._buffer_size = DEFAULT_CHUNK_SIZE
+    self._position = 0
+    self._size = None
+
+    # Load file size
+    self._load_size()
+
+  def _load_size(self) -> None:
+    """
+    Load file size for reading.
+
+    Raises:
+      FileNotFoundError: If file does not exist
+      PermissionError: If access is denied
+      ClientError: For other S3 API errors
+    """
+    try:
+      response = self.fs.s3_client.head_object(Bucket=self.path.bucket, Key=self.path.key)
+      self._size = response["ContentLength"]
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "404":
+        raise FileNotFoundError(f"File not found: {self.path}")
+      elif e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to file: {self.path}")
+      raise
+
+  def read(self, size: int = -1) -> bytes:
+    """
+    Read from file.
+
+    Args:
+      size: Number of bytes to read (-1 for all)
+
+    Returns:
+      Bytes read
+
+    Raises:
+      ValueError: If file is closed
+      ClientError: For S3 API errors
+    """
+    if self.closed:
+      raise ValueError("I/O operation on closed file")
+
+    # Handle empty files
+    if self._size == 0:
+      return b""
+
+    # Handle full file read
+    if size < 0:
+      size = self._size - self._position
+
+    # Read from buffer first
+    data = self._read_buffer.read(size)
+    read_size = len(data)
+
+    # If we need more data
+    if read_size < size:
+      try:
+        # Calculate range
+        start = self._position + read_size
+        end = start + (size - read_size) - 1
+
+        # Get data from S3
+        response = self.fs.s3_client.get_object(Bucket=self.path.bucket, Key=self.path.key, Range=f"bytes={start}-{end}")
+
+        # Add to buffer
+        new_data = response["Body"].read()
+        self._read_buffer = io.BytesIO(new_data)
+        data += self._read_buffer.read(size - read_size)
+
+      except ClientError as e:
+        if e.response["Error"]["Code"] == "InvalidRange":
+          # Should never happen with our range checks
+          LOG.error(f"Invalid range request: bytes={start}-{end} for file size {self._size}")
+          return data
+        elif e.response["Error"]["Code"] == "404":
+          raise FileNotFoundError(f"File not found: {self.path}")
+        elif e.response["Error"]["Code"] == "403":
+          raise PermissionError(f"Access denied to file: {self.path}")
+        raise
+
+    self._position += len(data)
+    return data
+
+  def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
+    """
+    Seek to position in file.
+
+    Args:
+      offset: Offset in bytes
+      whence: Reference point (SEEK_SET, SEEK_CUR, SEEK_END)
+
+    Returns:
+      New position
+
+    Raises:
+      ValueError: If file is closed or position invalid
+    """
+    if self.closed:
+      raise ValueError("I/O operation on closed file")
+
+    # Calculate new position
+    if whence == os.SEEK_SET:
+      new_pos = offset
+    elif whence == os.SEEK_CUR:
+      new_pos = self._position + offset
+    elif whence == os.SEEK_END:
+      new_pos = self._size + offset
+    else:
+      raise ValueError(f"Invalid whence value: {whence}")
+
+    # Validate position
+    if new_pos < 0:
+      raise ValueError("Negative seek position")
+
+    # Update position
+    self._position = new_pos
+    return self._position
+
+  def tell(self) -> int:
+    """
+    Get current position in file.
+
+    Returns:
+      Current position
+
+    Raises:
+      ValueError: If file is closed
+    """
+    if self.closed:
+      raise ValueError("I/O operation on closed file")
+    return self._position
+
+  def close(self) -> None:
+    """Close file"""
+    self.closed = True
+
+  def __enter__(self) -> "S3File":
+    """Context manager enter"""
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+    """Context manager exit"""
+    self.close()
+
+  def __iter__(self) -> "S3File":
+    """Iterator interface"""
+    return self
+
+  def __next__(self) -> bytes:
+    """Get next line"""
+    line = self.readline()
+    if not line:
+      raise StopIteration
+    return line
+
+  def readline(self, size: int = -1) -> bytes:
+    """
+    Read a line from file.
+
+    Args:
+      size: Maximum bytes to read (-1 for no limit)
+
+    Returns:
+      Line of bytes
+
+    Raises:
+      ValueError: If file is closed
+    """
+    if self.closed:
+      raise ValueError("I/O operation on closed file")
+
+    # Read until newline or size
+    line = b""
+    while size < 0 or len(line) < size:
+      byte = self.read(1)
+      if not byte:
+        break
+      line += byte
+      if byte == b"\n":
+        break
+
+    return line
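
S3File is read-only and backed by ranged GETs; a minimal usage sketch through the filesystem facade, where the connector ID, bucket and key are placeholders:

from desktop.lib.fs.s3.core.s3fs import S3FileSystem

fs = S3FileSystem("default", "hue_user")

with fs.open("s3a://team-data/logs/app.log", "rb") as f:  # returns an S3File
  header = f.read(128)       # single ranged GET: bytes=0-127
  f.seek(0)
  first_line = f.readline()  # reads byte-by-byte until b"\n"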

+ 184 - 0
desktop/core/src/desktop/lib/fs/s3/core/path.py

@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import Optional
+from urllib.parse import unquote, urlparse
+
+from desktop.lib.fs.s3.constants import S3_DELIMITER
+
+
+@dataclass
+class S3Path:
+  """
+  Represents an S3 path with bucket and key components.
+  Handles path parsing, normalization, and manipulation.
+  """
+
+  bucket: Optional[str]
+  key: Optional[str]
+
+  @classmethod
+  def from_path(cls, path: str) -> "S3Path":
+    """
+    Create S3Path from string path.
+
+    Args:
+      path: S3 path (s3://bucket/key or s3a://bucket/key)
+
+    Returns:
+      S3Path instance
+
+    Raises:
+      ValueError: If path is not a valid S3 path
+    """
+    if path in ("s3://", "s3a://"):
+      return cls(bucket=None, key=None)
+
+    parsed = urlparse(path)
+    if parsed.scheme not in ("s3", "s3a"):
+      raise ValueError(f"Invalid S3 path: {path}")
+
+    # Get bucket from netloc if present, otherwise from path
+    bucket = parsed.netloc
+    path_parts = parsed.path.lstrip("/").split("/", 1)
+
+    if not bucket and path_parts:
+      bucket = path_parts[0]
+      key = path_parts[1] if len(path_parts) > 1 else None
+    else:
+      key = parsed.path.lstrip("/") if parsed.path else None
+
+    # Normalize empty strings to None
+    bucket = unquote(bucket) if bucket else None
+    key = unquote(key) if key else None
+
+    return cls(bucket=bucket, key=key)
+
+  def is_root(self) -> bool:
+    """Check if path is root (s3:// or s3a://)"""
+    return self.bucket is None
+
+  def is_bucket(self) -> bool:
+    """Check if path is a bucket (s3://bucket/)"""
+    return self.bucket is not None and self.key is None
+
+  def join(self, *components: str) -> "S3Path":
+    """
+    Join path components.
+
+    Args:
+      *components: Path components to join
+
+    Returns:
+      New S3Path with joined components
+    """
+    if self.is_root() and not components:
+      return self
+
+    # Start with current path components
+    parts = []
+    if self.bucket:
+      parts.append(self.bucket)
+    if self.key:
+      parts.append(self.key.rstrip("/"))
+
+    # Add new components
+    for comp in components:
+      # Skip empty components
+      if not comp:
+        continue
+
+      # Handle absolute paths
+      if comp.startswith("s3://") or comp.startswith("s3a://"):
+        return S3Path.from_path(comp)
+
+      # Add component
+      parts.append(comp.strip("/"))
+
+    # Reconstruct path
+    if not parts:
+      return S3Path(None, None)
+
+    return S3Path(bucket=parts[0], key=S3_DELIMITER.join(parts[1:]) if len(parts) > 1 else None)
+
+  def parent(self) -> "S3Path":
+    """Get parent path"""
+    if self.is_root():
+      return self
+    if self.is_bucket():
+      return S3Path(None, None)
+
+    # Split key into components
+    key_parts = self.key.rstrip("/").split("/")
+    if len(key_parts) == 1:
+      # Key is in bucket root
+      return S3Path(self.bucket, None)
+
+    # Remove last component
+    return S3Path(bucket=self.bucket, key="/".join(key_parts[:-1]))
+
+  def name(self) -> str:
+    """Get path name (last component)"""
+    if self.is_root():
+      return ""
+    if self.is_bucket():
+      return self.bucket
+
+    return self.key.rstrip("/").split("/")[-1]
+
+  def add_suffix(self, suffix: str) -> "S3Path":
+    """Add suffix to path name"""
+    if self.is_root() or self.is_bucket():
+      raise ValueError("Cannot add suffix to root or bucket")
+
+    return S3Path(bucket=self.bucket, key=f"{self.key.rstrip('/')}{suffix}")
+
+  def with_trailing_slash(self) -> "S3Path":
+    """Ensure path ends with slash"""
+    if self.is_root() or self.is_bucket():
+      return self
+
+    if not self.key.endswith("/"):
+      return S3Path(bucket=self.bucket, key=f"{self.key}/")
+    return self
+
+  def without_trailing_slash(self) -> "S3Path":
+    """Remove trailing slash"""
+    if self.is_root() or self.is_bucket():
+      return self
+
+    return S3Path(bucket=self.bucket, key=self.key.rstrip("/"))
+
+  def __str__(self) -> str:
+    """Convert to string path"""
+    if self.is_root():
+      return "s3a://"
+    elif self.is_bucket():
+      return f"s3a://{self.bucket}"
+    else:
+      return f"s3a://{self.bucket}/{self.key}"
+
+  def __eq__(self, other: object) -> bool:
+    """Compare paths"""
+    if not isinstance(other, S3Path):
+      return NotImplemented
+    return self.bucket == other.bucket and self.key == other.key
+
+  def __hash__(self) -> int:
+    """Hash path"""
+    return hash((self.bucket, self.key))
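
S3Path is a pure value object, so it can be exercised without any S3 calls; the paths below are made up:

from desktop.lib.fs.s3.core.path import S3Path

p = S3Path.from_path("s3a://team-data/reports/2024/q1.csv")
p.bucket                              # -> "team-data"
p.key                                 # -> "reports/2024/q1.csv"
p.name()                              # -> "q1.csv"
str(p.parent())                       # -> "s3a://team-data/reports/2024"

joined = S3Path.from_path("s3a://team-data/reports/").join("2024", "q1.csv")
str(joined)                           # -> "s3a://team-data/reports/2024/q1.csv"

S3Path.from_path("s3a://").is_root()  # -> True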

+ 765 - 0
desktop/core/src/desktop/lib/fs/s3/core/s3fs.py

@@ -0,0 +1,765 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import time
+from typing import Any, List, Optional, Union
+
+from botocore.exceptions import ClientError
+
+from desktop.conf import PERMISSION_ACTION_S3
+from desktop.lib.fs.s3.clients.factory import S3ClientFactory
+from desktop.lib.fs.s3.constants import DEFAULT_CHUNK_SIZE, S3_DELIMITER
+from desktop.lib.fs.s3.core.file import S3File
+from desktop.lib.fs.s3.core.path import S3Path
+from desktop.lib.fs.s3.core.stat import S3Stat
+
+LOG = logging.getLogger()
+
+
+def make_s3_client(connector_id: str, user: str) -> "S3FileSystem":
+  return S3FileSystem(connector_id, user)
+
+
+class S3FileSystem:
+  """
+  Simplified S3FileSystem implementation using boto3.
+  Takes connector_id and user, extracts bucket from paths during operations.
+  """
+
+  def __init__(self, connector_id: str, user: str):
+    """
+    Initialize S3FileSystem for a specific connector.
+
+    Args:
+      connector_id: ID of the S3 connector to use
+      user: Username for operations
+    """
+    self.connector_id = connector_id
+    self.user = user
+
+    # Backward compatibility attributes
+    self.is_sentry_managed = lambda path: False  # S3 doesn't use Sentry
+    self.superuser = None  # No superuser concept in S3
+    self.supergroup = None  # No supergroup concept in S3
+    self.expiration = None  # No expiration for S3
+    self._filebrowser_action = PERMISSION_ACTION_S3  # S3 uses filebrowser_action
+
+    # Initialize client
+    self._initialize_client()
+
+  def _initialize_client(self) -> None:
+    """Initialize the S3 client using connector configuration"""
+    try:
+      # Create client using factory
+      self.client = S3ClientFactory.get_client_for_connector(self.connector_id, self.user)
+
+      # Get boto3 clients for direct use
+      self.s3_client = self.client.s3_client
+      self.s3_resource = self.client.s3_resource
+
+    except Exception as e:
+      LOG.error(f"Failed to initialize S3 client: {e}")
+      raise
+
+  def _get_bucket(self, bucket_name: str, validate: bool = True):
+    """Get bucket by name"""
+    try:
+      bucket = self.s3_resource.Bucket(bucket_name)
+      if validate:
+        # Force validation of bucket
+        self.s3_client.head_bucket(Bucket=bucket_name)
+      return bucket
+    except ClientError as e:
+      error_code = e.response["Error"]["Code"]
+      if error_code == "404":
+        raise FileNotFoundError(f"Bucket not found: {bucket_name}")
+      elif error_code == "403":
+        raise PermissionError(f"Access denied to bucket: {bucket_name}")
+      raise
+
+  def _create_bucket(self, bucket_name: str) -> Any:
+    """Create bucket in the correct region"""
+    region = self.client.get_region(bucket_name)
+    kwargs = {}
+
+    if region and region != "us-east-1":
+      kwargs["CreateBucketConfiguration"] = {"LocationConstraint": region}
+
+    return self.s3_client.create_bucket(Bucket=bucket_name, **kwargs)
+
+  def _delete_bucket(self, bucket_name: str) -> None:
+    """Delete bucket and all its contents"""
+    try:
+      bucket = self._get_bucket(bucket_name)
+
+      # Delete objects in batches
+      paginator = self.s3_client.get_paginator("list_objects_v2")
+      for page in paginator.paginate(Bucket=bucket_name):
+        objects_to_delete = [{"Key": obj["Key"]} for obj in page.get("Contents", [])]
+        if objects_to_delete:
+          self.s3_client.delete_objects(Bucket=bucket_name, Delete={"Objects": objects_to_delete})
+
+      # Delete the empty bucket
+      bucket.delete()
+
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to delete bucket: {bucket_name}")
+      raise
+
+  def _get_object(self, path: Union[str, S3Path], validate: bool = True):
+    """Get object by path"""
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    try:
+      obj = self.s3_resource.Object(path.bucket, path.key)
+      if validate:
+        obj.load()
+      return obj
+    except ClientError as e:
+      error_code = e.response["Error"]["Code"]
+      if error_code == "404":
+        raise FileNotFoundError(f"Object not found: {path}")
+      elif error_code == "403":
+        raise PermissionError(f"Access denied to object: {path}")
+      raise
+
+  def open(self, path: Union[str, S3Path], mode: str = "rb") -> S3File:
+    """Open file for reading or writing"""
+    return S3File(self, path, mode)
+
+  def read(self, path: Union[str, S3Path], offset: int = 0, length: int = -1) -> bytes:
+    """Read file contents"""
+    with self.open(path, "rb") as f:
+      f.seek(offset)
+      return f.read(length)
+
+  def create(self, path: Union[str, S3Path], overwrite: bool = False, data: Optional[Union[str, bytes]] = None) -> None:
+    """
+    Create a new file with optional initial content.
+
+    Args:
+      path: S3 path for new file
+      overwrite: If True, overwrite existing file. If False, raise error if file exists
+      data: Optional initial content (string or bytes)
+
+    Raises:
+      FileExistsError: If file exists and overwrite=False
+      ClientError: For S3 API errors
+    """
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    # Check if file exists
+    if not overwrite and self.exists(path):
+      raise FileExistsError(f"File already exists: {path}")
+
+    # Convert string data to bytes if needed
+    if isinstance(data, str):
+      data = data.encode("utf-8")
+    elif data is None:
+      data = b""
+
+    try:
+      self.s3_client.put_object(Bucket=path.bucket, Key=path.key, Body=data)
+    except ClientError as e:
+      error_code = e.response["Error"]["Code"]
+      if error_code == "NoSuchBucket":
+        raise FileNotFoundError(f"Bucket does not exist: {path.bucket}")
+      elif error_code == "AccessDenied":
+        raise PermissionError(f"Access denied creating file: {path}")
+      raise
+
+  def exists(self, path: Union[str, S3Path]) -> bool:
+    """Check if path exists"""
+    try:
+      self.stats(path)
+      return True
+    except (FileNotFoundError, PermissionError):
+      return False
+
+  def stats(self, path: Union[str, S3Path]) -> S3Stat:
+    """Get path stats"""
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    if path.is_root():
+      return S3Stat.for_root()
+
+    try:
+      if path.is_bucket():
+        bucket = self._get_bucket(path.bucket)
+        return S3Stat.from_bucket(bucket)
+      else:
+        obj = self._get_object(path)
+        return S3Stat.from_object(obj)
+    except FileNotFoundError:
+      # Check if it's a prefix (directory)
+      try:
+        response = self.s3_client.list_objects_v2(Bucket=path.bucket, Prefix=path.key, Delimiter=S3_DELIMITER, MaxKeys=1)
+        if response.get("CommonPrefixes") or response.get("Contents"):
+          return S3Stat.for_directory(path)
+        raise FileNotFoundError(f"Path not found: {path}")
+      except ClientError as e:
+        if e.response["Error"]["Code"] == "403":
+          raise PermissionError(f"Access denied to path: {path}")
+        raise
+
+  def isfile(self, path: Union[str, S3Path]) -> bool:
+    """Check if path is a file"""
+    try:
+      stats = self.stats(path)
+      return not stats.is_dir
+    except FileNotFoundError:
+      return False
+
+  def isdir(self, path: Union[str, S3Path]) -> bool:
+    """Check if path is a directory"""
+    try:
+      stats = self.stats(path)
+      return stats.is_dir
+    except FileNotFoundError:
+      return False
+
+  def listdir_stats(self, path: Union[str, S3Path], glob_pattern: Optional[str] = None) -> List[S3Stat]:
+    """List directory with stats"""
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    if path.is_root():
+      # List buckets
+      buckets = self.s3_client.list_buckets()["Buckets"]
+      return [S3Stat.from_bucket(self.s3_resource.Bucket(b["Name"])) for b in buckets]
+
+    # List objects with prefix
+    prefix = path.key if path.key else ""
+    if prefix and not prefix.endswith(S3_DELIMITER):
+      prefix += S3_DELIMITER
+
+    paginator = self.s3_client.get_paginator("list_objects_v2")
+
+    stats = []
+    try:
+      for page in paginator.paginate(Bucket=path.bucket, Prefix=prefix, Delimiter=S3_DELIMITER):
+        # Add common prefixes (directories)
+        for prefix_dict in page.get("CommonPrefixes", []):
+          prefix_path = S3Path(bucket=path.bucket, key=prefix_dict["Prefix"])
+          stats.append(S3Stat.for_directory(prefix_path))
+
+        # Add objects (files)
+        for obj_dict in page.get("Contents", []):
+          if obj_dict["Key"] == prefix:
+            continue  # Skip current directory marker
+          obj = self.s3_resource.Object(path.bucket, obj_dict["Key"])
+          stats.append(S3Stat.from_object(obj))
+
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to path: {path}")
+      raise
+
+    return stats
+
+  def listdir(self, path: Union[str, S3Path], glob_pattern: Optional[str] = None) -> List[str]:
+    """List directory contents"""
+    stats = self.listdir_stats(path, glob_pattern)
+    return [stat.name for stat in stats]
+
+  def mkdir(self, path: Union[str, S3Path]) -> None:
+    """Create directory (empty object with trailing slash)"""
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    if path.is_root():
+      raise ValueError("Cannot create root directory")
+
+    try:
+      if path.is_bucket():
+        self._create_bucket(path.bucket)
+      else:
+        # Create directory marker
+        key = path.key.rstrip("/") + "/"
+        self.s3_client.put_object(Bucket=path.bucket, Key=key, Body=b"")
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to create: {path}")
+      raise
+
+  def remove(self, path: Union[str, S3Path], skip_trash: bool = True) -> None:
+    """Remove file or empty directory"""
+    if not skip_trash:
+      raise NotImplementedError("Trash not supported for S3")
+
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    try:
+      if path.is_bucket():
+        self._delete_bucket(path.bucket)
+      else:
+        # Delete object
+        self.s3_client.delete_object(Bucket=path.bucket, Key=path.key)
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to delete: {path}")
+      raise
+
+  def rmtree(self, path: Union[str, S3Path], skip_trash: bool = True) -> None:
+    """Remove directory and contents recursively"""
+    if not skip_trash:
+      raise NotImplementedError("Trash not supported for S3")
+
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    try:
+      if path.is_bucket():
+        self._delete_bucket(path.bucket)
+      elif self.isfile(path):
+        self.remove(path, skip_trash=skip_trash)  # Delete file in rmtree for backward compatibility
+      else:
+        # Get prefix for directory
+        prefix = path.key
+        if not prefix.endswith("/"):
+          prefix += "/"
+
+        # Delete objects in batches
+        paginator = self.s3_client.get_paginator("list_objects_v2")
+        for page in paginator.paginate(Bucket=path.bucket, Prefix=prefix):
+          objects_to_delete = [{"Key": obj["Key"]} for obj in page.get("Contents", [])]
+          if objects_to_delete:
+            self.s3_client.delete_objects(Bucket=path.bucket, Delete={"Objects": objects_to_delete})
+
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to delete: {path}")
+      raise
+
+  def copy(self, src: Union[str, S3Path], dst: Union[str, S3Path], recursive: bool = False, *args, **kwargs) -> None:
+    """Copy file or directory"""
+    if isinstance(src, str):
+      src = S3Path.from_path(src)
+    if isinstance(dst, str):
+      dst = S3Path.from_path(dst)
+
+    # Validate source exists
+    if not self.exists(src):
+      raise FileNotFoundError(f"Source does not exist: {src}")
+
+    src_is_dir = self.isdir(src)
+
+    # Check if we can copy directory
+    if src_is_dir and not recursive:
+      raise IsADirectoryError(f"Source is a directory (use recursive=True): {src}")
+
+    try:
+      if not src_is_dir:
+        # Handle file-to-directory copy case
+        if self.exists(dst) and self.isdir(dst):
+          src_filename = src.name()
+          dst = dst.join(src_filename)
+          LOG.debug(f"File-to-directory copy: appending filename to destination → {dst}")
+
+        # Copy single file
+        self._copy_file(src, dst)
+      else:
+        # Handle directory-to-directory copy case
+        if self.exists(dst) and self.isdir(dst):
+          src_dirname = src.name()
+          dst = dst.join(src_dirname)
+          LOG.debug(f"Directory-to-directory copy: appending source name to destination → {dst}")
+
+        # Copy directory recursively
+        self._copy_directory(src, dst)
+
+    except ClientError as e:
+      error_code = e.response["Error"]["Code"]
+      if error_code == "403":
+        raise PermissionError(f"Access denied for copy operation: {src} → {dst}")
+      elif error_code == "NoSuchBucket":
+        raise FileNotFoundError(f"Destination bucket does not exist: {dst.bucket}")
+      elif error_code == "NoSuchKey":
+        raise FileNotFoundError(f"Source not found: {src}")
+      else:
+        LOG.error(f"S3 copy operation failed: {error_code} - {e.response['Error'].get('Message', str(e))}")
+        raise
+
+  def _copy_file(self, src: S3Path, dst: S3Path) -> None:
+    """Copy a single file using boto3 copy_object API"""
+    copy_source = {"Bucket": src.bucket, "Key": src.key}
+
+    LOG.debug(f"Copying file: {src} → {dst}")
+
+    self.s3_client.copy_object(CopySource=copy_source, Bucket=dst.bucket, Key=dst.key)
+
+  def _copy_directory(self, src: S3Path, dst: S3Path) -> None:
+    """Copy directory recursively using boto3"""
+    src_prefix = src.key.rstrip("/") + "/" if src.key else ""
+    dst_prefix = dst.key.rstrip("/") + "/" if dst.key else ""
+
+    LOG.debug(f"Copying directory: {src} → {dst} (src_prefix='{src_prefix}', dst_prefix='{dst_prefix}')")
+
+    copied_files = 0
+    paginator = self.s3_client.get_paginator("list_objects_v2")
+
+    for page in paginator.paginate(Bucket=src.bucket, Prefix=src_prefix):
+      for obj in page.get("Contents", []):
+        src_key = obj["Key"]
+
+        # Calculate destination key by replacing src_prefix with dst_prefix
+        relative_key = src_key[len(src_prefix) :]
+        dst_key = dst_prefix + relative_key
+
+        # Copy each file
+        copy_source = {"Bucket": src.bucket, "Key": src_key}
+        self.s3_client.copy_object(CopySource=copy_source, Bucket=dst.bucket, Key=dst_key)
+
+        copied_files += 1
+
+    LOG.debug(f"Directory copy completed: {copied_files} files copied")
+
+  def rename(self, old: Union[str, S3Path], new: Union[str, S3Path]) -> None:
+    """Rename/move file or directory"""
+    self.copy(old, new, recursive=True)
+    self.rmtree(old)
+
+  # Deprecated
+  def upload(self, file, path, *args, **kwargs):
+    pass  # upload is handled by S3ConnectorUploadHandler
+
+  def create_home_dir(self, home_path: Optional[str] = None) -> None:
+    """
+    Create home directory for the user with smart path resolution.
+
+    Args:
+      home_path: Optional explicit home path, if not provided will be determined from config
+
+    Raises:
+      PermissionError: If user doesn't have permission to create directory
+      IOError: If directory creation fails
+    """
+    try:
+      # Get home directory path using smart defaults
+      if not home_path:
+        home_path = self._get_smart_home_directory()
+
+      # Create directory if it doesn't exist
+      if not self.exists(home_path):
+        LOG.info(f"Creating home directory at: {home_path}")
+        self.mkdir(home_path)
+
+    except Exception as e:
+      LOG.error(f"Failed to create home directory at {home_path}: {e}")
+      raise IOError(f"Failed to create home directory: {e}")
+
+  def _get_smart_home_directory(self) -> str:
+    """
+    Get smart home directory using connector bucket configuration.
+
+    Logic:
+    - Single bucket configured: Use that bucket's home path
+    - Multiple buckets: Use generic s3a://
+    - No buckets: Use generic s3a://
+    """
+    from desktop.lib.fs.s3.conf_utils import get_default_bucket_home_path
+
+    return get_default_bucket_home_path(self.connector_id, self.user)
+
+  # Backward compatibility methods
+
+  def filebrowser_action(self):
+    return self._filebrowser_action
+
+  def setuser(self, user):
+    self.user = user
+
+  def get_upload_chuck_size(self):
+    return DEFAULT_CHUNK_SIZE
+
+  def get_upload_handler(self, destination_path, overwrite):
+    # TODO: Implement upload flow for new S3 filesystem
+    return None
+
+  def normpath(self, path: str) -> str:
+    """
+    Normalize S3 path.
+    Converts 's3://' to 's3a://' and handles path components.
+    """
+    if path.startswith("s3://"):
+      path = "s3a://" + path[5:]
+    s3path = S3Path.from_path(path)
+    return str(s3path)
+
+  def netnormpath(self, path: str) -> str:
+    """
+    Network normalize path - same as normpath for S3.
+    """
+    return self.normpath(path)
+
+  def parent_path(self, path: str) -> str:
+    """Get parent path"""
+    s3path = S3Path.from_path(path)
+    return str(s3path.parent())
+
+  def join(self, first: str, *comp_list: str) -> str:
+    """Join path components"""
+    s3path = S3Path.from_path(first)
+    return str(s3path.join(*comp_list))
+
+  def isroot(self, path: str) -> bool:
+    """Check if path is root"""
+    s3path = S3Path.from_path(path)
+    return s3path.is_root()
+
+  def restore(self, *args, **kwargs):
+    raise NotImplementedError("restore is not implemented for S3")
+
+  def chmod(self, path: Union[str, S3Path], mode: int) -> None:
+    """Change file mode"""
+    raise NotImplementedError("chmod is not implemented for S3")
+
+  def chown(self, path: Union[str, S3Path], user: str, group: str, *args, **kwargs) -> None:
+    """Change file owner"""
+    raise NotImplementedError("chown is not implemented for S3")
+
+  def copyfile(self, src: Union[str, S3Path], dst: Union[str, S3Path], *args, **kwargs) -> None:
+    """Copy single file (no recursion)"""
+    if isinstance(src, str):
+      src = S3Path.from_path(src)
+    if isinstance(dst, str):
+      dst = S3Path.from_path(dst)
+
+    # Validate source is a file
+    if not self.exists(src):
+      raise FileNotFoundError(f"Source file does not exist: {src}")
+    if self.isdir(src):
+      raise IsADirectoryError(f"Source is a directory, use copy() with recursive=True: {src}")
+    if self.isdir(dst):
+      raise IsADirectoryError(f"Destination is a directory, specify file path: {dst}")
+
+    return self._copy_file(src, dst)
+
+  def copy_remote_dir(self, src: Union[str, S3Path], dst: Union[str, S3Path], *args, **kwargs) -> None:
+    """Copy directory recursively"""
+    if isinstance(src, str):
+      src = S3Path.from_path(src)
+    if isinstance(dst, str):
+      dst = S3Path.from_path(dst)
+
+    # Validate source is a directory
+    if not self.exists(src):
+      raise FileNotFoundError(f"Source directory does not exist: {src}")
+    if not self.isdir(src):
+      raise NotADirectoryError(f"Source is not a directory: {src}")
+
+    return self._copy_directory(src, dst)
+
+  def rename_star(self, old_dir: Union[str, S3Path], new_dir: Union[str, S3Path]) -> None:
+    """
+    Rename contents of old_dir to new_dir without renaming the directory itself.
+    Useful when you want to move directory contents but preserve the directory.
+
+    Args:
+      old_dir: Source directory
+      new_dir: Destination directory
+
+    Raises:
+      NotADirectoryError: If old_dir is not a directory
+      ClientError: For S3 API errors
+    """
+    if isinstance(old_dir, str):
+      old_dir = S3Path.from_path(old_dir)
+    if isinstance(new_dir, str):
+      new_dir = S3Path.from_path(new_dir)
+
+    if not self.isdir(old_dir):
+      raise NotADirectoryError(f"Source is not a directory: {old_dir}")
+
+    # Get directory contents and rename each entry
+    entries = self.listdir(old_dir)
+    for entry in entries:
+      src = old_dir.join(entry)
+      dst = new_dir.join(entry)
+      self.rename(src, dst)
+
+  def copyFromLocal(self, local_src: str, remote_dst: str, *args, **kwargs) -> None:
+    """
+    Copy local file or directory to S3.
+
+    The method preserves directory structure when copying directories.
+    For single files, if remote_dst is a directory, the file is copied into it.
+    Otherwise remote_dst is used as the destination file path.
+
+    Args:
+      local_src: Local file/directory path
+      remote_dst: S3 destination path
+      *args, **kwargs: Additional arguments (for backward compatibility)
+    """
+    if isinstance(remote_dst, str):
+      remote_dst = S3Path.from_path(remote_dst)
+
+    # Handle directory copy
+    if os.path.isdir(local_src):
+      # Walk directory tree
+      for root, dirs, files in os.walk(local_src, followlinks=False):
+        # Calculate relative path
+        rel_path = os.path.relpath(root, local_src)
+
+        # Create remote directory path
+        remote_dir = remote_dst.join(rel_path) if rel_path != "." else remote_dst
+
+        # Create empty directory if no contents
+        if not dirs and not files:
+          self.mkdir(remote_dir)
+          continue
+
+        # Copy each file
+        for file_name in files:
+          local_file = os.path.join(root, file_name)
+          remote_file = remote_dir.join(file_name)
+
+          self.s3_client.upload_file(Filename=local_file, Bucket=remote_file.bucket, Key=remote_file.key)
+
+    # Handle single file copy
+    else:
+      if self.isdir(remote_dst):
+        remote_file = remote_dst.join(os.path.basename(local_src))
+      else:
+        remote_file = remote_dst
+
+      self.s3_client.upload_file(Filename=local_src, Bucket=remote_file.bucket, Key=remote_file.key)
+
+  def append(self, path: Union[str, S3Path], data: Union[str, bytes]) -> None:
+    """
+    Append data to file by reading existing content and writing back.
+    Not recommended for large files.
+
+    Args:
+      path: S3 path
+      data: Data to append
+    """
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    # Convert string data to bytes
+    if isinstance(data, str):
+      data = data.encode("utf-8")
+
+    try:
+      # Get existing content
+      try:
+        response = self.s3_client.get_object(Bucket=path.bucket, Key=path.key)
+        current = response["Body"].read()
+      except ClientError as e:
+        if e.response["Error"]["Code"] == "NoSuchKey":
+          current = b""
+        else:
+          raise
+
+      # Write back with appended data
+      self.s3_client.put_object(Bucket=path.bucket, Key=path.key, Body=current + data)
+    except ClientError as e:
+      if e.response["Error"]["Code"] == "403":
+        raise PermissionError(f"Access denied to path: {path}")
+      raise
+
+  def check_access(self, path: Union[str, S3Path], permission: str = "READ") -> bool:
+    """
+    Check if user has specified permission on path.
+
+    This method verifies access permissions by attempting actual operations:
+    - READ: Tries to access the file/directory
+    - WRITE: Tries to create a temporary file and delete it
+
+    Args:
+      path: Path to check
+      permission: Permission type ("READ" or "WRITE")
+
+    Returns:
+      True if access is allowed, False otherwise
+    """
+    path = S3Path.from_path(path) if isinstance(path, str) else path
+    permission = permission.upper()
+
+    try:
+      # For write permission, try creating and deleting a temporary file
+      if permission == "WRITE":
+        temp_filename = f"temp_{int(time.time() * 1000)}"
+
+        if path.is_root():
+          return False  # Can't write to root
+        elif path.is_bucket():
+          # For bucket, try creating a temp file in the bucket
+          temp_path = S3Path(bucket=path.bucket, key=temp_filename)
+        else:
+          # For directory/file path, create temp file in same directory
+          if self.isdir(str(path)):
+            temp_path = path.join(temp_filename)  # It's a directory, create temp file inside it
+          else:
+            # It's a file, create temp file in parent directory
+            parent = path.parent()
+            if parent.is_root():
+              return False
+            temp_path = parent.join(temp_filename)
+
+        try:
+          self.s3_client.put_object(Bucket=temp_path.bucket, Key=temp_path.key, Body=b"")
+          self.s3_client.delete_object(Bucket=temp_path.bucket, Key=temp_path.key)  # Delete the temporary file
+          return True
+        except ClientError as e:
+          error_code = e.response["Error"]["Code"]
+          if error_code in ("AccessDenied", "Forbidden", "NoSuchBucket"):
+            return False
+          raise
+      else:  # READ permission
+        if path.is_root():
+          try:
+            self.s3_client.list_buckets()  # Root is always readable (list buckets)
+            return True
+          except ClientError:
+            return False
+        elif path.is_bucket():
+          try:
+            self.s3_client.list_objects_v2(Bucket=path.bucket, MaxKeys=1)  # Try to list bucket contents
+            return True
+          except ClientError as e:
+            error_code = e.response["Error"]["Code"]
+            if error_code in ("AccessDenied", "Forbidden", "NoSuchBucket"):
+              return False
+            raise
+        else:
+          # Try to access the specific object
+          try:
+            self.s3_client.head_object(Bucket=path.bucket, Key=path.key)
+            return True
+          except ClientError as e:
+            error_code = e.response["Error"]["Code"]
+            if error_code in ("AccessDenied", "Forbidden", "NoSuchKey"):
+              # For NoSuchKey, check if we can at least list the parent directory
+              if error_code == "NoSuchKey":
+                parent = path.parent()
+                if not parent.is_root():
+                  return self.check_access(str(parent), "READ")
+              return False
+            raise
+
+    except Exception as e:
+      LOG.warning(f"S3 check_access encountered error verifying {permission} permission at path '{path}': {e}")
+      return False
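
A minimal usage sketch of the filesystem API added above, for reviewers. It assumes the make_s3_client(connector_id, user) factory from core/s3fs.py returns an instance of this class; the connector id, user and bucket names below are placeholders, not values shipped with the patch.

  from desktop.lib.fs.s3.core.s3fs import make_s3_client

  fs = make_s3_client("default", "hue")  # hypothetical connector id and user

  fs.mkdir("s3a://demo-bucket/reports/")                                   # writes a "reports/" directory marker
  fs.create("s3a://demo-bucket/reports/a.txt", overwrite=True, data="hi")  # put_object with a UTF-8 encoded body
  assert fs.exists("s3a://demo-bucket/reports/a.txt")
  print([s.name for s in fs.listdir_stats("s3a://demo-bucket/reports/")])  # S3Stat entries for objects and prefixes
  fs.copy("s3a://demo-bucket/reports/a.txt", "s3a://demo-bucket/archive/a.txt")
  fs.rmtree("s3a://demo-bucket/reports/")                                  # batched delete_objects under the prefix

Note that errors surface as standard Python exceptions (FileNotFoundError, PermissionError, IsADirectoryError) rather than raw botocore ClientError.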

+ 223 - 0
desktop/core/src/desktop/lib/fs/s3/core/stat.py

@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import stat
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Any, Dict, Optional, Union
+
+from desktop.lib.fs.s3.core.path import S3Path
+
+
+@dataclass
+class S3Stat:
+  """
+  File/directory statistics for S3 objects.
+  Compatible with os.stat_result interface.
+  """
+
+  path: str  # Full path
+  name: str  # File/directory name
+  size: int  # Size in bytes
+  mtime: Optional[int]  # Last modified time
+  is_dir: bool  # True if directory
+
+  # Optional S3-specific attributes
+  etag: Optional[str] = None
+  version_id: Optional[str] = None
+  storage_class: Optional[str] = None
+  metadata: Optional[Dict[str, str]] = None
+
+  # Additional attributes needed for compatibility
+  atime: Optional[int] = None  # Access time (same as mtime for S3)
+  aclBit: bool = False  # ACL bit flag
+  replication: int = 1  # Replication factor (always 1 for S3)
+  user: str = ""  # Owner
+  group: str = ""  # Group
+
+  # Internal dict to store dynamic attributes
+  _attrs: Dict[str, Any] = field(default_factory=dict)
+
+  def __post_init__(self):
+    # For backward compatibility - expose is_dir as isDir
+    self.isDir = self.is_dir
+
+  def __getitem__(self, key: str) -> Any:
+    """Support dict-like access to attributes"""
+    # First check internal attrs dict
+    if key in self._attrs:
+      return self._attrs[key]
+
+    # Then check instance attributes
+    if hasattr(self, key):
+      return getattr(self, key)
+
+    # Finally check property methods
+    if key in ["type", "mode"]:
+      return getattr(self, key)
+
+    raise KeyError(key)
+
+  def __setitem__(self, key: str, value: Any) -> None:
+    """Support dict-like setting of attributes"""
+    # For path and name attribute, also update the instance attribute
+    if key == "path":
+      self.path = value
+    elif key == "name":
+      self.name = value
+
+    # Store in internal attrs dict
+    self._attrs[key] = value
+
+  @property
+  def mode(self) -> int:
+    """Get file mode (permissions)"""
+    if self.is_dir:
+      return stat.S_IFDIR | 0o777  # rwxrwxrwx
+    return stat.S_IFREG | 0o666  # rw-rw-rw-
+
+  @property
+  def type(self) -> str:
+    """Get file type"""
+    return "DIRECTORY" if self.is_dir else "FILE"
+
+  @classmethod
+  def for_root(cls) -> "S3Stat":
+    """Create stats for root directory"""
+    return cls(path="s3a://", name="S3", size=0, mtime=None, is_dir=True)
+
+  @classmethod
+  def from_bucket(cls, bucket) -> "S3Stat":
+    """
+    Create stats from bucket.
+
+    Args:
+      bucket: boto3 Bucket object
+
+    Returns:
+      S3Stat instance
+    """
+    return cls(
+      path=f"s3a://{bucket.name}",
+      name=bucket.name,
+      size=0,
+      mtime=None,  # Buckets don't have mtime
+      is_dir=True,
+    )
+
+  @classmethod
+  def from_object(cls, obj) -> "S3Stat":
+    """
+    Create stats from S3 object.
+
+    Args:
+      obj: boto3 Object object
+
+    Returns:
+      S3Stat instance
+    """
+    return cls(
+      path=f"s3a://{obj.bucket_name}/{obj.key}",
+      name=obj.key.rstrip("/").split("/")[-1],
+      size=obj.content_length,
+      mtime=int(obj.last_modified.timestamp()),  # Convert to timestamp
+      atime=int(obj.last_modified.timestamp()),  # Convert to timestamp
+      is_dir=obj.key.endswith("/"),
+      etag=obj.e_tag.strip('"') if obj.e_tag else None,
+      version_id=obj.version_id,
+      storage_class=obj.storage_class,
+      metadata=obj.metadata,
+    )
+
+  @classmethod
+  def from_head(cls, head_response: Dict[str, Any], path: Union[str, S3Path]) -> "S3Stat":
+    """
+    Create stats from head_object response.
+
+    Args:
+      head_response: Response from head_object API call
+      path: S3 path
+
+    Returns:
+      S3Stat instance
+    """
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    last_modified = head_response["LastModified"]
+    if isinstance(last_modified, str):
+      last_modified = datetime.strptime(last_modified, "%Y-%m-%dT%H:%M:%S.%fZ")
+    timestamp = int(last_modified.timestamp())
+
+    return cls(
+      path=str(path),
+      name=path.name(),
+      size=head_response["ContentLength"],
+      mtime=timestamp,  # Store as timestamp
+      atime=timestamp,  # Store as timestamp
+      is_dir=path.key.endswith("/"),
+      etag=head_response.get("ETag", "").strip('"'),
+      version_id=head_response.get("VersionId"),
+      storage_class=head_response.get("StorageClass"),
+      metadata=head_response.get("Metadata"),
+    )
+
+  @classmethod
+  def for_directory(cls, path: Union[str, S3Path]) -> "S3Stat":
+    """
+    Create stats for directory path.
+
+    Args:
+      path: S3 path
+
+    Returns:
+      S3Stat instance
+    """
+    if isinstance(path, str):
+      path = S3Path.from_path(path)
+
+    return cls(path=str(path), name=path.name(), size=0, mtime=None, is_dir=True)
+
+  def to_json_dict(self) -> Dict[str, Any]:
+    """Convert to JSON-serializable dict"""
+    base = {
+      "path": self.path,
+      "name": self.name,
+      "size": self.size,
+      "mtime": self.mtime,  # Already an integer timestamp
+      "atime": self.atime,  # Already an integer timestamp
+      "type": self.type,
+      "mode": self.mode,
+      "user": self.user,
+      "group": self.group,
+      "aclBit": self.aclBit,
+      "replication": self.replication,
+      "etag": self.etag,
+      "version_id": self.version_id,
+      "storage_class": self.storage_class,
+      "metadata": self.metadata,
+      "isDir": self.is_dir,  # Add for backward compatibility
+    }
+    # Include any dynamic attributes
+    base.update(self._attrs)
+    return base
+
+  def __eq__(self, other: object) -> bool:
+    """Compare stats"""
+    if not isinstance(other, S3Stat):
+      return NotImplemented
+    return self.path == other.path and self.size == other.size and self.mtime == other.mtime and self.is_dir == other.is_dir
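
A small illustration of the hybrid attribute/dict access that S3Stat exposes above; the values are made up:

  from desktop.lib.fs.s3.core.stat import S3Stat

  st = S3Stat(path="s3a://demo-bucket/data/file.csv", name="file.csv", size=1024, mtime=1700000000, is_dir=False)

  st["user"] = "hue"                 # stored in the internal _attrs dict and merged by to_json_dict()
  print(st["type"], st["name"])      # "FILE", "file.csv" -- dict access falls back to attributes and properties
  print(oct(st.mode))                # regular-file mode bits (S_IFREG | 0o666)
  print(st.to_json_dict()["isDir"])  # False, kept for backward compatibility with the old stat dicts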

+ 214 - 0
desktop/core/src/desktop/lib/fs/s3/core/upload.py

@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from django.core.files.uploadedfile import SimpleUploadedFile
+from django.core.files.uploadhandler import FileUploadHandler, StopFutureHandlers, StopUpload, UploadFileException
+
+from desktop.lib.fs.s3.conf_utils import get_default_connector
+from desktop.lib.fs.s3.core.path import S3Path
+from desktop.lib.fs.s3.core.s3fs import make_s3_client
+from filebrowser.utils import is_file_upload_allowed
+
+LOG = logging.getLogger()
+
+# Default chunk size for uploads
+DEFAULT_WRITE_SIZE = 1024 * 1024 * 8  # 8MiB
+
+
+class S3ConnectorUploadError(UploadFileException):
+  """Exception for S3 Storage Connector upload errors"""
+
+  pass
+
+
+class S3ConnectorUploadHandler(FileUploadHandler):
+  """
+  Modern S3 upload handler using Storage Connector system with boto3.
+
+  This handler:
+  - Uses the new Storage Connector configuration system
+  - Leverages boto3 for reliable multipart uploads
+  - Streams data chunks directly to S3 without local storage
+  - Supports S3-compatible providers (AWS, NetApp, Dell, etc.)
+  """
+
+  def __init__(self, request):
+    super(S3ConnectorUploadHandler, self).__init__(request)
+    self.chunk_size = DEFAULT_WRITE_SIZE
+    self.destination = request.GET.get("dest", None)  # GET param avoids infinite looping
+    self.target_path = None
+    self.file = None
+    self._request = request
+    self._multipart_upload = None
+    self._part_num = 1
+    self._upload_rejected = False
+    self._parts = []  # Track uploaded parts for completion
+
+    if self._is_s3_upload():
+      # Use Storage Connector system to get S3FileSystem
+      connector_id = self._get_connector_id_for_upload()
+      self._s3fs = make_s3_client(connector_id, request.user.username)
+
+      # Parse destination path
+      self._s3path = S3Path.from_path(self.destination)
+
+      # Validate destination exists and we have write access
+      self._validate_destination()
+
+  def _get_connector_id_for_upload(self) -> str:
+    """
+    Get appropriate connector ID for this upload.
+    Uses smart defaulting to pick the best connector.
+    """
+    # TODO: In the future, we could extract connector from the destination path or request params
+    # For now, use the default connector selection
+    return get_default_connector()
+
+  def _validate_destination(self) -> None:
+    """Validate that we can upload to the destination"""
+    try:
+      if not self._s3fs.check_access(self.destination, permission="WRITE"):
+        raise S3ConnectorUploadError(f"Cannot write to destination: {self.destination}")
+
+    except Exception as e:
+      LOG.error(f"Upload destination validation failed: {e}")
+      raise S3ConnectorUploadError(f"Invalid upload destination: {e}")
+
+  def new_file(self, field_name, file_name, *args, **kwargs):
+    """Initialize new file upload"""
+    if self._is_s3_upload():
+      LOG.info(f"Using S3ConnectorUploadHandler for file upload: {file_name}")
+
+      # Check file extension restrictions
+      is_allowed, err_message = is_file_upload_allowed(file_name)
+      if not is_allowed:
+        LOG.error(f"File upload rejected: {err_message}")
+        self._request.META["upload_failed"] = err_message
+        self._upload_rejected = True
+        return None
+
+      super(S3ConnectorUploadHandler, self).new_file(field_name, file_name, *args, **kwargs)
+
+      # Build target path using S3Path.join() for proper slash handling
+      target_s3path = self._s3path.join(file_name)
+      self.target_path = str(target_s3path)
+
+      try:
+        # Initiate multipart upload using boto3
+        LOG.debug(f"Initiating boto3 multipart upload to: {self.target_path}")
+
+        response = self._s3fs.s3_client.create_multipart_upload(Bucket=target_s3path.bucket, Key=target_s3path.key)
+
+        self._multipart_upload = {"UploadId": response["UploadId"], "Bucket": target_s3path.bucket, "Key": target_s3path.key}
+
+        LOG.debug(f"Multipart upload initiated: UploadId={response['UploadId']}")
+
+        self.file = SimpleUploadedFile(name=file_name, content="")
+      except Exception as e:
+        LOG.error(f"Failed to initiate S3 multipart upload: {e}")
+        self._request.META["upload_failed"] = str(e)
+        raise StopUpload()
+
+      raise StopFutureHandlers()
+
+  def receive_data_chunk(self, raw_data, start):
+    """Receive and upload data chunk"""
+    if self._upload_rejected:
+      return None
+
+    if self._is_s3_upload():
+      try:
+        LOG.debug(f"Uploading part {self._part_num}, size: {len(raw_data)} bytes")
+
+        # Upload part using boto3
+        response = self._s3fs.s3_client.upload_part(
+          Bucket=self._multipart_upload["Bucket"],
+          Key=self._multipart_upload["Key"],
+          PartNumber=self._part_num,
+          UploadId=self._multipart_upload["UploadId"],
+          Body=raw_data,
+        )
+
+        # Track uploaded part for completion
+        self._parts.append({"ETag": response["ETag"], "PartNumber": self._part_num})
+
+        self._part_num += 1
+        return None
+
+      except Exception as e:
+        LOG.error(f"Failed to upload part {self._part_num}: {e}")
+        self._abort_multipart_upload()
+        raise StopUpload()
+    else:
+      return raw_data
+
+  def file_complete(self, file_size):
+    """Complete the multipart upload"""
+    if self._upload_rejected:
+      return None
+
+    if self._is_s3_upload():
+      try:
+        LOG.info(f"Completing multipart upload: {self.target_path}, size: {file_size} bytes")
+
+        # Complete multipart upload
+        self._s3fs.s3_client.complete_multipart_upload(
+          Bucket=self._multipart_upload["Bucket"],
+          Key=self._multipart_upload["Key"],
+          UploadId=self._multipart_upload["UploadId"],
+          MultipartUpload={"Parts": self._parts},
+        )
+
+        LOG.info(f"S3 upload completed successfully: {self.target_path}")
+
+        self.file.size = file_size
+        return self.file
+
+      except Exception as e:
+        LOG.error(f"Failed to complete S3 multipart upload: {e}")
+        self._abort_multipart_upload()
+        raise StopUpload()
+    else:
+      return None
+
+  def _abort_multipart_upload(self):
+    """Abort multipart upload on error"""
+    try:
+      if self._multipart_upload:
+        LOG.warning(f"Aborting multipart upload: {self._multipart_upload['UploadId']}")
+
+        self._s3fs.s3_client.abort_multipart_upload(
+          Bucket=self._multipart_upload["Bucket"], Key=self._multipart_upload["Key"], UploadId=self._multipart_upload["UploadId"]
+        )
+
+    except Exception as e:
+      LOG.error(f"Failed to abort multipart upload: {e}")
+
+  def _is_s3_upload(self):
+    """Check if this is an S3 upload based on destination scheme"""
+    return self._get_scheme() and self._get_scheme().startswith("S3")
+
+  def _get_scheme(self):
+    """Extract scheme from destination path"""
+    if self.destination:
+      if "://" in self.destination:
+        return self.destination.split("://")[0].upper()
+      else:
+        raise S3ConnectorUploadError("Destination does not have a valid scheme")
+    return None
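
For reference, the boto3 call sequence the handler drives, condensed into a standalone sketch. The client construction, bucket and key are placeholders; in the handler the client comes from the connector-backed filesystem (self._s3fs.s3_client), not from boto3.client directly.

  import boto3

  s3 = boto3.client("s3")  # placeholder client
  mpu = s3.create_multipart_upload(Bucket="demo-bucket", Key="uploads/big.bin")

  parts = []
  try:
    # new_file() -> create_multipart_upload, receive_data_chunk() -> one upload_part per ~8 MiB chunk
    for part_number, chunk in enumerate([b"x" * (8 * 1024 * 1024), b"tail"], start=1):
      part = s3.upload_part(
        Bucket="demo-bucket", Key="uploads/big.bin",
        PartNumber=part_number, UploadId=mpu["UploadId"], Body=chunk,
      )
      parts.append({"ETag": part["ETag"], "PartNumber": part_number})

    # file_complete() -> complete_multipart_upload with the tracked parts
    s3.complete_multipart_upload(
      Bucket="demo-bucket", Key="uploads/big.bin",
      UploadId=mpu["UploadId"], MultipartUpload={"Parts": parts},
    )
  except Exception:
    # mirrors _abort_multipart_upload() so unfinished parts are cleaned up on failure
    s3.abort_multipart_upload(Bucket="demo-bucket", Key="uploads/big.bin", UploadId=mpu["UploadId"])
    raise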

+ 15 - 11
desktop/core/src/desktop/lib/fsmanager.py

@@ -17,27 +17,27 @@
 
 from __future__ import absolute_import
 
-from functools import partial
 import logging
+from functools import partial
 
 import aws.client
 import azure.client
 import desktop.lib.fs.gc.client
 import desktop.lib.fs.ozone.client
-
-from aws.conf import is_enabled as is_s3_enabled, has_s3_access
-from azure.conf import is_adls_enabled, is_abfs_enabled, has_adls_access, has_abfs_access
-
-
-from desktop.conf import is_gs_enabled, has_gs_access, DEFAULT_USER, is_ofs_enabled, has_ofs_access, RAZ
-
+from azure.conf import has_abfs_access, has_adls_access, is_abfs_enabled, is_adls_enabled
+from desktop.conf import DEFAULT_USER, has_gs_access, has_ofs_access, is_gs_enabled, is_ofs_enabled, RAZ, USE_STORAGE_CONNECTORS
 from desktop.lib.fs.proxyfs import ProxyFS
-from desktop.lib.python_util import current_ms_from_utc
+from desktop.lib.fs.s3.conf_utils import get_default_connector
+from desktop.lib.fs.s3.core.s3fs import make_s3_client
 from desktop.lib.idbroker import conf as conf_idbroker
-
-from hadoop.cluster import get_hdfs, _make_filesystem
+from desktop.lib.python_util import current_ms_from_utc
+from hadoop.cluster import _make_filesystem, get_hdfs
 from hadoop.conf import has_hdfs_enabled
 
+if USE_STORAGE_CONNECTORS.get():
+  from desktop.lib.fs.s3.conf_utils import has_s3_access, is_enabled as is_s3_enabled
+else:
+  from aws.conf import has_s3_access, is_enabled as is_s3_enabled
 
 SUPPORTED_FS = ['hdfs', 's3a', 'adl', 'abfs', 'gs', 'ofs']
 CLIENT_CACHE = None
@@ -93,6 +93,10 @@ def _make_client(fs, name, user):
   if fs == 'hdfs':
     return _make_filesystem(name)
   elif fs == 's3a':
+    if USE_STORAGE_CONNECTORS.get():
+      # For storage connector system, use connector_id (default to 'default' or first available)
+      connector_id = name if name != "default" else get_default_connector()
+      return make_s3_client(connector_id, user)
     return aws.client._make_client(name, user)
   elif fs == 'adl':
     return azure.client._make_adls_client(name, user)
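
Callers keep going through the existing fsmanager entry point; a sketch of what the flag changes at runtime (user name is a placeholder):

  from desktop.lib.fsmanager import get_client

  fs = get_client(name="default", fs="s3a", user="hue")
  # With USE_STORAGE_CONNECTORS enabled, 'default' is resolved to a connector id and the
  # boto3-backed filesystem from make_s3_client() is returned; otherwise the legacy
  # aws.client._make_client() path is used, as before.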

+ 7 - 1
desktop/core/src/desktop/models.py

@@ -52,6 +52,7 @@ from desktop.conf import (
   IS_MULTICLUSTER_ONLY,
   RAZ,
   TASK_SERVER,
+  USE_STORAGE_CONNECTORS,
 )
 from desktop.lib import fsmanager
 from desktop.lib.connectors.api import _get_installed_connectors
@@ -2027,8 +2028,13 @@ class ClusterConfig(object):
         })
 
       if 'filebrowser' in self.apps and fsmanager.is_enabled_and_has_access('s3a', self.user):
-        from aws.s3.s3fs import get_s3_home_directory
+        if USE_STORAGE_CONNECTORS.get():
+          from desktop.lib.fs.s3.conf_utils import get_s3_home_directory
+        else:
+          from aws.s3.s3fs import get_s3_home_directory
+
         home_path = get_s3_home_directory(self.user)
+
         interpreters.append({
           'type': 's3',
           'displayName': _('S3'),

+ 8 - 3
desktop/core/src/desktop/settings.py

@@ -665,7 +665,7 @@ LOAD_BALANCER_COOKIE = 'ROUTEID'
 ################################################################
 
 # Import after configs are set
-from desktop.conf import ENABLE_NEW_STORAGE_BROWSER  # noqa: E402
+from desktop.conf import ENABLE_NEW_STORAGE_BROWSER, USE_STORAGE_CONNECTORS  # noqa: E402
 
 # Insert our custom upload handlers
 file_upload_handlers = []
@@ -682,8 +682,13 @@ elif not ENABLE_NEW_STORAGE_BROWSER.get():
     'django.core.files.uploadhandler.TemporaryFileUploadHandler',
   ]
 
-  if is_s3_enabled():
-    file_upload_handlers.insert(0, 'aws.s3.upload.S3FileUploadHandler')
+  # S3 upload handler selection: Storage Connector (new) vs Legacy AWS (old)
+  if USE_STORAGE_CONNECTORS.get():
+    # Use Storage Connector upload handler (new system with boto3)
+    file_upload_handlers.insert(0, "desktop.lib.fs.s3.core.upload.S3ConnectorUploadHandler")
+  elif is_s3_enabled():
+    # Use Legacy AWS upload handler (old system with boto2)
+    file_upload_handlers.insert(0, "aws.s3.upload.S3FileUploadHandler")
 
   if is_gs_enabled():
     file_upload_handlers.insert(0, 'desktop.lib.fs.gc.upload.GSFileUploadHandler')
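
The net effect on the upload pipeline, sketched with only the handlers visible in this hunk (the rest of the default handler list is elided):

  # assuming USE_STORAGE_CONNECTORS is enabled
  file_upload_handlers = [
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
  ]
  file_upload_handlers.insert(0, "desktop.lib.fs.s3.core.upload.S3ConnectorUploadHandler")
  # The connector handler now runs first; for s3a:// destinations its new_file() raises
  # StopFutureHandlers, so the remaining handlers never buffer the upload locally.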

+ 40 - 32
desktop/libs/aws/src/aws/tests.py

@@ -14,18 +14,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import unittest
+from unittest.mock import patch
 
-from aws import conf
-from aws.client import Client, get_credential_provider
 from django.test import TestCase
 
-from desktop.lib.fsmanager import get_client, clear_cache
+from aws import conf
+from aws.client import Client, get_credential_provider
+from desktop.conf import RAZ, USE_STORAGE_CONNECTORS
+from desktop.lib.fsmanager import clear_cache, get_client
 from desktop.lib.python_util import current_ms_from_utc
-from desktop.conf import RAZ
-
-from unittest.mock import patch
-
 
 LOG = logging.getLogger()
 
@@ -33,7 +30,10 @@ LOG = logging.getLogger()
 class TestAWS(TestCase):
   def test_with_credentials(self):
     try:
-      finish = conf.AWS_ACCOUNTS.set_for_testing({'default': {'access_key_id': 'access_key_id', 'secret_access_key': 'secret_access_key'}})
+      finish = [
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        conf.AWS_ACCOUNTS.set_for_testing({"default": {"access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}}),
+      ]
       with patch('aws.client.conf_idbroker.get_conf') as get_conf:
         with patch('aws.client.Client.get_s3_connection'):
           get_conf.return_value = {}
@@ -42,16 +42,19 @@
 
           provider = get_credential_provider('default', 'hue')
           assert provider.get_credentials().get('AccessKeyId') == conf.AWS_ACCOUNTS['default'].ACCESS_KEY_ID.get()
-          assert client1 == client2 # Should be the same as no support for user based client with credentials & no Expiration
+          assert client1 == client2  # Should be the same as no support for user based client with credentials & no Expiration
     finally:
-      finish()
+      for f in finish:
+        f()
       clear_cache()
       conf.clear_cache()
 
-
   def test_with_idbroker(self):
     try:
-      finish = conf.AWS_ACCOUNTS.set_for_testing({}) # Set empty to test when no configs are set
+      finish = [
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        conf.AWS_ACCOUNTS.set_for_testing({}),
+      ]  # Set empty to test when no configs are set
       with patch('aws.client.conf_idbroker.get_conf') as get_conf:
         with patch('aws.client.conf_idbroker.get_cab_address') as get_cab_address:
           with patch('aws.client.Client.get_s3_connection'):
@@ -72,26 +75,29 @@ class TestAWS(TestCase):
                 client1 = get_client(name='default', fs='s3a', user='hue')
                 client2 = get_client(name='default', fs='s3a', user='hue')
 
-                assert client1 != client2 # Test that with Expiration 0 clients not equal
+                assert client1 != client2  # Test that with Expiration 0 clients not equal
 
                 get_cab.return_value = {
-                  'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': int(current_ms_from_utc()) + 10*1000}
+                  'Credentials': {'AccessKeyId': 'AccessKeyId', 'Expiration': int(current_ms_from_utc()) + 10 * 1000}
                 }
                 client3 = get_client(name='default', fs='s3a', user='hue')
                 client4 = get_client(name='default', fs='s3a', user='hue')
                 client5 = get_client(name='default', fs='s3a', user='test')
 
-                assert client3 == client4 # Test that with 10 sec expiration, clients equal
-                assert client4 != client5 # Test different user have different clients
+                assert client3 == client4  # Test that with 10 sec expiration, clients equal
+                assert client4 != client5  # Test different user have different clients
     finally:
-      finish()
+      for f in finish:
+        f()
       clear_cache()
       conf.clear_cache()
 
-
   def test_with_idbroker_and_config(self):
     try:
-      finish = conf.AWS_ACCOUNTS.set_for_testing({'default': {'region': 'ap-northeast-1'}})
+      finish = [
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        conf.AWS_ACCOUNTS.set_for_testing({"default": {"region": "ap-northeast-1"}}),
+      ]
       with patch('aws.client.conf_idbroker.get_conf') as get_conf:
         with patch('aws.client.conf_idbroker.get_cab_address') as get_cab_address:
           with patch('aws.client.Client.get_s3_connection'):
@@ -112,14 +118,17 @@ class TestAWS(TestCase):
                 client = Client.from_config(conf.AWS_ACCOUNTS['default'], get_credential_provider('default', 'hue'))
                 assert client._region == 'ap-northeast-1'
     finally:
-      finish()
+      for f in finish:
+        f()
       clear_cache()
       conf.clear_cache()
 
-
   def test_with_idbroker_on_ec2(self):
     try:
-      finish = conf.AWS_ACCOUNTS.set_for_testing({}) # Set empty to test when no configs are set
+      finish = [
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        conf.AWS_ACCOUNTS.set_for_testing({}),
+      ]  # Set empty to test when no configs are set
       with patch('aws.client.aws_conf.get_region') as get_region:
         with patch('aws.client.conf_idbroker.get_conf') as get_conf:
           with patch('aws.client.conf_idbroker.get_cab_address') as get_cab_address:
@@ -137,22 +146,21 @@ class TestAWS(TestCase):
                   has_iam_metadata.return_value = True
                   client = Client.from_config(None, get_credential_provider('default', 'hue'))
 
-                  assert client._region == 'us-west-1' # Test different user have different clients
+                  assert client._region == 'us-west-1'  # Test different user have different clients
     finally:
-      finish()
+      for f in finish:
+        f()
       clear_cache()
       conf.clear_cache()
 
-
   def test_with_raz_enabled(self):
-    with patch('aws.client.RazS3Connection') as raz_s3_connection:
+    with patch('aws.client.RazS3Connection'):
       resets = [
         RAZ.IS_ENABLED.set_for_testing(True),
-        conf.AWS_ACCOUNTS.set_for_testing({'default': {
-          'region': 'us-west-2',
-          'host': 's3-us-west-2.amazonaws.com',
-          'allow_environment_credentials': 'false'
-        }})
+        USE_STORAGE_CONNECTORS.set_for_testing(False),
+        conf.AWS_ACCOUNTS.set_for_testing(
+          {"default": {"region": "us-west-2", "host": "s3-us-west-2.amazonaws.com", "allow_environment_credentials": "false"}}
+        ),
       ]
 
       try: