HUE-8978 [fb] First commit for Google Storage support.

Uses the boto library to operate on Google Storage. The only tested
method is s3fs.listdir_stats, exercised via
desktop.lib.fs.gc.tests:TestGCS.test_with_credentials. The other methods
still need to be modified to support both S3 and GS.
The only authentication that was tested is json_credentials provided for
a service account:
https://console.cloud.google.com/iam-admin/serviceaccounts
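
The credentials are the raw JSON key downloaded for that service
account, pasted into the new gc_accounts section. A minimal sketch,
assuming the section sits under [desktop] next to the surrounding
settings (all values are placeholders):

    [desktop]
      [[gc_accounts]]
        [[[default]]]
          json_credentials='{"type": "service_account", "project_id": "my-project", "private_key_id": "...", "private_key": "...", "client_email": "hue-gs@my-project.iam.gserviceaccount.com"}'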

Authentication is handled by the boto library, but is pluggable via
desktop.lib.fs.gc.client:OAuth2JsonServiceAccountClientAuth. We had to
create our own authentication plugin because the one provided in
gcs_oauth2_boto_plugin.oauth2_plugin requires fetching credentials from
files, which won't work with IDBroker. The current IDBroker code is not
tested.
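
The plugin mechanism is boto's subclass discovery: every subclass of
boto.auth_handler.AuthHandler is offered the request, declares what it
supports via its capability list, and opts out by raising
NotReadyToAuthenticate. A stripped-down sketch of the shape the new
handler follows (the full version is in
desktop/core/src/desktop/lib/fs/gc/client.py below):

    import boto.auth_handler
    import gcs_oauth2_boto_plugin

    class OAuth2JsonServiceAccountClientAuth(boto.auth_handler.AuthHandler):
      capability = ['google-oauth2', 's3']

      def __init__(self, path, config, provider):
        if provider.name != 'google':
          # Not our provider: let boto try the next registered handler.
          raise boto.auth_handler.NotReadyToAuthenticate()
        # Build the OAuth2 client from the in-memory credentials dict instead
        # of a credentials file, so IDBroker-issued credentials can be used too.
        self.oauth2_client = gcs_oauth2_boto_plugin.oauth2_client.OAuth2JsonServiceAccountClient(
            provider.get_json_credentials())

      def add_auth(self, http_request):
        # Attach the bearer token to every outgoing GS request.
        http_request.headers['Authorization'] = self.oauth2_client.GetAuthorizationHeader()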

Instead of creating a new app under desktop/libs, which comes with a
number of problems, this lib was added under desktop.lib.fs.
Authentication via IAM should also be supported, but it has not been
tested.
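
For reference, the only code path exercised today boils down to this
(essentially what test_with_credentials does; it assumes a 'default'
entry in gc_accounts, or IDBroker configured for 'gs'):

    from desktop.lib.fs.gc.client import get_client

    fs = get_client()                    # S3FileSystem wrapping a HueGSConnection
    buckets = fs.listdir_stats('gs://')  # root listing returns the GS buckets as S3Stat objects
    print([b.path for b in buckets])     # e.g. ['gs://my-bucket', ...]
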
Jean-Francois Desjeans Gauthier, 6 years ago
commit a0d7cd1ea8

+ 3 - 1
apps/filebrowser/src/filebrowser/settings.py

@@ -23,10 +23,12 @@ MENU_INDEX = 20
 
 from aws.conf import PERMISSION_ACTION_S3
 from azure.conf import PERMISSION_ACTION_ADLS, PERMISSION_ACTION_ABFS
+from desktop.conf import PERMISSION_ACTION_GS
 
 
 PERMISSION_ACTIONS = (
   (PERMISSION_ACTION_S3, "Access to S3 from filebrowser and filepicker."),
   (PERMISSION_ACTION_ADLS, "Access to ADLS from filebrowser and filepicker."),
-  (PERMISSION_ACTION_ABFS, "Access to ABFS from filebrowser and filepicker.")
+  (PERMISSION_ACTION_ABFS, "Access to ABFS from filebrowser and filepicker."),
+  (PERMISSION_ACTION_GS,  "Access to GS from filebrowser and filepicker.")
 )

+ 1 - 0
apps/useradmin/src/useradmin/models.py

@@ -281,6 +281,7 @@ def update_app_permissions(**kwargs):
            not (new_dp.app == 'hbase' and new_dp.action == 'write') and \
            not (new_dp.app == 'security' and new_dp.action == 'impersonate') and \
            not (new_dp.app == 'filebrowser' and new_dp.action == 's3_access' and not is_idbroker_enabled('s3a')) and \
+           not (new_dp.app == 'filebrowser' and new_dp.action == 'gs_access' and not is_idbroker_enabled('gs')) and \
            not (new_dp.app == 'filebrowser' and new_dp.action == 'adls_access') and \
            not (new_dp.app == 'filebrowser' and new_dp.action == 'abfs_access') and \
            not (new_dp.app == 'oozie' and new_dp.action == 'disable_editor_access'):

+ 7 - 0
desktop/conf.dist/hue.ini

@@ -845,6 +845,13 @@
    #   }
    # ]
 
+   # Settings for the Google Cloud lib
+   # ------------------------------------------------------------------------
+   [[gc_accounts]]
+      [[[default]]]
+        # The JSON credentials to authenticate to Google Cloud e.g. '{ "type": "service_account", "project_id": .... }'
+        # json_credentials=None
+
 ###########################################################################
 # Settings to configure the snippets available in the Notebook
 ###########################################################################

+ 6 - 0
desktop/conf/pseudo-distributed.ini.tmpl

@@ -829,6 +829,12 @@
    # Django cache to use to store temporarily used data during query execution. This is in addition to result_file_storage and result_backend.
    ## execution_storage='{"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "celery-hue"}'
 
+   # Settings for the Google Cloud lib
+   # ------------------------------------------------------------------------
+   [[gc_accounts]]
+      [[[default]]]
+        # The JSON credentials to authenticate to Google Cloud e.g. '{ "type": "service_account", "project_id": .... }'
+        # json_credentials=None
 
 ###########################################################################
 # Settings to configure the snippets available in the Notebook

+ 25 - 0
desktop/core/src/desktop/conf.py

@@ -2128,3 +2128,28 @@ def get_ldap_bind_password(ldap_config):
     password = ldap_config.BIND_PASSWORD_SCRIPT.get()
 
   return password
+
+PERMISSION_ACTION_GS = "gs_access"
+
+GC_ACCOUNTS = UnspecifiedConfigSection(
+  'gc_accounts',
+  help=_('One entry for each GC account'),
+  each=ConfigSection(
+    help=_('Information about single GC account'),
+    members=dict(
+      JSON_CREDENTIALS=Config(
+        key='json_credentials',
+        type=str,
+        default=None,
+      )
+    )
+  )
+)
+
+def is_gs_enabled():
+  from desktop.lib.idbroker import conf as conf_idbroker # Circular dependencies  desktop.conf -> idbroker.conf -> desktop.conf
+  return ('default' in list(GC_ACCOUNTS.keys()) and GC_ACCOUNTS['default'].JSON_CREDENTIALS.get()) or conf_idbroker.is_idbroker_enabled('gs')
+
+def has_gs_access(user):
+  from desktop.auth.backend import is_admin
+  return user.is_authenticated() and user.is_active and (is_admin(user) or user.has_hue_permission(action="gs_access", app="filebrowser"))

+ 17 - 0
desktop/core/src/desktop/lib/fs/gc/__init__.py

@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+

+ 179 - 0
desktop/core/src/desktop/lib/fs/gc/client.py

@@ -0,0 +1,179 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import boto
+import datetime
+import logging
+import gcs_oauth2_boto_plugin
+import json
+
+from aws.s3.s3fs import S3FileSystem
+from boto.gs.bucket import Bucket
+from boto.gs.connection import GSConnection
+from boto.provider import Provider
+from boto.s3.connection import SubdomainCallingFormat
+
+from desktop import conf
+from desktop.conf import DEFAULT_USER
+from desktop.lib.idbroker import conf as conf_idbroker
+from desktop.lib.idbroker.client import IDBroker
+
+LOG = logging.getLogger(__name__)
+
+CLIENT_CACHE = None
+
+_DEFAULT_USER = DEFAULT_USER.get()
+
+# FIXME: Should we check hue principal for the default user?
+def _get_cache_key(identifier='default', user=_DEFAULT_USER): # FIXME: Caching via username has issues when users get deleted. Need to switch to userid, but bigger change
+  return identifier + ':' + user
+
+
+def clear_cache():
+  global CLIENT_CACHE
+  CLIENT_CACHE = None
+
+
+def current_ms_from_utc():
+  return (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000
+
+
+def get_client(identifier='default', user=_DEFAULT_USER):
+  global CLIENT_CACHE
+  _init_clients()
+
+  cache_key = _get_cache_key(identifier, user) if conf_idbroker.is_idbroker_enabled('gs') else _get_cache_key(identifier) # We don't want to cache by username when IDBroker not enabled
+  client = CLIENT_CACHE.get(cache_key)
+
+  if client and (client.expiration is None or client.expiration > int(current_ms_from_utc())): # expiration from IDBroker returns java timestamp in MS
+    return client
+  else:
+    client = _make_client(identifier, user)
+    CLIENT_CACHE[cache_key] = client
+    return client
+
+def get_credential_provider(config=None, user=_DEFAULT_USER):
+  return CredentialProviderIDBroker(IDBroker.from_core_site('gs', user)) if conf_idbroker.is_idbroker_enabled('gs') else CredentialProviderConf(config)
+
+
+def _init_clients():
+  global CLIENT_CACHE
+  if CLIENT_CACHE is not None:
+    return
+  CLIENT_CACHE = {} # Can't convert this to django cache, because S3FileSystem is not pickable
+
+def _make_client(identifier, user=_DEFAULT_USER):
+  config = conf.GC_ACCOUNTS[identifier] if identifier in list(conf.GC_ACCOUNTS.keys()) else None
+  client = Client.from_config(config, get_credential_provider(config, user))
+  return S3FileSystem(client.get_s3_connection(), client.expiration, headers={"x-goog-project-id": client.project}, filebrowser_action=conf.PERMISSION_ACTION_GS) # It would be nice if the connection is lazy loaded
+
+
+class Client(object):
+  def __init__(self, json_credentials=None, expiration=None):
+    self.project = json_credentials.get('project_id') if json_credentials else None
+    self.json_credentials = json_credentials
+    self.expiration = expiration
+
+  @classmethod
+  def from_config(cls, config, credential_provider):
+    credentials = credential_provider.get_credentials()
+    return Client(json_credentials=credentials.get('JsonCredentials'), expiration=credentials.get('Expiration', 0))
+
+  def get_s3_connection(self):
+    return HueGSConnection(provider=HueProvider('google', json_credentials=self.json_credentials))
+
+
+# Boto looks at subclasses of boto.auth_handler.AuthHandler and checks if they can authenticate
+# The subclasses provided by gcs_oauth2_boto_plugin.oauth2_plugin are designed to work with files, but we want to programmatically configure the auth
+class OAuth2JsonServiceAccountClientAuth(boto.auth_handler.AuthHandler):
+  """AuthHandler for working with OAuth2 service account credentials."""
+
+  capability = ['google-oauth2', 's3']
+
+  def __init__(self, path, config, provider):
+    if (provider.name == 'google'):
+      self.oauth2_client = gcs_oauth2_boto_plugin.oauth2_client.OAuth2JsonServiceAccountClient(provider.get_json_credentials())
+      global IS_SERVICE_ACCOUNT
+      IS_SERVICE_ACCOUNT = True
+    else:
+      raise boto.auth_handler.NotReadyToAuthenticate()
+
+  def add_auth(self, http_request):
+    http_request.headers['Authorization'] = (
+        self.oauth2_client.GetAuthorizationHeader())
+
+class HueProvider(Provider):
+  def __init__(self, name, json_credentials=None, access_key=None, secret_key=None,
+                 security_token=None, profile_name=None):
+    self.json_credentials = json_credentials
+    super(HueProvider, self).__init__(name, access_key=access_key, secret_key=secret_key,
+                 security_token=security_token, profile_name=profile_name)
+
+  def get_json_credentials(self):
+    return self.json_credentials
+
+#Custom GSConnection to be able to add our own credential provider. This is missing on GSConnection, but not S3Connection
+class HueGSConnection(GSConnection):
+  def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None,
+                 host=GSConnection.DefaultHost, debug=0, https_connection_factory=None,
+                 calling_format=SubdomainCallingFormat(), path='/',
+                 suppress_consec_slashes=True, provider="google"):
+        super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
+                 is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+                 host, debug, https_connection_factory, calling_format, path,
+                 provider, Bucket,
+                 suppress_consec_slashes=suppress_consec_slashes)
+
+
+class CredentialProviderConf(object):
+  def __init__(self, conf):
+    self._conf=conf
+
+  def validate(self):
+    credentials = self.get_credentials()
+    if not credentials.get('JsonCredentials') and not credentials.get('AllowEnvironmentCredentials') and not credentials.get('HasIamMetadata'):
+      raise ValueError('Can\'t create GS client, credential is not configured')
+    return True
+
+  def get_credentials(self):
+    if self._conf:
+      return {
+        'JsonCredentials': json.loads(self._conf.JSON_CREDENTIALS.get()),
+        'AllowEnvironmentCredentials': False,
+        'HasIamMetadata': False
+      }
+    else:
+      return {
+        'JsonCredentials': None,
+        'AllowEnvironmentCredentials': False,
+        'HasIamMetadata': False
+      }
+
+
+class CredentialProviderIDBroker(object):
+  def __init__(self, idbroker):
+    self.idbroker=idbroker
+    self.credentials = None
+
+  def validate(self):
+    return True # Already been validated in config
+
+  def get_credentials(self):
+    return self.idbroker.get_cab().get('Credentials')
+

+ 41 - 0
desktop/core/src/desktop/lib/fs/gc/tests.py

@@ -0,0 +1,41 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+import unittest
+
+from mock import patch, Mock
+from nose.plugins.skip import SkipTest
+from nose.tools import assert_equal, assert_true, assert_not_equal
+
+from desktop.conf import is_gs_enabled
+from desktop.lib.fs.gc.client import get_client
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TestGCS(unittest.TestCase):
+  def setUp(self):
+    if not is_gs_enabled():
+      raise SkipTest('gs not enabled')
+
+  def test_with_credentials(self):
+    # Simple test that makes sure no errors are thrown. 
+    client = get_client()
+    buckets = client.listdir_stats('gs://')
+    LOG.info(len(buckets))

+ 22 - 8
desktop/core/src/desktop/lib/fs/proxyfs_test.py

@@ -36,8 +36,8 @@ def test_fs_selection():
   with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
     _has_access.return_value = True
 
-    s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
-    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    s3fs, adls, hdfs, abfs, gs = MagicMock(), MagicMock(), MagicMock(), MagicMock(), MagicMock()
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs), 'gs': wrapper(gs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     proxy_fs.isdir('s3a://bucket/key')
@@ -56,6 +56,10 @@ def test_fs_selection():
     abfs.isdir.assert_called_once_with('abfs://net/key')
     assert_false(hdfs.isdir.called)
 
+    proxy_fs.isdir('gs://net/key')
+    gs.isdir.assert_called_once_with('gs://net/key')
+    assert_false(hdfs.isdir.called)
+
     assert_raises(IOError, proxy_fs.stats, 'ftp://host')
 
 def wrapper(mock):
@@ -71,8 +75,8 @@ def test_multi_fs_selection():
   with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
     _has_access.return_value = True
 
-    s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
-    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    s3fs, adls, hdfs, abfs, gs = MagicMock(), MagicMock(), MagicMock(), MagicMock(), MagicMock()
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs), 'gs': wrapper(gs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
@@ -95,6 +99,10 @@ def test_multi_fs_selection():
     hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
     assert_false(s3fs.rename.called)
 
+    proxy_fs.copyfile('gs://bucket/key', 'key2')
+    gs.copyfile.assert_called_once_with('gs://bucket/key', 'key2')
+    assert_false(hdfs.copyfile.called)
+
     # Will be addressed in HUE-2934
     assert_raises(NotImplementedError, proxy_fs.copy_remote_dir, 's3a://bucket/key', 'adl://tmp/dir') # Exception can only be thrown if scheme is specified, else default to 1st scheme
 
@@ -122,8 +130,8 @@ class TestFsPermissions(object):
     user_client = make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
     user = User.objects.get(username='test')
 
-    s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
-    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    s3fs, adls, hdfs, abfs, gs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access"), MockFs("gs_access")
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs), 'gs': wrapper(gs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
@@ -131,6 +139,7 @@ class TestFsPermissions(object):
     remove_from_group(user.username, 'has_s3')
     remove_from_group(user.username, 'has_adls')
     remove_from_group(user.username, 'has_abfs')
+    remove_from_group(user.username, 'has_gs')
 
     # No perms by default
     assert_raises(Exception, f, 's3a://bucket')
@@ -138,6 +147,7 @@ class TestFsPermissions(object):
     assert_raises(Exception, f, 'adl://net/key')
     assert_raises(Exception, f, 'adl:/key')
     assert_raises(Exception, f, 'abfs:/key')
+    assert_raises(Exception, f, 'gs://bucket/key')
     f('hdfs://path')
     f('/tmp')
 
@@ -146,6 +156,7 @@ class TestFsPermissions(object):
       add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
       add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
       add_permission('test', 'has_abfs', permname='abfs_access', appname='filebrowser')
+      add_permission('test', 'has_gs', permname='gs_access', appname='filebrowser')
 
       f('s3a://bucket')
       f('S3A://bucket/key')
@@ -154,17 +165,19 @@ class TestFsPermissions(object):
       f('abfs:/key')
       f('hdfs://path')
       f('/tmp')
+      f('gs://bucket')
     finally:
       remove_from_group(user.username, 'has_s3')
       remove_from_group(user.username, 'has_adls')
       remove_from_group(user.username, 'has_abfs')
+      remove_from_group(user.username, 'has_gs')
 
   def test_fs_permissions_admin_user(self):
     user_client = make_logged_in_client(username='admin', groupname='default', recreate=True, is_superuser=True)
     user = User.objects.get(username='admin')
 
-    s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
-    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
+    s3fs, adls, hdfs, abfs, gs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access"), MockFs("gs_access")
+    proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs), 'gs': wrapper(gs)}, 'hdfs')
     proxy_fs.setuser(user)
 
     f = proxy_fs._get_fs
@@ -176,3 +189,4 @@ class TestFsPermissions(object):
     f('abfs:/key')
     f('hdfs://path')
     f('/tmp')
+    f('gs://bucket/key')

+ 14 - 2
desktop/core/src/desktop/lib/fsmanager.py

@@ -24,11 +24,17 @@ import azure.client
 
 from aws.conf import is_enabled as is_s3_enabled, has_s3_access
 from azure.conf import is_adls_enabled, is_abfs_enabled, has_adls_access, has_abfs_access
+
+from desktop.lib.fs.proxyfs import ProxyFS
+from desktop.conf import is_gs_enabled, has_gs_access
+from desktop.lib.fs.gc.client import get_client as get_client_gs 
+
 from hadoop.cluster import get_hdfs
 from hadoop.conf import has_hdfs_enabled
-from desktop.lib.fs.proxyfs import ProxyFS
 
-SUPPORTED_FS = ['hdfs', 's3a', 'adl', 'abfs']
+
+
+SUPPORTED_FS = ['hdfs', 's3a', 'adl', 'abfs', 'gs']
 
 
 def has_access(fs=None, user=None):
@@ -40,6 +46,8 @@ def has_access(fs=None, user=None):
     return has_s3_access(user)
   elif fs == 'abfs':
     return has_abfs_access(user)
+  elif fs == 'gs':
+    return has_gs_access(user)
 
 
 def is_enabled(fs=None):
@@ -51,6 +59,8 @@ def is_enabled(fs=None):
     return is_s3_enabled()
   elif fs == 'abfs':
     return is_abfs_enabled()
+  elif fs == 'gs':
+    return is_gs_enabled()
 
 def is_enabled_and_has_access(fs=None, user=None):
   return is_enabled(fs) and has_access(fs, user)
@@ -64,6 +74,8 @@ def _get_client(fs=None):
     return azure.client.get_client
   elif fs == 'abfs':
     return azure.client.get_client_abfs
+  elif fs == 'gs':
+    return get_client_gs
   return None
 
 

+ 17 - 13
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -23,6 +23,7 @@ import logging
 import os
 import posixpath
 import re
+from urlparse import urlparse
 import time
 
 from boto.exception import BotoClientError, S3ResponseError
@@ -74,17 +75,19 @@ def auth_error_handler(view_fn):
 
 
 class S3FileSystem(object):
-  def __init__(self, s3_connection, expiration=None):
+  def __init__(self, s3_connection, expiration=None, fs='s3a', headers=None, filebrowser_action=PERMISSION_ACTION_S3):
     self._s3_connection = s3_connection
-    self._filebrowser_action = PERMISSION_ACTION_S3
+    self._filebrowser_action = filebrowser_action
     self.user = None
     self.is_sentry_managed = lambda path: False
     self.superuser = None
     self.supergroup = None
     self.expiration = expiration
+    self.fs = fs
+    self.header_values = headers
 
   def _get_bucket(self, name):
-    return self._s3_connection.get_bucket(name)
+    return self._s3_connection.get_bucket(name, headers=self.header_values)
 
   def _get_or_create_bucket(self, name):
     try:
@@ -174,18 +177,18 @@ class S3FileSystem(object):
       raise S3FileSystemException(_('Failed to access path "%s": %s') % (path, e.message))
     if key is None:
       key = self._get_key(path, validate=False)
-    return self._stats_key(key)
+    return self._stats_key(key, self.fs)
 
   @staticmethod
-  def _stats_key(key):
+  def _stats_key(key, fs='s3a'):
     if key.size is not None:
       is_directory_name = not key.name or key.name[-1] == '/'
-      return S3Stat.from_key(key, is_dir=is_directory_name)
+      return S3Stat.from_key(key, is_dir=is_directory_name, fs=fs)
     else:
       key.name = S3FileSystem._append_separator(key.name)
       ls = key.bucket.get_all_keys(prefix=key.name, max_keys=1)
       if len(ls) > 0:
-        return S3Stat.from_key(key, is_dir=True)
+        return S3Stat.from_key(key, is_dir=True, fs=fs)
     return None
 
   @staticmethod
@@ -200,7 +203,8 @@ class S3FileSystem(object):
 
   @staticmethod
   def isroot(path):
-    return s3.is_root(path)
+    parsed = urlparse(path) 
+    return parsed.path == '/' or parsed.path == ''
 
   @staticmethod
   def join(*comp_list):
@@ -270,9 +274,9 @@ class S3FileSystem(object):
     if glob is not None:
       raise NotImplementedError(_("Option `glob` is not implemented"))
 
-    if s3.is_root(path):
+    if S3FileSystem.isroot(path):
       try:
-        return sorted([S3Stat.from_bucket(b) for b in self._s3_connection.get_all_buckets()], key=lambda x: x.name)
+        return sorted([S3Stat.from_bucket(b, self.fs) for b in self._s3_connection.get_all_buckets(headers=self.header_values)], key=lambda x: x.name)
       except S3FileSystemException as e:
         raise e
       except S3ResponseError as e:
@@ -284,13 +288,13 @@ class S3FileSystem(object):
     bucket = self._get_bucket(bucket_name)
     prefix = self._append_separator(prefix)
     res = []
-    for item in bucket.list(prefix=prefix, delimiter='/'):
+    for item in bucket.list(prefix=prefix, delimiter='/', headers=self.header_values):
       if isinstance(item, Prefix):
-        res.append(S3Stat.from_key(Key(item.bucket, item.name), is_dir=True))
+        res.append(S3Stat.from_key(Key(item.bucket, item.name), is_dir=True, fs=self.fs))
       else:
         if item.name == prefix:
           continue
-        res.append(self._stats_key(item))
+        res.append(self._stats_key(item, self.fs))
     return res
 
   @translate_s3_error

+ 5 - 5
desktop/libs/aws/src/aws/s3/s3stat.py

@@ -68,17 +68,17 @@ class S3Stat(object):
     return False
 
   @classmethod
-  def from_bucket(cls, bucket):
-    return cls(bucket.name, 's3a://%s' % bucket.name, True, 0, None)
+  def from_bucket(cls, bucket, fs='s3a'):
+    return cls(bucket.name, '%s://%s' % (fs, bucket.name), True, 0, None)
 
   @classmethod
-  def from_key(cls, key, is_dir=False):
+  def from_key(cls, key, is_dir=False, fs='s3a'):
     if key.name:
       name = posixpath.basename(key.name[:-1] if key.name[-1] == '/' else key.name)
-      path = 's3a://%s/%s' % (key.bucket.name, key.name)
+      path = '%s://%s/%s' % (fs, key.bucket.name, key.name)
     else:
       name = ''
-      path = 's3a://%s' % key.bucket.name
+      path = '%s://%s' % (fs, key.bucket.name)
 
     size = key.size or 0