
HUE-2930 [core] Add S3 filesystem

Ivan Orlov 10 years ago
parent commit 77115d3

+ 1 - 0
desktop/Makefile

@@ -40,6 +40,7 @@ DESKTOP_ROOT := $(realpath .)
 include $(ROOT)/Makefile.vars.priv
 
 APPS := core \
+	libs/aws \
 	libs/hadoop \
 	libs/indexer \
 	libs/liboauth \

+ 4 - 1
desktop/core/src/desktop/lib/fsmanager.py

@@ -20,6 +20,8 @@ from __future__ import absolute_import
 import sys
 import logging
 
+import aws
+
 from desktop.lib.fs import ProxyFS
 from hadoop import cluster
 
@@ -28,7 +30,8 @@ FS_CACHE = {}
 DEFAULT_SCHEMA = 'hdfs'
 
 FS_GETTERS = {
-  "hdfs": cluster.get_hdfs
+  "hdfs": cluster.get_hdfs,
+  "s3": aws.get_s3fs
 }
 
 
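For context, fsmanager keeps one getter per URI scheme in this registry. A minimal sketch of how such a registry is typically consumed — the `_get_fs` helper below is hypothetical and not part of this diff:

def _get_fs(scheme=DEFAULT_SCHEMA):
  # Hypothetical dispatch sketch; the real fsmanager code is not shown in this commit.
  if scheme not in FS_GETTERS:
    raise ValueError('Unknown filesystem scheme: %s' % scheme)
  if scheme not in FS_CACHE:
    FS_CACHE[scheme] = FS_GETTERS[scheme]()  # cluster.get_hdfs() or aws.get_s3fs()
  return FS_CACHE[scheme]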

+ 34 - 0
desktop/libs/aws/Makefile

@@ -0,0 +1,34 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+ifeq ($(ROOT),)
+  $(error "Error: Expected the environment variable $$ROOT to point to the Desktop installation")
+endif
+
+include $(ROOT)/Makefile.sdk
+
+default::
+	@echo '  env-install    : Install into virtual-env'
+
+#
+# env-install
+#   Install app into the virtual environment.
+#
+.PHONY: env-install
+env-install: compile ext-env-install
+	@echo '--- Installing $(APP_NAME) into virtual-env'
+	@$(ENV_PYTHON) setup.py develop -N -q

+ 1 - 0
desktop/libs/aws/babel.cfg

@@ -0,0 +1 @@
+[python: src/aws/**.py]

+ 1 - 0
desktop/libs/aws/hueversion.py

@@ -0,0 +1 @@
+../../../VERSION

+ 29 - 0
desktop/libs/aws/setup.py

@@ -0,0 +1,29 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup, find_packages
+from hueversion import VERSION
+
+setup(
+  name='aws',
+  version=VERSION,
+  url='http://github.com/cloudera/hue',
+  description='Amazon Web Services libraries',
+  packages=find_packages('src'),
+  package_dir={'': 'src'},
+  install_requires=['setuptools', 'desktop', 'boto'],
+  entry_points={'desktop.sdk.lib': 'aws=aws'}
+)
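
The `desktop.sdk.lib` entry point is how Desktop discovers this library at startup. A brief, illustrative sketch of enumerating such entry points with setuptools (not code from this commit):

import pkg_resources

for ep in pkg_resources.iter_entry_points('desktop.sdk.lib'):
  print('%s -> %r' % (ep.name, ep.load()))  # e.g. 'aws' -> <module 'aws'>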

+ 51 - 0
desktop/libs/aws/src/aws/__init__.py

@@ -0,0 +1,51 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import aws.s3
+
+from aws import conf
+from aws.client import Client
+from aws.s3.s3fs import S3FileSystem
+
+CLIENT_CACHE = None
+
+
+def get_client(identifier='default'):
+  global CLIENT_CACHE
+  _init_clients()
+  if identifier not in CLIENT_CACHE:
+    raise ValueError('Unknown AWS client: %s, check your configuration' % identifier)
+  return CLIENT_CACHE[identifier]
+
+
+def _init_clients():
+  global CLIENT_CACHE
+  if CLIENT_CACHE is not None:
+    return
+  CLIENT_CACHE = {}
+  for identifier in conf.AWS_ACCOUNTS.keys():
+    CLIENT_CACHE[identifier] = _make_client(identifier)
+
+
+def _make_client(identifier):
+  client_conf = conf.AWS_ACCOUNTS[identifier]
+  return Client.from_config(client_conf)
+
+
+def get_s3fs(identifier='default'):
+  connection = get_client(identifier).get_s3_connection()
+  return S3FileSystem(connection)
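
Taken together, the typical call path through this module looks as follows — a sketch assuming a configured 'default' account; note that clients are built once and cached, while a fresh S3FileSystem is returned per call:

import aws

fs = aws.get_s3fs()                 # S3FileSystem over the 'default' account
client = aws.get_client('default')  # cached after the first _init_clients() run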

+ 49 - 0
desktop/libs/aws/src/aws/client.py

@@ -0,0 +1,49 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import boto
+import boto.s3
+
+
+class Client(object):
+  def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, region=None):
+    self._access_key_id = aws_access_key_id
+    self._secret_access_key = aws_secret_access_key
+    self._region = region
+
+  @classmethod
+  def from_config(cls, conf):
+    access_key_id = conf.ACCESS_KEY_ID.get()
+    secret_access_key = conf.SECRET_ACCESS_KEY.get()
+    env_cred_allowed = conf.ALLOW_ENVIRONMENT_CREDENTIALS.get()
+
+    if None in (access_key_id, secret_access_key) and not env_cred_allowed:
+      raise ValueError('Cannot create AWS client, credentials are not configured')
+
+    return cls(
+      aws_access_key_id=access_key_id,
+      aws_secret_access_key=secret_access_key,
+      region=conf.REGION.get()
+    )
+
+  def get_s3_connection(self):
+    connection = boto.s3.connect_to_region(self._region,
+                                           aws_access_key_id=self._access_key_id,
+                                           aws_secret_access_key=self._secret_access_key)
+    if connection is None:
+      raise ValueError('Cannot construct an S3 connection for region %s' % self._region)
+    return connection
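
A Client can also be constructed directly, bypassing the Config objects; the credentials below are placeholders:

from aws.client import Client

client = Client(aws_access_key_id='AKIA...',  # placeholder
                aws_secret_access_key='...',  # placeholder
                region='us-east-1')
conn = client.get_s3_connection()  # boto S3Connection bound to the region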

+ 70 - 0
desktop/libs/aws/src/aws/conf.py

@@ -0,0 +1,70 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+from boto.regioninfo import get_regions
+
+from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool
+
+
+AWS_ACCOUNTS = UnspecifiedConfigSection(
+  'aws_accounts',
+  help='One entry for each AWS account',
+  each=ConfigSection(
+    help='Information about a single AWS account',
+    members=dict(
+      ACCESS_KEY_ID=Config(
+        key='access_key_id',
+        type=str,
+        private=True
+      ),
+      SECRET_ACCESS_KEY=Config(
+        key='secret_access_key',
+        type=str,
+        private=True
+      ),
+      ALLOW_ENVIRONMENT_CREDENTIALS=Config(
+        help='Allow the use of environment credential sources (environment variables, EC2 profile).',
+        key='allow_environment_credentials',
+        default=True,
+        type=coerce_bool
+      ),
+      REGION=Config(
+        key='region',
+        default='us-east-1',
+        type=str
+      )
+    )
+  )
+)
+
+
+def config_validator(user):
+  res = []
+
+  if 'default' not in AWS_ACCOUNTS.keys():
+    res.append(('aws.aws_accounts', 'Default AWS account is not configured'))
+
+  regions = get_regions('s3')  # S3 is the only supported service so far
+  region_names = [r.name for r in regions]
+
+  for name in AWS_ACCOUNTS.keys():
+    region_name = AWS_ACCOUNTS[name].REGION.get()
+    if region_name not in region_names:
+      res.append(('aws.aws_accounts.%s.region' % name,
+                  'Unknown region %s' % region_name))
+
+  return res
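
These Config objects are read programmatically by the rest of the library; a short sketch of the access pattern, assuming a 'default' account section exists:

from aws import conf

account = conf.AWS_ACCOUNTS['default']
region = account.REGION.get()                            # 'us-east-1' unless overridden
allow_env = account.ALLOW_ENVIRONMENT_CREDENTIALS.get()  # True by default

for key, msg in conf.config_validator(user=None):
  print('%s: %s' % (key, msg))  # e.g. ('aws.aws_accounts', 'Default AWS account is not configured')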

+ 121 - 0
desktop/libs/aws/src/aws/s3/__init__.py

@@ -0,0 +1,121 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import calendar
+import errno
+import logging
+import posixpath
+import re
+import sys
+import time
+
+from functools import wraps
+from boto.exception import S3ResponseError
+
+from desktop.lib.fs import utils as fs_utils
+
+ERRNO_MAP = {
+  403: errno.EACCES,
+  404: errno.ENOENT
+}
+DEFAULT_ERRNO = errno.EINVAL
+
+S3_PATH_RE = re.compile('^/*[sS]3://([^/]+)(/(.*?([^/]+)?/?))?$')
+S3_ROOT = 's3://'
+
+
+def lookup_s3error(error):
+  err_no = ERRNO_MAP.get(error.status, DEFAULT_ERRNO)
+  return IOError(err_no, error.reason)
+
+
+def translate_s3_error(fn):
+  @wraps(fn)
+  def wrapped(*args, **kwargs):
+    try:
+      return fn(*args, **kwargs)
+    except S3ResponseError:
+      _, exc, tb = sys.exc_info()
+      logging.error('S3 error: %s' % exc)
+      lookup = lookup_s3error(exc)
+      raise lookup.__class__, lookup, tb
+  return wrapped
+
+
+def parse_uri(uri):
+  """
+  Returns a tuple (bucket_name, key_name, key_basename).
+  Raises ValueError if an invalid S3 URI is passed.
+  """
+  match = S3_PATH_RE.match(uri)
+  if not match:
+    raise ValueError("Invalid S3 URI: %s" % uri)
+  key = match.group(3) or ''
+  basename = match.group(4) or ''
+  return match.group(1), key, basename
+
+
+def is_root(uri):
+  """
+  Check if URI is S3 root (S3://)
+  """
+  return uri.lower() == S3_ROOT
+
+
+def abspath(cd, uri):
+  """
+  Returns absolute URI, examples:
+
+  abspath('s3://bucket/key', 'key2') == 's3://bucket/key/key2'
+  abspath('s3://bucket/key', 's3://bucket2/key2') == 's3://bucket2/key2'
+  """
+  if not uri.lower().startswith(S3_ROOT):
+    uri = fs_utils.normpath(join(cd, '..', uri))
+  return uri
+
+
+def join(*comp_list):
+  def _prep(uri):
+    try:
+      return '/%s/%s' % parse_uri(uri)[:2]
+    except ValueError:
+      return '/' if is_root(uri) else uri
+  joined = posixpath.join(*map(_prep, comp_list))
+  if joined and joined[0] == '/':
+    joined = 's3:/%s' % joined
+  return joined
+
+
+def s3datetime_to_timestamp(datetime):
+  """
+  Returns a timestamp (in seconds) parsed from the datetime strings in S3 API responses.
+  S3 REST API returns two types of datetime strings:
+  * `Thu, 26 Feb 2015 20:42:07 GMT` for Object HEAD requests
+    (see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html);
+  * `2015-02-26T20:42:07.000Z` for Bucket GET requests
+    (see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html).
+  """
+  # There is a chance (platform-dependent) of getting a
+  # `'z' is a bad directive in format ...` error (see https://bugs.python.org/issue6641),
+  # but S3 always returns times in GMT, so the `GMT` and `.000Z` suffixes can be pruned.
+  try:
+    stripped = time.strptime(datetime[:-4], '%a, %d %b %Y %H:%M:%S')
+    assert datetime[-4:] == ' GMT', 'Time [%s] is not in GMT.' % datetime
+  except ValueError:
+    stripped = time.strptime(datetime[:-5], '%Y-%m-%dT%H:%M:%S')
+    assert datetime[-5:] == '.000Z', 'Time [%s] is not in GMT.' % datetime
+  return int(calendar.timegm(stripped))
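
The `translate_s3_error` decorator above converts boto's S3ResponseError into an IOError carrying a POSIX errno, preserving the original traceback via the Python 2 three-argument raise. A hedged usage sketch:

from aws.s3 import translate_s3_error

@translate_s3_error
def head_key(bucket, key_name):
  # A 404 from S3 surfaces here as IOError(errno.ENOENT, ...) instead of S3ResponseError.
  return bucket.get_key(key_name, validate=True)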

+ 69 - 0
desktop/libs/aws/src/aws/s3/s3_test.py

@@ -0,0 +1,69 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+from nose.tools import assert_raises, eq_
+
+from aws import s3
+
+
+def test_parse_uri():
+  p = s3.parse_uri
+
+  eq_(('bucket', 'folder/key', 'key'), p('s3://bucket/folder/key'))
+  eq_(('bucket', 'folder/key/', 'key'), p('s3://bucket/folder/key/'))
+  eq_(('bucket', 'folder/key/', 'key'), p('S3://bucket/folder/key/'))
+  eq_(('bucket', '', ''), p('s3://bucket'))
+  eq_(('bucket', '', ''), p('s3://bucket/'))
+
+  assert_raises(ValueError, p, '/local/path')
+  assert_raises(ValueError, p, 'ftp://ancient/archive')
+  assert_raises(ValueError, p, 's3:/missed/slash')
+  assert_raises(ValueError, p, 's3://')
+
+
+def test_join():
+  j = s3.join
+  eq_("s3://b", j("s3://", "b"))
+  eq_("s3://b/f", j("s3://b", "f"))
+  eq_("s3://b/f1/f2", j("s3://b", "f1", "f2"))
+  eq_("s3://b/f1/f2/../f3", j("s3://b/f1/f2", "../f3"))
+
+
+def test_abspath():
+  a = s3.abspath
+  eq_('s3://a/b/d', a('s3://a/b/c', 'd'))
+  eq_('s3://d', a('s3://a/b/c', 's3://d'))
+
+
+def test_is_root():
+  i = s3.is_root
+  eq_(True, i('s3://'))
+  eq_(True, i('S3://'))
+  eq_(False, i('s3:/'))
+  eq_(False, i('s3://bucket'))
+  eq_(False, i('/local/path'))
+
+
+def test_s3datetime_to_timestamp():
+  f = s3.s3datetime_to_timestamp
+  eq_(1424983327, f('Thu, 26 Feb 2015 20:42:07 GMT'))
+  eq_(1424983327, f('2015-02-26T20:42:07.000Z'))
+
+  assert_raises(ValueError, f, '2/26/2015 20:42:07')
+
+  assert_raises(AssertionError, f, 'Thu, 26 Feb 2015 20:42:07 PDT')
+  assert_raises(AssertionError, f, '2015-02-26T20:42:07.040Z')

+ 42 - 0
desktop/libs/aws/src/aws/s3/s3file.py

@@ -0,0 +1,42 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import errno
+
+from boto.s3.keyfile import KeyFile
+
+from aws.s3 import translate_s3_error
+
+DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
+
+
+def open(key, mode='r'):
+  if mode == 'r':
+    return _ReadableS3File(key)
+  else:
+    raise IOError(errno.EINVAL, 'Unavailable mode "%s"' % mode)
+
+
+class _ReadableS3File(KeyFile):
+  def __init__(self, key):
+    key_copy = key.bucket.get_key(key.name, validate=False)
+    KeyFile.__init__(self, key_copy)
+
+  @translate_s3_error
+  def read(self, length=DEFAULT_READ_SIZE):
+    return KeyFile.read(self, length)
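
A short usage sketch, assuming `key` is a boto Key for an existing object:

from aws.s3 import s3file

fh = s3file.open(key, mode='r')  # read-only; any other mode raises IOError(EINVAL)
head = fh.read(length=16)        # at most 16 bytes
rest = fh.read()                 # continues from the current offset, up to DEFAULT_READ_SIZE (1MB) per call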

+ 62 - 0
desktop/libs/aws/src/aws/s3/s3file_test.py

@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import os
+
+from nose.tools import eq_
+
+from aws.s3 import s3file
+from aws.s3.s3test_utils import S3TestBase
+
+
+QUOTE_EN = 'a journey of a thousand miles begins with a single step'
+QUOTE_CH = u'千里之行,始於足下'
+
+
+class S3FileTest(S3TestBase):
+  def test_basic_read(self):
+    path = self.get_test_path('test_basic_read.txt')
+    key = self.get_key(path)
+    with self.cleaning(path):
+      key.set_contents_from_string(QUOTE_EN)
+      eq_(QUOTE_EN, s3file.open(key, 'r').read())
+      eq_(QUOTE_EN[:4], s3file.open(key, 'r').read(length=4))
+
+  def test_unicode_read(self):
+    path = self.get_test_path('test_unicode_read.txt')
+    key = self.get_key(path)
+    with self.cleaning(path):
+      key.set_contents_from_string(QUOTE_CH)
+      eq_(QUOTE_CH.encode('utf-8'), s3file.open(key, 'r').read())
+      eq_(QUOTE_CH.encode('utf-8')[:4], s3file.open(key, 'r').read(length=4))
+
+  def test_seek(self):
+    path = self.get_test_path('test_seek.txt')
+    key = self.get_key(path)
+    with self.cleaning(path):
+      key.set_contents_from_string(QUOTE_EN)
+      f = s3file.open(key, 'r')
+      f.seek(0, os.SEEK_SET)
+      eq_(QUOTE_EN[:2], f.read(2))
+      f.seek(1, os.SEEK_SET)
+      eq_(QUOTE_EN[1:][:2], f.read(2))
+      f.seek(-1, os.SEEK_END)
+      eq_(QUOTE_EN[-1:], f.read())
+      f.seek(0, os.SEEK_SET)
+      f.seek(2, os.SEEK_CUR)
+      eq_(QUOTE_EN[2:][:2], f.read(2))

+ 328 - 0
desktop/libs/aws/src/aws/s3/s3fs.py

@@ -0,0 +1,328 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import sys
+
+import errno
+import itertools
+import logging
+import os
+import posixpath
+
+from boto.exception import S3ResponseError
+from boto.s3.key import Key
+from boto.s3.prefix import Prefix
+
+from aws import s3
+from aws.s3 import translate_s3_error, s3file
+from aws.s3.s3stat import S3Stat
+
+from desktop.lib.fs import utils as fs_utils
+
+DEFAULT_READ_SIZE = 1024 * 1024  # 1MB
+LOG = logging.getLogger(__name__)
+
+
+class S3FileSystem(object):
+  def __init__(self, s3_connection):
+    self._s3_connection = s3_connection
+    self._bucket_cache = None
+
+  def _init_bucket_cache(self):
+    if self._bucket_cache is None:
+      buckets = self._s3_connection.get_all_buckets()
+      self._bucket_cache = {}
+      for bucket in buckets:
+        self._bucket_cache[bucket.name] = bucket
+
+  def _get_bucket(self, name):
+    self._init_bucket_cache()
+    if name not in self._bucket_cache:
+      self._bucket_cache[name] = self._s3_connection.get_bucket(name)
+    return self._bucket_cache[name]
+
+  def _get_key(self, path, validate=True):
+    bucket_name, key_name = s3.parse_uri(path)[:2]
+    bucket = self._get_bucket(bucket_name)
+    try:
+      return bucket.get_key(key_name, validate=validate)
+    except:
+      e, exc, tb = sys.exc_info()
+      raise ValueError(exc)  # wrap the exception instance, not its class
+
+  def _stats(self, path):
+    if s3.is_root(path):
+      return S3Stat.for_s3_root()
+
+    try:
+      key = self._get_key(path, validate=True)
+    except S3ResponseError as e:
+      if e.status == 404:
+        return None
+      else:
+        exc_class, exc, tb = sys.exc_info()
+        raise exc_class, exc, tb
+
+    if key is None:
+      key = self._get_key(path, validate=False)
+    return self._stats_key(key)
+
+  @staticmethod
+  def _stats_key(key):
+    if key.size is not None:
+      is_directory_name = not key.name or key.name[-1] == '/'
+      return S3Stat.from_key(key, is_dir=is_directory_name)
+    else:
+      key.name = S3FileSystem._append_separator(key.name)
+      ls = key.bucket.get_all_keys(prefix=key.name, max_keys=1)
+      if len(ls) > 0:
+        return S3Stat.from_key(key, is_dir=True)
+    return None
+
+  @staticmethod
+  def _append_separator(path):
+    if path and not path.endswith('/'):
+      path += '/'
+    return path
+
+  @staticmethod
+  def _cut_separator(path):
+    return path.endswith('/') and path[:-1] or path
+
+  @staticmethod
+  def isroot(path):
+    return s3.is_root(path)
+
+  @staticmethod
+  def join(*comp_list):
+    return s3.join(*comp_list)
+
+  @staticmethod
+  def normpath(path):
+    return fs_utils.normpath(path)
+
+  @translate_s3_error
+  def open(self, path, mode='r'):
+    key = self._get_key(path, validate=True)
+    if key is None:
+      raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
+    return s3file.open(key, mode=mode)
+
+  @translate_s3_error
+  def read(self, path, offset, length):
+    fh = self.open(path, 'r')
+    fh.seek(offset, os.SEEK_SET)
+    return fh.read(length)
+
+  @translate_s3_error
+  def isfile(self, path):
+    stat = self._stats(path)
+    if stat is None:
+      return False
+    return not stat.isDir
+
+  @translate_s3_error
+  def isdir(self, path):
+    stat = self._stats(path)
+    if stat is None:
+      return False
+    return stat.isDir
+
+  @translate_s3_error
+  def exists(self, path):
+    return self._stats(path) is not None
+
+  @translate_s3_error
+  def stats(self, path):
+    path = fs_utils.normpath(path)
+    stats = self._stats(path)
+    if stats:
+      return stats
+    raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
+
+  @translate_s3_error
+  def listdir_stats(self, path, glob=None):
+    if glob is not None:
+      raise NotImplementedError("Option `glob` is not implemented")
+
+    if s3.is_root(path):
+      self._init_bucket_cache()
+      return [S3Stat.from_bucket(b) for b in self._bucket_cache.values()]
+
+    bucket_name, prefix = s3.parse_uri(path)[:2]
+    bucket = self._get_bucket(bucket_name)
+    prefix = self._append_separator(prefix)
+    res = []
+    for item in bucket.list(prefix=prefix, delimiter='/'):
+      if isinstance(item, Prefix):
+        res.append(S3Stat.from_key(Key(item.bucket, item.name), is_dir=True))
+      else:
+        if item.name == prefix:
+          continue
+        res.append(self._stats_key(item))
+    return res
+
+  def listdir(self, path, glob=None):
+    return [s3.parse_uri(x.path)[2] for x in self.listdir_stats(path, glob)]
+
+  @translate_s3_error
+  def rmtree(self, path, skipTrash=False):
+    if not skipTrash:
+      raise NotImplementedError('Moving to trash is not implemented for S3')
+    key = self._get_key(path, validate=False)
+
+    if key.exists():
+      to_delete = iter([key])
+    else:
+      to_delete = iter([])
+
+    if self.isdir(path):
+      # append `/` so removing `s3://b/a` does not also match `s3://b/a_new`
+      prefix = self._append_separator(key.name)
+      keys = key.bucket.list(prefix=prefix)
+      to_delete = itertools.chain(keys, to_delete)
+    result = key.bucket.delete_keys(to_delete)
+    if result.errors:
+      msg = "%d errors occurred while deleting '%s':\n%s" % (
+        len(result.errors),
+        path,
+        '\n'.join(map(repr, result.errors)))
+      LOG.error(msg)
+      raise IOError(msg)
+
+  @translate_s3_error
+  def remove(self, path, skip_trash=False):
+    if not skip_trash:
+      raise NotImplementedError('Moving to trash is not implemented for S3')
+    key = self._get_key(path, validate=False)
+    key.bucket.delete_key(key.name)
+
+  def restore(self, *args, **kwargs):
+    raise NotImplementedError('Moving to trash is not implemented for S3')
+
+  @translate_s3_error
+  def mkdir(self, path, *args, **kwargs):
+    """
+    Creates a directory and any parent directory if necessary.
+
+    Actually it creates an empty object: s3://[bucket]/[path]/
+    """
+    stats = self._stats(path)
+    if stats:
+      if stats.isDir:
+        return None
+      else:
+        raise IOError(errno.ENOTDIR, "'%s' already exists and is not a directory" % path)
+    path = self._append_separator(path)  # a folder key must end with '/'
+    self.create(path)  # create empty object
+
+  @translate_s3_error
+  def copy(self, src, dst, recursive=False, *args, **kwargs):
+    self._copy(src, dst, recursive=recursive, use_src_basename=True)
+
+  @translate_s3_error
+  def copyfile(self, src, dst, *args, **kwargs):
+    if self.isdir(dst):
+      raise IOError(errno.EINVAL, "Copy dst '%s' is a directory" % dst)
+    self._copy(src, dst, recursive=False, use_src_basename=False)
+
+  @translate_s3_error
+  def copy_remote_dir(self, src, dst, *args, **kwargs):
+    self._copy(src, dst, recursive=True, use_src_basename=False)
+
+  def _copy(self, src, dst, recursive, use_src_basename):
+    src_st = self.stats(src)
+    if src_st.isDir and not recursive:
+      return  # skip directories unless recursive is set
+
+    dst = s3.abspath(src, dst)
+    dst_st = self._stats(dst)
+    if src_st.isDir and dst_st and not dst_st.isDir:
+      raise IOError(errno.EEXIST, "Cannot overwrite non-directory '%s' with directory '%s'" % (dst, src))
+
+    src_bucket, src_key = s3.parse_uri(src)[:2]
+    dst_bucket, dst_key = s3.parse_uri(dst)[:2]
+
+    keep_src_basename = use_src_basename and dst_st and dst_st.isDir
+    src_bucket = self._get_bucket(src_bucket)
+    dst_bucket = self._get_bucket(dst_bucket)
+
+    if keep_src_basename:
+      cut = len(posixpath.dirname(src_key))  # cut off the parent directory name
+      if cut:
+        cut += 1
+    else:
+      cut = len(src_key)
+      if not src_key.endswith('/'):
+        cut += 1
+
+    for key in src_bucket.list(prefix=src_key):
+      if not key.name.startswith(src_key):
+        raise RuntimeError("Invalid key to transform: %s" % key.name)
+      dst_name = posixpath.normpath(s3.join(dst_key, key.name[cut:]))
+      key.copy(dst_bucket, dst_name)
+
+  @translate_s3_error
+  def rename(self, old, new):
+    new = s3.abspath(old, new)
+    self.copy(old, new, recursive=True)
+    self.rmtree(old, skipTrash=True)
+
+  @translate_s3_error
+  def rename_star(self, old_dir, new_dir):
+    if not self.isdir(old_dir):
+      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % old_dir)
+    if self.isfile(new_dir):
+      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % new_dir)
+    ls = self.listdir(old_dir)
+    for entry in ls:
+      self.rename(s3.join(old_dir, entry), s3.join(new_dir, entry))
+
+  @translate_s3_error
+  def create(self, path, overwrite=False, data=None):
+    key = self._get_key(path, validate=False)
+    key.set_contents_from_string(data or '', replace=overwrite)
+
+  @translate_s3_error
+  def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
+    local_src = self._cut_separator(local_src)
+    remote_dst = self._cut_separator(remote_dst)
+
+    def _copy_file(src, dst):
+      key = self._get_key(dst, validate=False)
+      fp = open(src, 'r')
+      key.set_contents_from_file(fp)
+
+    if os.path.isdir(local_src):
+      for (local_dir, sub_dirs, files) in os.walk(local_src, followlinks=False):
+        remote_dir = local_dir.replace(local_src, remote_dst)
+
+        if not sub_dirs and not files:
+          self.mkdir(remote_dir)
+        else:
+          for file_name in files:
+            _copy_file(os.path.join(local_dir, file_name), os.path.join(remote_dir, file_name))
+    else:
+      file_name = os.path.split(local_src)[1]
+      if self.isdir(remote_dst):
+        remote_file = os.path.join(remote_dst, file_name)
+      else:
+        remote_file = remote_dst
+      _copy_file(local_src, remote_file)
+
+  def setuser(self, user):
+    pass  # the user concept is not meaningful for this S3 implementation
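
End to end, the filesystem composes the pieces above; a hedged sketch (bucket and key names are placeholders):

import aws

fs = aws.get_s3fs()                 # 'default' account
fs.mkdir('s3://my-bucket/reports')  # creates the empty marker object 'reports/'
fs.create('s3://my-bucket/reports/a.txt', overwrite=True, data='hello')
print(fs.read('s3://my-bucket/reports/a.txt', 0, 5))  # 'hello'
print(fs.listdir('s3://my-bucket/reports'))           # ['a.txt']
fs.rmtree('s3://my-bucket/reports', skipTrash=True)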

+ 229 - 0
desktop/libs/aws/src/aws/s3/s3fs_test.py

@@ -0,0 +1,229 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import os
+import tempfile
+import string
+
+from nose.tools import assert_true, assert_false, assert_raises, eq_
+
+from aws.s3 import join, parse_uri
+from aws.s3.s3fs import S3FileSystem
+from aws.s3.s3test_utils import S3TestBase, generate_id
+
+
+class S3FSTest(S3TestBase):
+  @classmethod
+  def setUpClass(cls):
+    S3TestBase.setUpClass()
+    if not cls.shouldSkip():
+      cls.fs = S3FileSystem(cls.s3_connection)
+
+  def test_open(self):
+    path = self.get_test_path('test_open.txt')
+
+    with self.cleaning(path):
+      assert_raises(IOError, self.fs.open, path)
+
+      key = self.get_key(path)
+      key.set_contents_from_string('Hello')
+
+      fh1 = self.fs.open(path)
+      eq_('He', fh1.read(length=2))
+
+      fh2 = self.fs.open(path, mode='r')
+      eq_('Hello', fh2.read())
+
+      eq_('llo', fh1.read())
+
+      assert_raises(Exception, self.fs.open, path, mode='w')
+      assert_raises(Exception, self.fs.open, path, mode='?r')
+
+  def test_read(self):
+    path = self.get_test_path('test_read.txt')
+    with self.cleaning(path):
+      key = self.get_key(path)
+      key.set_contents_from_string('Hello')
+
+      eq_('Hel', self.fs.read(path, 0, 3))
+      eq_('ell', self.fs.read(path, 1, 3))
+
+  def test_isfile(self):
+    pass
+
+  def test_isdir(self):
+    pass
+
+  def test_exists(self):
+    dir_path = self.get_test_path('test_exists')
+    file_path = join(dir_path, 'file')
+
+    assert_false(self.fs.exists(dir_path))
+    assert_false(self.fs.exists(file_path))
+
+    self.fs.create(file_path)
+
+    assert_true(self.fs.exists(dir_path))
+    assert_true(self.fs.exists(file_path))
+
+    assert_true(self.fs.exists('s3://%s' % self.bucket_name))
+    assert_true(self.fs.exists('s3://'))
+    fake_bucket = 'fake%s' % generate_id(8, string.ascii_lowercase + string.digits)
+    assert_false(self.fs.exists('s3://%s' % fake_bucket))
+
+  def test_stats(self):
+    assert_raises(ValueError, self.fs.stats, 'ftp://archive')
+    not_exists = self.get_test_path('does_not_exist')
+    assert_raises(IOError, self.fs.stats, not_exists)
+
+    root_stat = self.fs.stats('s3://')
+    eq_(True, root_stat.isDir)
+    eq_('s3://', root_stat.path)
+
+    bucket_stat = self.fs.stats('s3://%s' % self.bucket_name)
+    eq_(True, bucket_stat.isDir)
+    eq_('s3://%s' % self.bucket_name, bucket_stat.path)
+
+  def test_copyfile(self):
+    src_path = self.get_test_path('test_copy_file_src')
+    dst_path = self.get_test_path('test_copy_file_dst')
+    with self.cleaning(src_path, dst_path):
+      data = "To boldly go where no one has gone before\n" * 2000
+      self.fs.create(src_path, data=data)
+      self.fs.create(dst_path, data="some initial data")
+
+      self.fs.copyfile(src_path, dst_path)
+      actual = self.fs.read(dst_path, 0, len(data) + 100)
+      eq_(data, actual)
+
+  def test_full_copy(self):
+    src_path = self.get_test_path('test_full_copy_src')
+    dst_path = self.get_test_path('test_full_copy_dst')
+
+    src_file_path = join(src_path, 'file.txt')
+    dst_file_path = join(dst_path, 'file.txt')
+
+    with self.cleaning(src_path, dst_path):
+      self.fs.mkdir(src_path)
+      self.fs.mkdir(dst_path)
+
+      data = "To boldly go where no one has gone before\n" * 2000
+      self.fs.create(src_file_path, data=data)
+
+      # File to directory copy.
+      self.fs.copy(src_file_path, dst_path)
+      assert_true(self.fs.exists(dst_file_path))
+
+      # Directory to directory copy.
+      self.fs.copy(src_path, dst_path, True)
+      base_name = parse_uri(src_path)[2]
+      dst_folder_path = join(dst_path, base_name)
+      assert_true(self.fs.exists(dst_folder_path))
+      assert_true(self.fs.exists(join(dst_folder_path, 'file.txt')))
+
+      # Copy directory to file should fail.
+      assert_raises(IOError, self.fs.copy, src_path, dst_file_path, True)
+
+  def test_copy_remote_dir(self):
+    src_dir = self.get_test_path('test_copy_remote_dir_src')
+    dst_dir = self.get_test_path('test_copy_remote_dir_dst')
+
+    with self.cleaning(src_dir, dst_dir):
+      self.fs.mkdir(src_dir)
+
+      self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
+      self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
+
+      self.fs.mkdir(dst_dir)
+      self.fs.copy_remote_dir(src_dir, dst_dir)
+
+      src_stat = self.fs.listdir_stats(src_dir)
+      dst_stat = self.fs.listdir_stats(dst_dir)
+
+      src_names = set([stat.name for stat in src_stat])
+      dst_names = set([stat.name for stat in dst_stat])
+      assert_true(src_names)
+      eq_(src_names, dst_names)
+
+  def test_copy_from_local(self):
+    src_name = 'test_copy_from_local_src'
+    src_path = os.path.join(tempfile.gettempdir(), src_name)
+    dst_path = self.get_test_path('test_copy_from_local_dst')
+
+    data = "To boldly go where no one has gone before\n" * 2000
+    f = open(src_path, 'w')
+    f.write(data)
+    f.close()
+
+    with self.cleaning(dst_path):
+      self.fs.copyFromLocal(src_path, dst_path)
+      actual = self.fs.read(dst_path, 0, len(data) + 100)
+      eq_(data, actual)
+
+  def test_rename_star(self):
+    src_dir = self.get_test_path('test_rename_star_src')
+    dst_dir = self.get_test_path('test_rename_star_dst')
+
+    with self.cleaning(src_dir, dst_dir):
+      self.fs.mkdir(src_dir)
+      self.fs.create(join(src_dir, 'file_one.txt'), data='foo')
+      self.fs.create(join(src_dir, 'file_two.txt'), data='bar')
+
+      src_ls = self.fs.listdir(src_dir)
+      eq_(2, len(src_ls))
+      assert_true('file_one.txt' in src_ls)
+      assert_true('file_two.txt' in src_ls)
+
+      src_stat = self.fs.listdir_stats(src_dir)
+
+      self.fs.mkdir(dst_dir)
+      self.fs.rename_star(src_dir, dst_dir)
+
+      dst_stat = self.fs.listdir_stats(dst_dir)
+
+      src_names = set([stat.name for stat in src_stat])
+      dst_names = set([stat.name for stat in dst_stat])
+      assert_true(src_names)
+      eq_(src_names, dst_names)
+
+  def test_rmtree(self):
+    assert_raises(NotImplementedError, self.fs.rmtree, 'universe', skipTrash=False)
+
+    directory = self.get_test_path('test_rmtree')
+    with self.cleaning(directory):
+      self.fs.mkdir(directory)
+      nested_dir = join(directory, 'nested_dir')
+      self.fs.mkdir(nested_dir)
+      file_path = join(nested_dir, 'file')
+      key = self.get_key(file_path)
+      key.set_contents_from_string('Some content')
+
+      self.fs.rmtree(directory, skipTrash=True)
+
+      assert_false(self.fs.exists(file_path))
+      assert_false(self.fs.exists(nested_dir))
+      assert_false(self.fs.exists(directory))
+
+  def test_listing_buckets(self):
+    buckets = self.fs.listdir('s3://')
+    assert_true(len(buckets) > 0)

+ 104 - 0
desktop/libs/aws/src/aws/s3/s3stat.py

@@ -0,0 +1,104 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import stat
+import posixpath
+
+from aws.s3 import s3datetime_to_timestamp
+
+
+class S3Stat(object):
+  DIR_MODE = 0777 | stat.S_IFDIR
+  FILE_MODE = 0666 | stat.S_IFREG
+
+  def __init__(self, name, path, isDir, size, mtime):
+    self.name = name
+    self.path = path
+    self.isDir = isDir
+    self.size = size
+    self.mtime = mtime
+
+  def __getitem__(self, key):
+    try:
+      return getattr(self, key)
+    except AttributeError:
+      raise KeyError(key)
+
+  def __setitem__(self, key, value):
+    # What about derivable values?
+    setattr(self, key, value)
+
+  @property
+  def type(self):
+    return 'DIRECTORY' if self.isDir else 'FILE'
+
+  @property
+  def mode(self):
+    return S3Stat.DIR_MODE if self.isDir else S3Stat.FILE_MODE
+
+  @property
+  def user(self):
+    return ''
+
+  @property
+  def group(self):
+    return ''
+
+  @property
+  def atime(self):
+    return self.mtime
+
+  @property
+  def aclBit(self):
+    return False
+
+  @classmethod
+  def from_bucket(cls, bucket):
+    return cls(bucket.name, 's3://%s' % bucket.name, True, 0, 0)
+
+  @classmethod
+  def from_key(cls, key, is_dir=False):
+    if key.name:
+      name = posixpath.basename(key.name[:-1] if key.name[-1] == '/' else key.name)
+      path = 's3://%s/%s' % (key.bucket.name, key.name)
+    else:
+      name = ''
+      path = 's3://%s' % key.bucket.name
+
+    size = key.size or 0
+    mtime = s3datetime_to_timestamp(key.last_modified) if key.last_modified else 0
+    return cls(name, path, is_dir, size, mtime)
+
+  @classmethod
+  def for_s3_root(cls):
+    return cls('S3', 's3://', True, 0, 0)
+
+  def to_json_dict(self):
+    """
+    Returns a dictionary for easy serialization
+    """
+    keys = ('path', 'size', 'atime', 'mtime', 'mode', 'user', 'group', 'aclBit')
+    res = {}
+    for k in keys:
+      res[k] = self[k]
+    return res
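
S3Stat doubles as a dict-like object, so callers can index fields by name; a brief illustration:

from aws.s3.s3stat import S3Stat

s = S3Stat('a.txt', 's3://bucket/a.txt', isDir=False, size=5, mtime=1424983327)
print(s['type'])         # 'FILE' (properties resolve through __getitem__)
print(s.to_json_dict())  # {'path': 's3://bucket/a.txt', 'size': 5, 'mode': ..., ...}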

+ 84 - 0
desktop/libs/aws/src/aws/s3/s3stat_test.py

@@ -0,0 +1,84 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import stat
+
+from nose.tools import eq_
+
+from aws.s3.s3stat import S3Stat
+
+
+def test_derivable_properties():
+  s = S3Stat('foo', 's3://bar/foo', False, 40, 1424983327)
+  eq_('FILE', s.type)
+  eq_(0666 | stat.S_IFREG, s.mode)
+  eq_('', s.user)
+  eq_('', s.group)
+  eq_(1424983327, s.atime)
+  eq_(False, s.aclBit)
+
+  s = S3Stat('bar', 's3://bar', True, 0, 1424983327)
+  eq_('DIRECTORY', s.type)
+  eq_(0777 | stat.S_IFDIR, s.mode)
+
+
+def test_from_bucket():
+  s = S3Stat.from_bucket(FakeBucket('boo'))
+  eq_('DIRECTORY', s.type)
+  eq_('boo', s.name)
+  eq_('s3://boo', s.path)
+  eq_(0, s.size)
+  eq_(0, s.atime)
+
+
+def test_from_key():
+  key = FakeKey('foo', FakeBucket('bar'), 42, 'Thu, 26 Feb 2015 20:42:07 GMT')
+  s = S3Stat.from_key(key)
+  eq_('FILE', s.type)
+  eq_('foo', s.name)
+  eq_('s3://bar/foo', s.path)
+  eq_(42, s.size)
+  eq_(1424983327, s.mtime)
+
+  key.size = None
+  key.last_modified = None
+  s = S3Stat.from_key(key, is_dir=True)
+  eq_('DIRECTORY', s.type)
+  eq_(0, s.size)
+  eq_(0, s.atime)
+
+
+def test_for_s3_root():
+  s = S3Stat.for_s3_root()
+  eq_('DIRECTORY', s.type)
+  eq_('S3', s.name)
+  eq_('s3://', s.path)
+  eq_(0, s.size)
+  eq_(0, s.atime)
+
+
+class FakeBucket(object):
+  def __init__(self, name):
+    self.name = name
+
+
+class FakeKey(object):
+  def __init__(self, name, bucket, size=None, last_modified=None):
+    self.name = name
+    self.bucket = bucket
+    self.size = size
+    self.last_modified = last_modified

+ 97 - 0
desktop/libs/aws/src/aws/s3/s3test_utils.py

@@ -0,0 +1,97 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import logging
+import os
+import random
+import string
+import unittest
+
+import aws
+
+from contextlib import contextmanager
+
+from aws.s3 import parse_uri, join
+
+
+def get_test_bucket():
+  return os.environ.get('TEST_S3_BUCKET', '')
+
+
+def generate_id(size=6, chars=string.ascii_uppercase + string.digits):
+  return ''.join(random.choice(chars) for x in range(size))
+
+
+class S3TestBase(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    cls.bucket_name = get_test_bucket()
+
+    cls._should_skip = False
+    if not cls.bucket_name:
+      cls._should_skip = True
+      cls._skip_msg = 'TEST_S3_BUCKET environment variable isn\'t set'
+      return
+
+    cls.path_prefix = 'test-hue/%s' % generate_id(size=16)
+    cls.s3_connection = aws.get_client('default').get_s3_connection()
+    cls.bucket = cls.s3_connection.get_bucket(cls.bucket_name, validate=True)
+
+  @classmethod
+  def shouldSkip(cls):
+    return cls._should_skip
+
+  def setUp(self):
+    if self.shouldSkip():
+      raise self.skipTest(self._skip_msg)
+
+  @classmethod
+  def tearDownClass(cls):
+    if not cls.shouldSkip():
+      cls.clean_up(cls.get_test_path())
+
+  @classmethod
+  def get_test_path(cls, path=None):
+    base_path = join('s3://', cls.bucket_name, cls.path_prefix)
+    if path:
+      return join(base_path, path)
+    return base_path
+
+  @classmethod
+  def get_key(cls, path, validate=False):
+    bucket_name, key_name = parse_uri(path)[:2]
+    bucket = cls.s3_connection.get_bucket(bucket_name)
+    return bucket.get_key(key_name, validate=validate)
+
+  @classmethod
+  def clean_up(cls, *paths):
+    for path in paths:
+      key = cls.get_key(path, validate=False)
+      try:
+        listing = key.bucket.list(prefix=key.name)
+        key.bucket.delete_keys(listing)
+      except:
+        pass
+
+  @classmethod
+  @contextmanager
+  def cleaning(cls, *paths):
+    try:
+      yield paths
+    finally:
+      cls.clean_up(*paths)
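
These are integration-test helpers: subclasses run against a live bucket and skip themselves unless TEST_S3_BUCKET is set (and a 'default' AWS account is configured). A minimal subclass sketch, with a hypothetical test name:

from nose.tools import eq_

from aws.s3.s3test_utils import S3TestBase

class RoundtripTest(S3TestBase):
  def test_roundtrip(self):
    path = self.get_test_path('roundtrip.txt')  # s3://<TEST_S3_BUCKET>/test-hue/<id>/roundtrip.txt
    with self.cleaning(path):                   # deletes the key (and any children) afterwards
      self.get_key(path).set_contents_from_string('x')
      eq_('x', self.get_key(path, validate=True).get_contents_as_string())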
+