[webhdfs] Be able to browse filesystem via webhdfs

* Added a REST client library with a simple HTTP client.
* Added a webhdfs implementation of the HadoopFileSystem,
  currently capable of browsing the directory structure.
* Add webhdfs config to the indented ini files
* Implement file reading
* Implement file creation and append
* Add the write interface to the legacy File object
* Implement file/directory removal
* Add chown, chmod, mkdir, rename, get_home_dir, get_content_summary
* Modify the config to allow specifying a service URL
* Update the REST client
* Automatic JSON decoding based on response Content-Type.
* Add pseudo_hdfs4 module to allow testing against CDH4 HDFS
bc Wong 14 years ago
commit 4770a7c0b3
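
As a quick orientation, here is a minimal sketch of how the resulting WebHdfs filesystem might be driven directly against a running WebHDFS endpoint; the URL, user name and path below are illustrative and not part of this change:

    from hadoop.fs.webhdfs import WebHdfs

    fs = WebHdfs("http://localhost:50070/webhdfs/v1")   # illustrative URL
    fs.setuser("hue")                                   # sets user.name on every call
    for entry in fs.listdir_stats("/user"):             # one LISTSTATUS request
      print entry                                       # WebHdfsStat: mode, owner, size, path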

+ 3 - 3
apps/filebrowser/src/filebrowser/views.py

@@ -302,7 +302,6 @@ def listdir(request, path, chooser):
 
     home_dir_path = request.user.get_home_directory()
 
-
     breadcrumbs = parse_breadcrumbs(path)
     
     data = {
@@ -323,10 +322,11 @@ def listdir(request, path, chooser):
 
     # Include parent dir, unless at filesystem root.
     if normpath(path) != posixpath.sep:
-        parent_stat = request.fs.stats(posixpath.join(path, ".."))
+        parent_path = request.fs.join(path, "..")
+        parent_stat = request.fs.stats(parent_path)
         # The 'path' field would be absolute, but we want its basename to be
         # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
-        parent_stat['path'] = posixpath.join(path, "..")
+        parent_stat['path'] = parent_path
         stats.insert(0, parent_stat)
 
     data['files'] = [_massage_stats(request, stat) for stat in stats]

+ 107 - 102
desktop/conf.dist/hue.ini

@@ -16,101 +16,101 @@
 
 [desktop]
 
-# Set this to a random string, the longer the better.
-# This is used for secure hashing in the session store.
-secret_key=
+  # Set this to a random string, the longer the better.
+  # This is used for secure hashing in the session store.
+  secret_key=
 
-# Webserver listens on this address and port
-http_host=0.0.0.0
-http_port=8088
+  # Webserver listens on this address and port
+  http_host=0.0.0.0
+  http_port=8088
 
-# Time zone name
-time_zone=America/Los_Angeles
+  # Time zone name
+  time_zone=America/Los_Angeles
 
-# Turn off debug
-django_debug_mode=0
+  # Turn off debug
+  django_debug_mode=0
 
-# Turn off backtrace for server error
-http_500_debug_mode=0
+  # Turn off backtrace for server error
+  http_500_debug_mode=0
 
-# Set to true to use CherryPy as the webserver, set to false
-# to use Spawning as the webserver. Defaults to Spawning if
-# key is not specified.
-## use_cherrypy_server = false
+  # Set to true to use CherryPy as the webserver, set to false
+  # to use Spawning as the webserver. Defaults to Spawning if
+  # key is not specified.
+  ## use_cherrypy_server = false
 
-# Webserver runs as this user
-## server_user=hue
-## server_group=hue
+  # Webserver runs as this user
+  ## server_user=hue
+  ## server_group=hue
 
-# If set to false, runcpserver will not actually start the web server.
-# Used if Apache is being used as a WSGI container.
-## enable_server=yes
+  # If set to false, runcpserver will not actually start the web server.
+  # Used if Apache is being used as a WSGI container.
+  ## enable_server=yes
 
-# Number of threads used by the CherryPy web server
-## cherrypy_server_threads=10
+  # Number of threads used by the CherryPy web server
+  ## cherrypy_server_threads=10
 
-# Filename of SSL Certificate
-## ssl_certificate=
+  # Filename of SSL Certificate
+  ## ssl_certificate=
 
-# Filename of SSL RSA Private Key
-## ssl_private_key=
+  # Filename of SSL RSA Private Key
+  ## ssl_private_key=
 
-# Default encoding for site data
-## default_site_encoding=utf-8
+  # Default encoding for site data
+  ## default_site_encoding=utf-8
 
-# Configuration options for user authentication into the web application
-# ------------------------------------------------------------------------
-[[auth]]
+  # Configuration options for user authentication into the web application
+  # ------------------------------------------------------------------------
+  [[auth]]
 
-# Authentication backend. Common settings are:
-# - django.contrib.auth.backends.ModelBackend (entirely Django backend)
-# - desktop.auth.backend.AllowAllBackend (allows everyone)
-# - desktop.auth.backend.AllowFirstUserDjangoBackend
-#     (Default. Relies on Django and user manager, after the first login)
+    # Authentication backend. Common settings are:
+    # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
+    # - desktop.auth.backend.AllowAllBackend (allows everyone)
+    # - desktop.auth.backend.AllowFirstUserDjangoBackend
+    #     (Default. Relies on Django and user manager, after the first login)
 
 
-# Configuration options for specifying the Desktop Database.  For more info,
-# see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
-# ------------------------------------------------------------------------
-[[database]]
-# Database engine is typically one of:
-# postgresql, mysql, sqlite3, or oracle
-#
-# Note that for sqlite3, 'name', below is a filename;
-# for other backends, it is the database name.
-## engine=sqlite3
-## host=
-## port=
-## user=
-## password=
-## name=
+  # Configuration options for specifying the Desktop Database.  For more info,
+  # see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
+  # ------------------------------------------------------------------------
+  [[database]]
+    # Database engine is typically one of:
+    # postgresql, mysql, sqlite3, or oracle
+    #
+    # Note that for sqlite3, 'name', below is a filename;
+    # for other backends, it is the database name.
+    ## engine=sqlite3
+    ## host=
+    ## port=
+    ## user=
+    ## password=
+    ## name=
 
 
-# Configuration options for connecting to an external SMTP server
-# ------------------------------------------------------------------------
-[[smtp]]
+  # Configuration options for connecting to an external SMTP server
+  # ------------------------------------------------------------------------
+  [[smtp]]
 
-# The SMTP server information for email notification delivery
-host=localhost
-port=25
-user=
-password=
+    # The SMTP server information for email notification delivery
+    host=localhost
+    port=25
+    user=
+    password=
 
-# Whether to use a TLS (secure) connection when talking to the SMTP server
-tls=no
+    # Whether to use a TLS (secure) connection when talking to the SMTP server
+    tls=no
 
-# Default email address to use for various automated notification from Hue  
-## default_from_email=hue@localhost
+    # Default email address to use for various automated notification from Hue  
+    ## default_from_email=hue@localhost
 
 
-# Configuration options for Kerberos integration for secured Hadoop clusters
-# ------------------------------------------------------------------------
-[[kerberos]]
+  # Configuration options for Kerberos integration for secured Hadoop clusters
+  # ------------------------------------------------------------------------
+  [[kerberos]]
 
-# Path to Hue's Kerberos keytab file
-## hue_keytab=
-# Kerberos principal name for Hue
-## hue_principal=hue/hostname.foo.com
+    # Path to Hue's Kerberos keytab file
+    ## hue_keytab=
+    # Kerberos principal name for Hue
+    ## hue_principal=hue/hostname.foo.com
 
 
 ###########################################################################
@@ -119,33 +119,38 @@ tls=no
 
 [hadoop]
 
-# If you installed Hadoop in a different location, you need to set hadoop_home,
-# in which bin/hadoop, the Hadoop wrapper script, is found.
-#
-# NOTE: Hue depends on Cloudera's Distribution of Hadoop version 2 (CDH2)
-# or later.
-hadoop_home=/usr/lib/hadoop-0.20
-# hadoop_conf_dir=/etc/hadoop/conf
-
-
-# Configuration for HDFS NameNode
-# ------------------------------------------------------------------------
-[[hdfs_clusters]]
-
-[[[default]]]
-# Enter the host and port on which you are running the Hadoop NameNode
-namenode_host=localhost
-hdfs_port=8020
-http_port=50070
-# Thrift plugin port for the name node
-## thrift_port=10090
-
-# Configuration for MapReduce JobTracker
-# ------------------------------------------------------------------------
-[[mapred_clusters]]
-
-[[[default]]]
-# Enter the host on which you are running the Hadoop JobTracker
-jobtracker_host=localhost
-# Thrift plug-in port for the JobTracker
-## thrift_port=9290
+  # If you installed Hadoop in a different location, you need to set
+  # hadoop_home, in which bin/hadoop, the Hadoop wrapper script, is found.
+  #
+  # NOTE: Hue depends on Cloudera's Distribution of Hadoop version 3 (CDH3)
+  # or later.
+  hadoop_home=/usr/lib/hadoop-0.20
+  # hadoop_conf_dir=/etc/hadoop/conf
+
+
+  # Configuration for HDFS NameNode
+  # ------------------------------------------------------------------------
+  [[hdfs_clusters]]
+
+    [[[default]]]
+      # Enter the host and port on which you are running the Hadoop NameNode
+      namenode_host=localhost
+      hdfs_port=8020
+      http_port=50070
+      # Thrift plugin port for the name node
+      ## thrift_port=10090
+
+      # Use WebHdfs/HttpFs as the communication mechanism. To fall back to
+      # using the Thrift plugin (used in Hue 1.x), this must be uncommented
+      # and explicitly set to the empty value.
+      ## webhdfs_url=
+
+  # Configuration for MapReduce JobTracker
+  # ------------------------------------------------------------------------
+  [[mapred_clusters]]
+
+    [[[default]]]
+      # Enter the host on which you are running the Hadoop JobTracker
+      jobtracker_host=localhost
+      # Thrift plug-in port for the JobTracker
+      ## thrift_port=9290

+ 23 - 41
desktop/conf/pseudo-distributed.ini.tmpl

@@ -1,51 +1,33 @@
 [desktop]
-send_dbug_messages=1
-database_logging=1
+  send_dbug_messages=1
+  database_logging=0
 
-[[auth]]
-# Uncomment the next line in order to use PAM for authentication.
-# This will check usernames and passwords against your linux system.
-#
-# WARNING: this may be a security concern if Hue does not
-# run beneath SSL.
-# backend=desktop.auth.backend.PamBackend
-
-[[local_filesystems]]
-# Work in progress: specify local paths that should also be browsable
-[[[tmpfs]]]
-path=/tmp
+  [[auth]]
+    # Uncomment the next line in order to use PAM for authentication.
+    # This will check usernames and passwords against your linux system.
+    #
+    # WARNING: this may be a security concern if Hue does not
+    # run beneath SSL.
+    # backend=desktop.auth.backend.PamBackend
 
 
 [hadoop]
-# Point this variable at the path to your Hadoop wrapper script
-hadoop_home=$HADOOP_HOME
-
-[[hdfs_clusters]]
-[[[default]]]
-namenode_host=localhost
-
-[[mapred_clusters]]
-[[[default]]]
-jobtracker_host=localhost
+  # Point this variable at the path to your Hadoop wrapper script
+  hadoop_home=$HADOOP_HOME
 
-[health]
-[[ganglia_clusters]]
-[[[default]]]
-# If you are not running Ganglia, you can delete this whole
-# section or just leave it be.
-gmetad_host=localhost
-gmetad_port=8649
-ganglia_url=http://%(gmetad_host)s:80/ganglia/
+  [[hdfs_clusters]]
+    [[[default]]]
+      security_enabled=false
+      namenode_host=localhost
+      hdfs_port=8020
+      http_port=50070
 
+  [[mapred_clusters]]
+    [[[default]]]
+      jobtracker_host=localhost
 
-[[lint_checks]]
-# Users can define new thresholds for lint checks. This is an advanced feature
-# but useful for testing.
-#[[[dfs_free_space]]]
-#critical_level=100000000000
-#warn_level    =1000000000000
 
 [beeswax]
-# Point this variable to your Hive installation config dir (if applicable)
-# hive_conf_dir=
-beeswax_server_heapsize=128
+  # Point this variable to your Hive installation config dir (if applicable)
+  # hive_conf_dir=
+  beeswax_server_heapsize=128

+ 1 - 1
desktop/core/src/desktop/lib/fsmanager.py

@@ -56,7 +56,7 @@ def get_default_hdfs():
   _init_filesystems()
   for name, fs in _filesystems.iteritems():
     # Return the first HDFS encountered
-    if fs.uri.startswith('hdfs'):
+    if fs.uri.startswith('hdfs') or fs.uri.startswith('http'):
       return name, fs
   return None, None
 

+ 56 - 30
desktop/core/src/desktop/lib/rest/http_client.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Licensed to Cloudera, Inc. under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -17,9 +16,12 @@
 
 import cookielib
 import logging
+import posixpath
 import urllib
 import urllib2
 
+__docformat__ = "epytext"
+
 LOG = logging.getLogger(__name__)
 
 class RestException(Exception):
@@ -31,20 +33,36 @@ class RestException(Exception):
     self._error = error
     self._code = None
     self._message = str(error)
-    if isinstance(error, urllib2.HTTPError):
+    # See if there is a code or a message. (For urllib2.HTTPError.)
+    try:
       self._code = error.getcode()
       self._message = error.read()
+    except AttributeError:
+      pass
 
   def __str__(self):
-    res = self._message
+    res = self._message or ""
     if self._code is not None:
       res += " (error %s)" % (self._code,)
     return res
 
+  def get_parent_ex(self):
+    if isinstance(self._error, Exception):
+      return self._error
+    return None
+
+  @property
+  def code(self):
+    return self._code
+
+  @property
+  def message(self):
+    return self._message
+
 
 class HttpClient(object):
   """
-  Basic HTTP client tailed for rest APIs.
+  Basic HTTP client tailored for rest APIs.
   """
   def __init__(self, base_url, exc_class=None, logger=None):
     """
@@ -56,6 +74,7 @@ class HttpClient(object):
     self._base_url = base_url.rstrip('/')
     self._exc_class = exc_class or RestException
     self._logger = logger or LOG
+    self._headers = { }
 
     # Make a basic auth handler that does nothing. Set credentials later.
     self._passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
@@ -74,8 +93,19 @@ class HttpClient(object):
     @param username: Login name.
     @param password: Login password.
     @param realm: The authentication realm.
+    @return: The current object
     """
     self._passmgr.add_password(realm, self._base_url, username, password)
+    return self
+
+  def set_headers(self, headers):
+    """
+    Add headers to the request
+    @param headers: A dictionary with the key value pairs for the headers
+    @return: The current object
+    """
+    self._headers = headers
+    return self
 
 
   @property
@@ -86,47 +116,43 @@ class HttpClient(object):
   def logger(self):
     return self._logger
 
-  def execute(self, http_method, path, **params):
+  def execute(self, http_method, path, params=None, data=None):
     """
     Submit an HTTP request.
     @param http_method: GET, POST, PUT, DELETE
     @param path: The path of the resource.
-    @param params: Key-value data.
+    @param params: Key-value parameter data.
+    @param data: The data to attach to the body of the request.
 
-    @rtype: json
-    @return: The JSON result of the API call.
+    @return: The result of urllib2.urlopen()
     """
     # Prepare URL and params
-    param_str = urllib.urlencode(params)
+    url = self._make_url(path, params)
     if http_method in ("GET", "DELETE"):
-      url = "%s/%s?%s" % (self._base_url, path, param_str)
-      data = None
-    elif http_method == "POST":
-      url = "%s/%s" % (self._base_url, path)
-      data = param_str
-    elif http_method == "PUT":
-      url = "%s/%s" % (self._base_url, path)
-      data = param_str
-    else:
-      raise NotImplementedError("Method type %s not supported" % (http_method,))
+      if data is not None:
+        self.logger.warn(
+            "GET method does not pass any data. Path '%s'" % (path,))
+        data = None
 
     # Setup the request
-    request = urllib2.Request(url)
+    request = urllib2.Request(url, data)
+    # Hack/workaround because urllib2 only does GET and POST
     request.get_method = lambda: http_method
-    request.get_data = lambda: data
+    for k, v in self._headers.items():
+      request.add_header(k, v)
 
     # Call it
     self.logger.debug("%s %s" % (http_method, url))
     try:
-      call = self._opener.open(request)
+      return self._opener.open(request)
     except urllib2.HTTPError, ex:
       raise self._exc_class(ex)
 
-    try:
-      resp = call.read()
-      self.logger.debug("%s Got response: %s%s" %
-                        (http_method, resp[:32], len(resp) > 32 and "..." or ""))
-    except Exception, ex:
-      raise Exception("Command '%s %s' failed: %s" %
-                      (http_method, path, ex))
-    return resp
+  def _make_url(self, path, params):
+    res = self._base_url
+    if path:
+      res += posixpath.normpath('/' + path)
+    if params:
+      param_str = urllib.urlencode(params)
+      res += '?' + param_str
+    return res
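
For reference, a small sketch of driving the reworked HttpClient by hand; the base URL and path are illustrative, and execute() now hands back the raw urllib2 response object instead of a decoded body:

    import logging
    from desktop.lib.rest.http_client import HttpClient

    client = HttpClient("http://localhost:50070/webhdfs/v1",        # illustrative URL
                        logger=logging.getLogger("rest_example"))
    client.set_headers({"Accept": "application/json"})
    resp = client.execute("GET", "/user/hue", params={"op": "LISTSTATUS"})
    print resp.read()   # caller decides how to decode the body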

+ 42 - 24
desktop/core/src/desktop/lib/rest/resource.py

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Licensed to Cloudera, Inc. under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -20,6 +19,7 @@ try:
 except ImportError:
   import simplejson as json
 import logging
+import posixpath
 
 LOG = logging.getLogger(__name__)
 
@@ -36,31 +36,47 @@ class Resource(object):
     self._client = client
     self._path = relpath.strip('/')
 
+  @property
+  def base_url(self):
+    return self._client.base_url
+
   def _join_uri(self, relpath):
     if relpath is None:
       return self._path
-    return (self._path + '/' + relpath).strip('/')
+    return self._path + posixpath.normpath('/' + relpath)
 
-  def _invoke(self, method, relpath=None, json_decode=True, **params):
+  def invoke(self, method, relpath=None, params=None, data=None):
     """
     Invoke an API method.
-    @return: JSON dictionary.
+    @return: Raw body or JSON dictionary (if response content type is JSON).
     """
     path = self._join_uri(relpath)
-    res = self._client.execute(method, path, **params)
-    if not json_decode:
-      return res
+    resp = self._client.execute(method, path, params=params, data=data)
 
     try:
-      # Return JSON
-      json_dict = json.loads(res)
-      return json_dict
+      body = resp.read()
     except Exception, ex:
-      self._client.logger.exception('Server response: %s' % (res,))
-      raise ex
-
-
-  def get(self, relpath=None, **params):
+      raise Exception("Command '%s %s' failed: %s" %
+                      (method, path, ex))
+
+    self._client.logger.debug(
+        "%s Got response: %s%s" %
+        (method, body[:32], len(body) > 32 and "..." or ""))
+
+    # Is the response application/json?
+    if resp.info().getmaintype() == "application" and \
+         resp.info().getsubtype() == "json":
+      try:
+        json_dict = json.loads(body)
+        return json_dict
+      except Exception, ex:
+        self._client.logger.exception('JSON decode error: %s' % (body,))
+        raise ex
+    else:
+      return body
+
+
+  def get(self, relpath=None, params=None):
     """
     Invoke the GET method on a resource.
     @param relpath: Optional. A relative path to this resource's path.
@@ -68,37 +84,39 @@ class Resource(object):
 
     @return: A dictionary of the JSON result.
     """
-    return self._invoke("GET", relpath, **params)
+    return self.invoke("GET", relpath, params)
 
 
-  def get_raw(self, relpath=None, **params):
+  def delete(self, relpath=None, params=None):
     """
-    Invoke the GET method on a resource.
+    Invoke the DELETE method on a resource.
     @param relpath: Optional. A relative path to this resource's path.
     @param params: Key-value data.
 
-    @return: Raw response body.
+    @return: A dictionary of the JSON result.
     """
-    return self._invoke("GET", relpath, json_decode=False, **params)
+    return self.invoke("DELETE", relpath, params)
 
 
-  def post(self, relpath=None, **params):
+  def post(self, relpath=None, params=None, data=None):
     """
     Invoke the POST method on a resource.
     @param relpath: Optional. A relative path to this resource's path.
     @param params: Key-value data.
+    @param data: Optional. Body of the request.
 
     @return: A dictionary of the JSON result.
     """
-    return self._invoke("POST", relpath, **params)
+    return self.invoke("POST", relpath, params, data)
 
 
-  def put(self, relpath=None, **params):
+  def put(self, relpath=None, params=None, data=None):
     """
     Invoke the PUT method on a resource.
     @param relpath: Optional. A relative path to this resource's path.
     @param params: Key-value data.
+    @param data: Optional. Body of the request.
 
     @return: A dictionary of the JSON result.
     """
-    return self._invoke("PUT", relpath, **params)
+    return self.invoke("PUT", relpath, params, data)

+ 8 - 4
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from hadoop.fs import hadoopfs, LocalSubFileSystem
+from hadoop.fs import hadoopfs, webhdfs, LocalSubFileSystem
 from hadoop.job_tracker import LiveJobTracker
 
 from desktop.lib.paths import get_build_dir
@@ -34,9 +34,13 @@ def _make_filesystem(identifier):
     return LocalSubFileSystem(path)
   else:
     cluster_conf = conf.HDFS_CLUSTERS[identifier]
-    return hadoopfs.HadoopFileSystem.from_config(
-      cluster_conf,
-      hadoop_bin_path=conf.HADOOP_BIN.get())
+    # The only way to disable webhdfs is to specify an empty value
+    if cluster_conf.WEBHDFS_URL.get() != '':
+      return webhdfs.WebHdfs.from_config(cluster_conf)
+    else:
+      return hadoopfs.HadoopFileSystem.from_config(
+        cluster_conf,
+        hadoop_bin_path=conf.HADOOP_BIN.get())
 
 def _make_mrcluster(identifier):
   cluster_conf = conf.MR_CLUSTERS[identifier]

+ 6 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -131,6 +131,12 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
                             type=int),
       NN_HTTP_PORT=Config("http_port", help="Hadoop HTTP port for the name node", default=50070,
                             type=int),
+      WEBHDFS_URL=Config("webhdfs_url",
+                         help="The URL to WebHDFS/HttpFs service. Defaults to " +
+                         "the WebHDFS URL on the NameNode. To use the legacy " +
+                         "Thrift plugin communication mechanism, this must be " +
+                         "set to an empty value.",
+                         type=str, default=None),
       NN_KERBEROS_PRINCIPAL=Config("nn_kerberos_principal", help="Kerberos principal for NameNode",
                                    default="hdfs", type=str),
       DN_KERBEROS_PRINCIPAL=Config("dn_kerberos_principal", help="Kerberos principal for DataNode",

+ 15 - 4
desktop/libs/hadoop/src/hadoop/fs/__init__.py

@@ -31,15 +31,26 @@ native python interfaces.
 """
 
 import __builtin__
+import errno
 import grp
+import logging
 import os
-import errno
+import posixpath
 import pwd
+import re
 import shutil
 import stat
-import logging
-import posixpath
-import re
+import sys
+
+# SEEK_SET and family is found in posixfile or os, depending on the python version
+if sys.version_info[:2] < (2, 5):
+  import posixfile
+  _tmp_mod = posixfile
+else:
+  _tmp_mod = os
+SEEK_SET, SEEK_CUR, SEEK_END = _tmp_mod.SEEK_SET, _tmp_mod.SEEK_CUR, _tmp_mod.SEEK_END
+del _tmp_mod
+
 
 # The web (and POSIX) always uses forward slash as a separator
 LEADING_DOUBLE_SEPARATORS = re.compile("^" + posixpath.sep*2)

+ 20 - 0
desktop/libs/hadoop/src/hadoop/fs/exceptions.py

@@ -15,7 +15,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+try:
+  import json
+except:
+  import simplejson as json
+
 from desktop.lib.django_util import StructuredException
+from desktop.lib.rest.http_client import RestException
+
 
 class PermissionDeniedException(StructuredException):
   def __init__(self, msg, orig_exc=None):
@@ -23,3 +30,16 @@ class PermissionDeniedException(StructuredException):
     StructuredException.__init__(self,
       "PERMISSION_DENIED",
       msg)
+
+
+class WebHdfsException(RestException):
+  def __init__(self, error):
+    RestException.__init__(self, error)
+
+    try:
+      json_body = json.loads(self._message)['RemoteException']
+      self.server_exc = json_body['exception']
+      self._message = "%s: %s" % (self.server_exc, json_body['message'])
+    except:
+      # Don't mask the original exception
+      self.server_exc = None
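
To illustrate the parsing above: WebHDFS wraps errors in a RemoteException envelope, which WebHdfsException unpacks into server_exc and a readable message. A sketch with a made-up error payload and a stand-in for urllib2.HTTPError:

    import json
    from hadoop.fs.exceptions import WebHdfsException

    class _FakeHttpError(Exception):
      """Stand-in for urllib2.HTTPError, for illustration only."""
      def getcode(self):
        return 404
      def read(self):
        return json.dumps({'RemoteException': {
            'exception': 'FileNotFoundException',
            'message': 'File /no/such/path does not exist.'}})

    ex = WebHdfsException(_FakeHttpError())
    print ex.server_exc   # FileNotFoundException
    print ex              # FileNotFoundException: File /no/such/path ... (error 404)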

+ 36 - 31
desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py

@@ -25,15 +25,10 @@ import posixpath
 import random
 import stat as statconsts
 import subprocess
-import sys
-import time
 import urlparse
-import tempfile
 import threading
 
 from thrift.transport import TTransport
-from thrift.transport import TSocket
-from thrift.protocol import TBinaryProtocol
 
 from django.utils.encoding import smart_str, force_unicode
 from desktop.lib import thrift_util, i18n
@@ -42,17 +37,9 @@ from hadoop.api.hdfs import Namenode, Datanode
 from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
 from hadoop.api.common.ttypes import RequestContext, IOException
 import hadoop.conf
-from hadoop.fs import normpath
+from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
 from hadoop.fs.exceptions import PermissionDeniedException
 
-# SEEK_SET and family is found in posixfile or os, depending on the python version
-if sys.version_info[:2] < (2, 5):
-  import posixfile
-  _tmp_mod = posixfile
-else:
-  _tmp_mod = os
-SEEK_SET, SEEK_CUR, SEEK_END = _tmp_mod.SEEK_SET, _tmp_mod.SEEK_CUR, _tmp_mod.SEEK_END
-del _tmp_mod
 
 LOG = logging.getLogger(__name__)
 
@@ -172,7 +159,41 @@ def _coerce_exceptions(function):
   return wrapper
 
 
-class HadoopFileSystem(object):
+class Hdfs(object):
+  """
+  An abstract HDFS proxy
+  """
+
+  @staticmethod
+  def basename(path):
+    return posixpath.basename(path)
+
+  @staticmethod
+  def dirname(path):
+    return posixpath.dirname(path)
+
+  @staticmethod
+  def split(path):
+    return posixpath.split(path)
+
+  @staticmethod
+  def join(first, *comp_list):
+    return posixpath.join(first, *comp_list)
+
+  @staticmethod
+  def abspath(path):
+    return posixpath.abspath(path)
+
+  @staticmethod
+  def normpath(path):
+    res = posixpath.normpath(path)
+    # Python normpath() doesn't eliminate leading double slashes
+    if res.startswith('//'):
+      return res[1:]
+    return res
+
+
+class HadoopFileSystem(Hdfs):
   """
   Implementation of Filesystem APIs through Thrift to a Hadoop cluster.
   """
@@ -635,22 +656,6 @@ class HadoopFileSystem(object):
       'atime': stat.atime
       }
 
-  @staticmethod
-  def basename(path):
-    return posixpath.basename(path)
-
-  @staticmethod
-  def dirname(path):
-    return posixpath.dirname(path)
-
-  @staticmethod
-  def split(path):
-    return posixpath.split(path)
-
-  @staticmethod
-  def join(first, *comp_list):
-    return posixpath.join(first, *comp_list)
-
   @staticmethod
   def urlsplit(url):
     """

+ 466 - 0
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -0,0 +1,466 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Interfaces for Hadoop filesystem access via HttpFs/WebHDFS
+"""
+
+import errno
+import logging
+import random
+import threading
+
+from django.utils.encoding import smart_str
+from desktop.lib.rest import http_client, resource
+from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
+from hadoop.fs.hadoopfs import encode_fs_path, Hdfs
+from hadoop.fs.exceptions import WebHdfsException
+from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
+
+
+DEFAULT_USER = 'hue_webui'
+
+# The number of bytes to read if not specified
+DEFAULT_READ_SIZE = 1024*1024 # 1MB
+
+LOG = logging.getLogger(__name__)
+
+class WebHdfs(Hdfs):
+  """
+  WebHdfs implements the filesystem interface via the WebHDFS rest protocol.
+  """
+  def __init__(self, url,
+               hdfs_superuser="hdfs",
+               security_enabled=False,
+               temp_dir="/tmp"):
+    self._url = url
+    self._superuser = hdfs_superuser
+    self._security_enabled = security_enabled
+    self._temp_dir = temp_dir
+
+    self._client = self._make_client(url)
+    self._root = resource.Resource(self._client)
+
+    # To store user info
+    self._thread_local = threading.local()
+    self.setuser(DEFAULT_USER)
+
+    LOG.debug("Initializing Hadoop WebHdfs: %s (security: %s, superuser: %s)" %
+              (self._url, self._security_enabled, self._superuser))
+
+  @classmethod
+  def from_config(cls, hdfs_config):
+    return cls(url=_get_service_url(hdfs_config),
+               security_enabled=hdfs_config.SECURITY_ENABLED.get(),
+               temp_dir=hdfs_config.TEMP_DIR.get())
+
+  def __str__(self):
+    return "WebHdfs at %s" % (self._url,)
+
+  def _make_client(self, url):
+    return http_client.HttpClient(
+        url, exc_class=WebHdfsException, logger=LOG)
+
+  @property
+  def uri(self):
+    return self._url
+
+  @property
+  def superuser(self):
+    return self._superuser
+  
+  @property
+  def user(self):
+    return self._thread_local.user
+
+  def _getparams(self):
+    return { "user.name" : self._thread_local.user }
+
+  def setuser(self, user):
+    self._thread_local.user = user
+
+
+  def listdir_stats(self, path, glob=None):
+    """
+    listdir_stats(path, glob=None) -> [ WebHdfsStat ]
+
+    Get directory listing with stats.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    if glob is not None:
+      params['filter'] = glob
+    params['op'] = 'LISTSTATUS'
+    json = self._root.get(path, params)
+    filestatus_list = json['FileStatuses']['FileStatus']
+    return [ WebHdfsStat(st, path) for st in filestatus_list ]
+
+  def listdir(self, path, glob=None):
+    """
+    listdir(path, glob=None) -> [ entry names ]
+
+    Get directory entry names without stats.
+    """
+    dirents = self.listdir_stats(path, glob)
+    return [ Hdfs.basename(x.path) for x in dirents ]
+
+  def get_content_summary(self, path):
+    """
+    get_content_summary(path) -> WebHdfsContentSummary
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'GETCONTENTSUMMARY'
+    json = self._root.get(path, params)
+    return WebHdfsContentSummary(json['ContentSummary'])
+
+
+  def _stats(self, path):
+    """This version of stats returns None if the entry is not found"""
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'GETFILESTATUS'
+    try:
+      json = self._root.get(path, params)
+      return WebHdfsStat(json['FileStatus'], path)
+    except WebHdfsException, ex:
+      if ex.server_exc == 'FileNotFoundException':
+        return None
+      raise ex
+
+  def stats(self, path):
+    """
+    stats(path) -> WebHdfsStat
+    """
+    res = self._stats(path)
+    if res is not None:
+      return res
+    raise IOError(errno.ENOENT, "File %s not found" % (smart_str(path),))
+
+  def exists(self, path):
+    return self._stats(path) is not None
+
+  def isdir(self, path):
+    sb = self._stats(path)
+    if sb is None:
+      return False
+    return sb.isDir
+
+  def isfile(self, path):
+    sb = self._stats(path)
+    if sb is None:
+      return False
+    return not sb.isDir
+
+  def _delete(self, path, recursive=False):
+    """
+    _delete(path, recursive=False)
+
+    Delete a file or directory.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'DELETE'
+    params['recursive'] = recursive and 'true' or 'false'
+    result = self._root.delete(path, params)
+    # This part of the API is nonsense.
+    # The lack of exception should indicate success.
+    if not result['boolean']:
+      raise IOError('Delete failed: %s' % (smart_str(path),))
+
+  def remove(self, path):
+    """Delete a file."""
+    self._delete(path, recursive=False)
+
+  def rmdir(self, path):
+    """Delete a file."""
+    self._delete(path, recursive=False)
+
+  def rmtree(self, path):
+    """Delete a tree recursively."""
+    self._delete(path, recursive=True)
+
+  def mkdir(self, path, mode=None):
+    """
+    mkdir(path, mode=None)
+
+    Creates a directory and any parent directory if necessary.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'MKDIRS'
+    if mode is not None:
+      params['permission'] = safe_octal(mode)
+    result = self._root.put(path, params)
+    if not result['boolean']:
+      raise IOError("Mkdir failed: %s" % (smart_str(path),))
+
+  def mktemp(self, subdir='', prefix='tmp'):
+    """
+    mktemp(subdir, prefix) ->  <temp_dir>/subdir/prefix.<rand>
+    Return a unique temporary filename with prefix in the cluster's temp dir.
+    """
+    RANDOM_BITS = 64
+
+    base = self.join(self._temp_dir, subdir)
+    if not self.isdir(base):
+      self.mkdir(base)
+
+    while True:
+      name = "%s.%s" % (prefix, random.getrandbits(RANDOM_BITS))
+      candidate = self.join(base, name)
+      if not self.exists(candidate):
+        return candidate
+
+  def rename(self, old, new):
+    """rename(old, new)"""
+    old = encode_fs_path(Hdfs.normpath(old))
+    new = encode_fs_path(Hdfs.normpath(new))
+    params = self._getparams()
+    params['op'] = 'RENAME'
+    # Encode `new' because it's in the params
+    params['destination'] = smart_str(new)
+    result = self._root.put(old, params)
+    if not result['boolean']:
+      raise IOError("Rename failed: %s -> %s" %
+                    (smart_str(old), smart_str(new)))
+
+  def chown(self, path, user=None, group=None):
+    """chown(path, user=None, group=None)"""
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'SETOWNER'
+    if user is not None:
+      params['owner'] = user
+    if group is not None:
+      params['group'] = group
+    self._root.put(path, params)
+
+  def chmod(self, path, mode):
+    """chmod(path, mode)"""
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'SETPERMISSION'
+    params['permission'] = safe_octal(mode)
+    self._root.put(path, params)
+
+  def get_home_dir(self):
+    """get_home_dir() -> Home directory for the current user"""
+    params = self._getparams()
+    params['op'] = 'GETHOMEDIRECTORY'
+    res = self._root.get(params=params)
+    return res['Path']
+
+
+  def read(self, path, offset, length, bufsize=None):
+    """
+    read(path, offset, length[, bufsize]) -> data
+
+    Read data from a file.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'OPEN'
+    params['offset'] = long(offset)
+    params['length'] = long(length)
+    if bufsize is not None:
+      params['bufsize'] = bufsize
+    return self._root.get(path, params)
+
+  def open(self, path, mode='r'):
+    """
+    DEPRECATED!
+    open(path, mode='r') -> File object
+
+    This exists for legacy support and backwards compatibility only.
+    Please use read().
+    """
+    return File(self, path, mode)
+
+
+  def create(self, path, overwrite=False, blocksize=None,
+             replication=None, permission=None, data=None):
+    """
+    create(path, overwrite=False, blocksize=None, replication=None, permission=None)
+
+    Creates a file with the specified parameters.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'CREATE'
+    params['overwrite'] = overwrite and 'true' or 'false'
+    if blocksize is not None:
+      params['blocksize'] = long(blocksize)
+    if replication is not None:
+      params['replication'] = int(replication)
+    if permission is not None:
+      params['permission'] = safe_octal(permission)
+
+    self._invoke_with_redirect('PUT', path, params, data)
+
+
+  def append(self, path, data):
+    """
+    append(path, data)
+
+    Append data to a given file.
+    """
+    path = encode_fs_path(Hdfs.normpath(path))
+    params = self._getparams()
+    params['op'] = 'APPEND'
+    self._invoke_with_redirect('POST', path, params, data)
+
+
+  def _invoke_with_redirect(self, method, path, params=None, data=None):
+    """
+    Issue a request, and expect a redirect, and then submit the data to
+    the redirected location. This is used for create, write, etc.
+
+    Returns the response from the redirected request.
+    """
+    next_url = None
+    try:
+      # Do not pass data in the first leg.
+      self._root.invoke(method, path, params)
+    except WebHdfsException, ex:
+      # This is expected. We get a 307 redirect.
+      # The following call may throw.
+      next_url = self._get_redirect_url(ex)
+
+    if next_url is None:
+      raise WebHdfsException(
+        "Failed to create '%s'. HDFS did not return a redirect" % (path,))
+
+    # Now talk to the real thing. The redirect url already includes the params.
+    client = self._make_client(next_url)
+    return resource.Resource(client).invoke(method, data=data)
+
+
+  def _get_redirect_url(self, webhdfs_ex):
+    """Retrieve the redirect url from an exception object"""
+    try:
+      # The actual HttpError (307) is wrapped inside
+      http_error = webhdfs_ex.get_parent_ex()
+      if http_error is None:
+        raise webhdfs_ex
+
+      if http_error.code not in (301, 302, 303, 307):
+        LOG.error("Response is not a redirect: %s" % (webhdfs_ex,))
+        raise webhdfs_ex
+      return http_error.headers.getheader('location')
+    except Exception, ex:
+      LOG.error("Failed to read redirect from response: %s (%s)" %
+                (webhdfs_ex, ex))
+      raise webhdfs_ex
+
+  def get_delegation_token(self, renewer):
+    """get_delegation_token(user) -> Delegation token"""
+    params = self._getparams()
+    params['op'] = 'GETDELEGATIONTOKEN'
+    params['renewer'] = renewer
+    res = self._root.get(params=params)
+    return res['Token']['urlString']
+
+
+
+class File(object):
+  """
+  DEPRECATED!
+
+  Represent an open file on HDFS. This exists to mirror the old thriftfs
+  interface, for backwards compatibility only.
+  """
+  def __init__(self, fs, path, mode='r'):
+    self._fs = fs
+    self._path = normpath(path)
+    self._pos = 0
+    self._mode = mode
+
+    try:
+      self._stat = fs.stats(path)
+      if self._stat.isDir:
+        raise IOError(errno.EISDIR, "Is a directory: '%s'" % (smart_str(path),))
+    except IOError, ex:
+      if ex.errno == errno.ENOENT and mode == 'r':
+        raise ex
+      self._stat = None
+
+  def seek(self, offset, whence=0):
+    """Set the file pointer to the given spot. @see file.seek"""
+    if whence == SEEK_SET:
+      self._pos = offset
+    elif whence == SEEK_CUR:
+      self._pos += offset
+    elif whence == SEEK_END:
+      self.stat()
+      self._pos = self._fs.stats(self._path).size + offset
+    else:
+      raise IOError(errno.EINVAL, "Invalid argument to seek for whence")
+
+  def stat(self):
+    self._stat = self._fs.stats(self._path)
+    return self._stat
+
+  def tell(self):
+    return self._pos
+
+  def read(self, length=DEFAULT_READ_SIZE):
+    data = self._fs.read(self._path, self._pos, length)
+    self._pos += len(data)
+    return data
+
+  def write(self, data):
+    """Append the data to the end of the file"""
+    self.append(data)
+
+  def append(self, data):
+    if 'w' not in self._mode:
+      raise IOError(errno.EINVAL, "File not open for writing")
+
+    if self._stat is None:
+      # File not there yet.
+      self._fs.create(self._path, data=data)
+      self.stat()
+    else:
+      self._fs.append(self._path, data=data)
+
+  def flush(self):
+    pass
+
+  def close(self):
+    pass
+
+
+def safe_octal(octal_value):
+  """
+  safe_octal(octal_value) -> octal value in string
+
+  This correctly handles octal values specified as a string or as a numeric.
+  """
+  try:
+    return oct(octal_value)
+  except TypeError:
+    return str(octal_value)
+
+def _get_service_url(hdfs_config):
+  override = hdfs_config.WEBHDFS_URL.get()
+  if override:
+    return override
+
+  host = hdfs_config.NN_HOST.get()
+  port = hdfs_config.NN_HTTP_PORT.get()
+  return "http://%s:%s/webhdfs/v1" % (host, port)

+ 78 - 0
desktop/libs/hadoop/src/hadoop/fs/webhdfs_types.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Return types from WebHDFS api calls.
+"""
+
+import stat
+
+from hadoop.fs.hadoopfs import Hdfs
+
+class WebHdfsStat(object):
+  """
+  Information about a path in HDFS.
+
+  Modelled after org.apache.hadoop.fs.FileStatus
+  """
+
+  def __init__(self, file_status, parent_path):
+    self.path = Hdfs.join(parent_path, file_status['pathSuffix'])
+    self.isDir = file_status['type'] == 'DIRECTORY'
+    self.atime = file_status['accessTime'] / 1000
+    self.mtime = file_status['modificationTime'] / 1000
+    self.user = file_status['owner']
+    self.group = file_status['group']
+    self.size = file_status['length']
+    self.blockSize = file_status['blockSize']
+    self.replication = file_status['replication']
+
+    self.mode = int(file_status['permission'], 8)
+    if self.isDir:
+      self.mode |= stat.S_IFDIR
+    else:
+      self.mode |= stat.S_IFREG
+
+  def __str__(self):
+    return "[WebHdfsStat] %7s %8s %8s %12s %s%s" % \
+        (oct(self.mode), self.user, self.group, self.size, self.path,
+         self.isDir and '/' or "")
+
+  def __repr__(self):
+    return "<WebHdfsStat %s>" % (self.path,)
+
+  def __getitem__(self, key):
+    try:
+      return getattr(self, key)
+    except AttributeError:
+      raise KeyError(key)
+
+  def __setitem__(self, key, value):
+    setattr(self, key, value)
+
+
+class WebHdfsContentSummary(object):
+  """
+  Content summary info on a directory
+  """
+  def __init__(self, summary):
+    for k, v in summary.iteritems():
+      setattr(self, k, v)
+
+  def __str__(self):
+    return "[WebHdfsContentSummary] nDirs:%s; nFiles:%s (quota %s); du:%s (quota %s)" % \
+        (self.directoryCount, self.fileCount, self.quota, self.spaceConsumed, self.spaceQuota)
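
The __getitem__/__setitem__ hooks let callers such as the filebrowser keep treating a WebHdfsStat like the stat dictionaries the Thrift path returned. A sketch with made-up FileStatus values:

    from hadoop.fs.webhdfs_types import WebHdfsStat

    # file_status mirrors one FileStatus entry from a LISTSTATUS response
    # (all values are made up for illustration).
    file_status = {
      'pathSuffix': 'hello.txt', 'type': 'FILE',
      'accessTime': 0, 'modificationTime': 0,
      'owner': 'hue', 'group': 'supergroup',
      'length': 12, 'blockSize': 134217728,
      'replication': 1, 'permission': '644',
    }
    st = WebHdfsStat(file_status, '/tmp/demo')
    print st['path'], oct(st['mode'])   # dict-style access via __getitem__
    st['path'] = '/tmp/demo/..'         # and assignment via __setitem__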

+ 328 - 0
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -0,0 +1,328 @@
+
+import atexit
+import logging
+import pwd
+import os
+import shutil
+import signal
+import subprocess
+import tempfile
+import textwrap
+import time
+
+import desktop
+import hadoop
+from hadoop.mini_cluster import find_unused_port, write_config
+
+# Shared global HDFS (for CDH4).
+_shared_cluster = None
+
+LOG = logging.getLogger(__name__)
+
+# Class to use for the cluster's GMSP.
+CLUSTER_GMSP = 'org.apache.hadoop.security.StaticUserGroupMapping'
+
+# users and their groups which are used in Hue tests.
+TEST_USER_GROUP_MAPPING = {
+   'test': ['test','users','supergroup'], 'chown_test': ['chown_test'],
+   'notsuperuser': ['notsuperuser'], 'gamma': ['gamma'],
+   'webui': ['webui'], 'hue': ['supergroup']
+}
+
+# How long we're willing to wait for the cluster to start
+STARTUP_DEADLINE = 60.0
+
+
+class PseudoHdfs4(object):
+  """This class runs HDFS (CDH4) locally, in pseudo-distributed mode"""
+
+  def __init__(self):
+    self._tmpdir = tempfile.mkdtemp()
+    self._superuser = pwd.getpwuid(os.getuid()).pw_name
+    self._fs = None
+
+    self._log_dir = None
+    self._dfs_http_port = None
+    self._dfs_http_address = None
+    self._namenode_port = None
+    self._fs_default_name = None
+
+    self._nn_proc = None
+    self._dn_proc = None
+
+    self.shutdown_hook = None
+
+  def __str__(self):
+    return "PseudoHdfs4 (%s) at %s" % (self._fs_default_name, self._tmpdir)
+
+  @property
+  def superuser(self):
+    return self._superuser
+
+  @property
+  def fs_default_name(self):
+    return self._fs_default_name
+
+  @property
+  def namenode_port(self):
+    return self._namenode_port
+
+  @property
+  def dfs_http_address(self):
+    return self._dfs_http_address
+
+  @property
+  def dfs_http_port(self):
+    return self._dfs_http_port
+
+  @property
+  def fs(self):
+    if self._fs is None:
+      if self._dfs_http_address is None:
+        LOG.warn("Attempt to access uninitialized filesystem")
+        return None
+      self._fs = hadoop.fs.webhdfs.WebHdfs(
+        "http://%s/webhdfs/v1" % (self._dfs_http_address,))
+    return self._fs
+
+  def stop(self):
+    """Kills the cluster ungracefully."""
+    while self._nn_proc is not None and self._nn_proc.poll() is None:
+      os.kill(self._nn_proc.pid, signal.SIGKILL)
+      LOG.info('Stopping NameNode pid %s' % (self._nn_proc.pid,))
+      time.sleep(0.5)
+
+    while self._dn_proc is not None and self._dn_proc.poll() is None:
+      os.kill(self._dn_proc.pid, signal.SIGKILL)
+      LOG.info('Stopping DataNode pid %s' % (self._dn_proc.pid,))
+      time.sleep(0.5)
+
+    self._nn_proc = None
+    self._dn_proc = None
+
+    LOG.info('Cleaning up temp directory "%s"' % (self._tmpdir,))
+    shutil.rmtree(self._tmpdir)
+
+    if self.shutdown_hook is not None:
+      self.shutdown_hook()
+
+
+  def _tmppath(self, filename):
+    """Return a filepath inside temp dir"""
+    return os.path.join(self._tmpdir, filename)
+
+  def _logpath(self, filename):
+    """Return a filepath inside log dir"""
+    return os.path.join(self._log_dir, filename)
+
+  def start(self):
+    LOG.info("Using temporary directory: %s" % (self._tmpdir,))
+
+    # Fix up superuser group mapping
+    if self.superuser not in TEST_USER_GROUP_MAPPING:
+      TEST_USER_GROUP_MAPPING[self.superuser] = [self.superuser]
+
+    # This is where we prepare our Hadoop configuration
+    conf_dir = self._tmppath('conf')
+    os.mkdir(conf_dir)
+
+    self._log_dir = self._tmppath('logs')
+    os.mkdir(self._log_dir)
+
+    # Write out the Hadoop conf files
+    self._write_hadoop_metrics_conf(conf_dir)
+    self._write_core_site()
+    self._write_hdfs_site()
+
+    # More stuff to setup in the environment
+    env = dict(
+      HADOOP_CONF_DIR = conf_dir,
+      HADOOP_CLASSPATH = ":".join([
+        hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()]),
+      HADOOP_HEAPSIZE = "128",
+      HADOOP_LOG_DIR = self._log_dir,
+      USER = self.superuser,
+      LANG = "en_US.UTF-8",
+    )
+
+    if "JAVA_HOME" in os.environ:
+      env['JAVA_HOME'] = os.environ['JAVA_HOME']
+
+    # Format HDFS
+    self._format(conf_dir, env)
+
+    # Run them
+    self._nn_proc = self._start_daemon('namenode', conf_dir, env)
+    self._dn_proc = self._start_daemon('datanode', conf_dir, env)
+
+    # Make sure they're running
+    deadline = time.time() + STARTUP_DEADLINE
+    while not self._is_ready():
+      if time.time() > deadline:
+        self.stop()
+        raise RuntimeError('%s is taking too long to start' % (self,))
+      time.sleep(5)
+
+  def _format(self, conf_dir, env):
+    args = (hadoop.conf.HADOOP_BIN.get(), 
+            '--config', conf_dir,
+            'namenode', '-format')
+    LOG.info('Formatting HDFS: %s' % (args,))
+
+    ignore = file('/dev/null', 'w+')
+    ret = subprocess.call(args, env=env, stdout=ignore, stderr=ignore)
+    if ret != 0:
+      raise RuntimeError('Failed to format namenode')
+
+
+  def _is_ready(self):
+    def log_exit(exit_code, proc_name):
+      LOG.info('%s exited with %s' % (proc_name, exit_code))
+      LOG.debug('--------------------- STDOUT:\n' +
+                file(self._logpath(proc_name + '.stdout')).read())
+      LOG.debug('--------------------- STDERR:\n' +
+                file(self._logpath(proc_name + '.stderr')).read())
+
+    if self._nn_proc.poll() is not None:
+      log_exit(self._nn_proc.poll(), 'namenode')
+      return False
+    if self._dn_proc.poll() is not None:
+      log_exit(self._dn_proc.poll(), 'datanode')
+      return False
+
+    # Run a `dfsadmin -report' against it
+    dfsreport = subprocess.Popen(
+      (hadoop.conf.HADOOP_BIN.get(),
+       'dfsadmin',
+       '-Dfs.default.name=%s' % self._fs_default_name,
+       '-report'),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE)
+
+    ret = dfsreport.wait()
+    if ret != 0:
+      LOG.debug('DFS not ready yet.\n%s\n%s' %
+                (dfsreport.stderr.read(), dfsreport.stdout.read()))
+      return False
+
+    # Check that the DN is servicing
+    report_out = dfsreport.stdout.read()
+    if 'Datanodes available: 1' in report_out:
+      return True
+    LOG.debug('Waiting for DN to come up .................\n%s' % (report_out,))
+    return False
+
+
+  def _start_daemon(self, proc_name, conf_dir, env):
+    """Start a hadoop daemon. Returns the Popen object."""
+    args = (hadoop.conf.HADOOP_BIN.get(), 
+            '--config', conf_dir,
+            proc_name)
+    LOG.info('Starting pseudo HDFS4 cluster: %s' % (args,))
+    stdout = file(self._logpath(proc_name + ".stdout"), 'w')
+    stderr = file(self._logpath(proc_name + ".stderr"), 'w')
+
+    return subprocess.Popen(
+      args=args,
+      stdout=stdout,
+      stderr=stderr,
+      env=env)
+
+  def _write_hdfs_site(self):
+    self._dfs_http_port = find_unused_port()
+    self._dfs_http_address = 'localhost:%s' % (self._dfs_http_port,)
+
+    hdfs_configs = {
+      'dfs.webhdfs.enabled': 'true',
+      'dfs.http.address': self._dfs_http_address,
+      'dfs.namenode.safemode.extension': 1,
+      'dfs.namenode.safemode.threshold-pct': 0,
+      'dfs.replication': 1,
+      'dfs.safemode.min.datanodes': 1,
+    }
+    write_config(hdfs_configs, self._tmppath('conf/hdfs-site.xml'))
+
+  def _write_core_site(self):
+    # Prep user group mapping file
+    ugm_properties = self._tmppath('ugm.properties')
+    self._write_static_group_mapping(ugm_properties)
+    self._namenode_port = find_unused_port()
+    self._fs_default_name = 'hdfs://localhost:%s' % (self._namenode_port,)
+
+    core_configs = {
+      'fs.default.name': self._fs_default_name,
+      'hadoop.security.authorization': 'true',
+      'hadoop.security.authentication': 'simple',
+      'hadoop.security.group.mapping': CLUSTER_GMSP,
+      'hadoop.security.static.group.mapping.file': ugm_properties,
+      'hadoop.proxyuser.%s.groups' % (self.superuser,): 'users,supergroup',
+      'hadoop.proxyuser.%s.hosts' % (self.superuser,): 'localhost',
+      'hadoop.tmp.dir': self._tmppath('hadoop_tmp_dir'),
+    }
+    write_config(core_configs, self._tmppath('conf/core-site.xml'))
+
+  def _write_hadoop_metrics_conf(self, conf_dir):
+    f = file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
+    try:
+      f.write(textwrap.dedent("""
+          dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
+          mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
+          jvm.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
+          rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
+          """))
+    finally:
+      f.close()
+
+  def _write_static_group_mapping(self, filename):
+    f = file(filename, 'w')
+    try:
+      for user, groups in TEST_USER_GROUP_MAPPING.iteritems():
+        f.write('%s = %s\n' % (user, ','.join(groups)))
+    finally:
+      f.close()
+
+
+def shared_cluster():
+  """Create a shared cluster"""
+  global _shared_cluster
+  if _shared_cluster is None:
+    cluster = PseudoHdfs4()
+    atexit.register(cluster.stop)
+    cluster.start()
+
+    # Fix config to reflect the cluster setup.
+    closers = [
+      hadoop.conf.HDFS_CLUSTERS['default'].NN_HOST.set_for_testing('localhost'),
+      hadoop.conf.HDFS_CLUSTERS['default'].NN_HTTP_PORT.set_for_testing(cluster.dfs_http_port),
+      hadoop.conf.HDFS_CLUSTERS['default'].NN_HDFS_PORT.set_for_testing(cluster.namenode_port),
+    ]
+
+    desktop.lib.fsmanager.reset()
+    old = hadoop.cluster.clear_caches()
+
+    def restore_config():
+      hadoop.cluster.restore_caches(old)
+      for x in closers:
+        x()
+
+    cluster.shutdown_hook = restore_config
+    _shared_cluster = cluster
+  return _shared_cluster
+
+
+#
+# Simply try to exercise it
+#
+if __name__ == '__main__':
+  logging.basicConfig(level=logging.DEBUG)
+  desktop.lib.conf.initialize([hadoop.conf])
+
+  cluster = PseudoHdfs4()
+  cluster.start()
+  LOG.info("%s running" % (cluster,))
+  LOG.info("fs.default.name=%s" % (cluster.fs_default_name,))
+  LOG.info("dfs.http.address=%s" % (cluster.dfs_http_address,))
+
+  from IPython.Shell import IPShellEmbed
+  IPShellEmbed()()
+  cluster.stop()
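
A hedged sketch of how a test might lean on the new module; the test name and paths are made up, and a local CDH4 install reachable via hadoop_home is assumed:

    from hadoop import pseudo_hdfs4

    def test_webhdfs_roundtrip():
      cluster = pseudo_hdfs4.shared_cluster()   # started once, torn down atexit
      fs = cluster.fs                           # a WebHdfs instance
      fs.setuser(cluster.superuser)
      fs.mkdir('/tmp/test_roundtrip')
      assert fs.isdir('/tmp/test_roundtrip')
      fs.rmtree('/tmp/test_roundtrip')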