
HUE-7803 [core] Remove deprecated HadoopFileSystem Python part

Romain Rigaux 8 years ago
commit 71cfd66
1 changed file with 1 addition and 459 deletions
  1. desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py  +1 -459

+1 -459   desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py

@@ -27,19 +27,15 @@ import logging
 import os
 import posixpath
 import random
-import stat as statconsts
 import subprocess
 import urlparse
-import threading
 
 from thrift.transport import TTransport
 
 from django.utils.encoding import smart_str, force_unicode
 from django.utils.translation import ugettext as _
-from desktop.lib import thrift_util, i18n
+from desktop.lib import i18n
 from desktop.lib.conf import validate_port
-from hadoop.api.hdfs import Namenode, Datanode
-from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
 from hadoop.api.common.ttypes import RequestContext, IOException
 import hadoop.conf
 from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
@@ -361,461 +357,7 @@ class Hdfs(object):
     raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'listdir_stats'})
 
 
-"""
-Deprecated! Use WebHdfs instead
-"""
-class HadoopFileSystem(Hdfs):
-  """
-  Implementation of Filesystem APIs through Thrift to a Hadoop cluster.
-  """
-
-  def __init__(self, host, thrift_port, hdfs_port=8020,
-               nn_kerberos_principal="hdfs",
-               dn_kerberos_principal="hdfs",
-               security_enabled=False,
-               hadoop_bin_path="hadoop",
-               temp_dir='/tmp'):
-    """
-    @param host hostname or IP of the namenode
-    @param thrift_port port on which the Thrift plugin is listening
-    @param hdfs_port port on which NameNode IPC is listening
-    @param hadoop_bin_path path to find the hadoop wrapper script on the
-                           installed system - default is fine if it is in
-                           the user's PATH env
-    @param temp_dir Temporary directory, for mktemp()
-    """
-    self.host = host
-    self.thrift_port = thrift_port
-    self.hdfs_port = hdfs_port
-    self.security_enabled = security_enabled
-    self.nn_kerberos_principal = nn_kerberos_principal
-    self.dn_kerberos_principal = dn_kerberos_principal
-    self.hadoop_bin_path = hadoop_bin_path
-    self._resolve_hadoop_path()
-    self.security_enabled = security_enabled
-    self._temp_dir = temp_dir
-
-    self.nn_client = thrift_util.get_client(
-      Namenode.Client, host, thrift_port,
-      service_name="HDFS Namenode HUE Plugin",
-      use_sasl=security_enabled,
-      kerberos_principal=nn_kerberos_principal,
-      timeout_seconds=NN_THRIFT_TIMEOUT)
-
-    # The file systems are cached globally.  We store
-    # user information in a thread-local variable so that
-    # safety can be preserved there.
-    self.thread_local = threading.local()
-    self.setuser(DEFAULT_USER)
-    LOG.debug("Initialized HadoopFS: %s:%d (%s)", host, thrift_port, hadoop_bin_path)
-
-  @classmethod
-  def from_config(cls, fs_config, hadoop_bin_path="hadoop"):
-    return cls(host=fs_config.NN_HOST.get(),
-               thrift_port=fs_config.NN_THRIFT_PORT.get(),
-               hdfs_port=fs_config.NN_HDFS_PORT.get(),
-               security_enabled=fs_config.SECURITY_ENABLED.get(),
-               nn_kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
-               dn_kerberos_principal=fs_config.DN_KERBEROS_PRINCIPAL.get(),
-               hadoop_bin_path=hadoop_bin_path)
-
-
-  def _get_hdfs_base(self):
-    return "hdfs://%s:%d" % (self.host, self.hdfs_port) # TODO(todd) fetch the port from the NN thrift
-
-  def _resolve_hadoop_path(self):
-    """The hadoop_bin_path configuration may be a non-absolute path, in which case
-    it's checked against $PATH.
-
-    If the hadoop binary can't be found anywhere, raises an Exception.
-    """
-    for path_dir in os.getenv("PATH", "").split(os.pathsep):
-      path = os.path.join(path_dir, self.hadoop_bin_path)
-      if os.path.exists(path):
-        self.hadoop_bin_path = os.path.abspath(path)
-        return
-    raise OSError(errno.ENOENT, "Hadoop binary (%s) does not exist." % (self.hadoop_bin_path,))
-
-  @property
-  def uri(self):
-    return self._get_hdfs_base()
-
-  @property
-  def superuser(self):
-    """
-    Retrieves the user that Hadoop considers as
-    "superuser" by looking at ownership of /.
-    This is slightly inaccurate.
-    """
-    return self.stats("/")["user"]
-
-  def setuser(self, user):
-    # Hadoop determines the groups the user belongs to on the server side.
-    self.thread_local.request_context = RequestContext()
-    if not self.request_context.confOptions:
-      self.request_context.confOptions = {}
-    self.thread_local.request_context.confOptions['effective_user'] = user
-    self.thread_local.user = user
-
-  @property
-  def user(self):
-    return self.thread_local.user
-
-  @property
-  def groups(self):
-    return self.thread_local.groups
-
-  @property
-  def request_context(self):
-    return self.thread_local.request_context
-
-  @_coerce_exceptions
-  def open(self, path, mode="r", *args, **kwargs):
-    if mode == "w":
-      return FileUpload(self, path, mode, *args, **kwargs)
-    return File(self, path, mode, *args, **kwargs)
-
-  @_coerce_exceptions
-  def remove(self, path):
-    path = encode_fs_path(path)
-    stat = self._hadoop_stat(path)
-    if not stat:
-      raise IOError(errno.ENOENT, "File not found: %s" % path)
-    if stat.isDir:
-      raise IOError(errno.EISDIR, "Is a directory: %s" % path)
-
-    success = self.nn_client.unlink(
-      self.request_context, normpath(path), recursive=False)
-    if not success:
-      raise IOError("Unlink failed")
-
-  @_coerce_exceptions
-  def mkdir(self, path, mode=0755):
-    # TODO(todd) there should be a mkdir that isn't mkdirHIER
-    # (this is mkdir -p I think)
-    path = encode_fs_path(path)
-    success = self.nn_client.mkdirhier(self.request_context, normpath(path), mode)
-    if not success:
-      raise IOError("mkdir failed")
-
-  def _rmdir(self, path, recursive=False):
-    path = encode_fs_path(path)
-    stat = self._hadoop_stat(path)
-    if not stat:
-      raise IOError(errno.ENOENT, "Directory not found: %s" % (path,))
-    if not stat.isDir:
-      raise IOError(errno.EISDIR, "Is not a directory: %s" % (path,))
-
-    success = self.nn_client.unlink(
-      self.request_context, normpath(path), recursive=recursive)
-    if not success:
-      raise IOError("Unlink failed")
-
-  @_coerce_exceptions
-  def rmdir(self, path):
-    return self._rmdir(path)
-
-  @_coerce_exceptions
-  def rmtree(self, path):
-    return self._rmdir(path, True)
-
-  @_coerce_exceptions
-  def listdir(self, path):
-    path = encode_fs_path(path)
-    stats = self.nn_client.ls(self.request_context, normpath(path))
-    return [self.basename(decode_fs_path(stat.path)) for stat in stats]
 
-  @_coerce_exceptions
-  def listdir_stats(self, path):
-    path = encode_fs_path(path)
-    stats = self.nn_client.ls(self.request_context, normpath(path))
-    return [self._unpack_stat(s) for s in stats]
-
-  @_coerce_exceptions
-  def get_content_summaries(self, paths):
-    paths = [ normpath(encode_fs_path(path)) for path in paths ]
-    summaries = self.nn_client.multiGetContentSummary(self.request_context, paths)
-    def _fix_summary(summary):
-      summary.path = decode_fs_path(summary.path)
-      return summary
-    return [_fix_summary(s) for s in summaries]
-
-  @_coerce_exceptions
-  def rename(self, old, new):
-    old = encode_fs_path(old)
-    new = encode_fs_path(new)
-    success = self.nn_client.rename(
-      self.request_context, normpath(old), normpath(new))
-    if not success: #TODO(todd) these functions should just throw if failed
-      raise IOError("Rename failed")
-
-  @_coerce_exceptions
-  def rename_star(self, old_dir, new_dir):
-    """Equivalent to `mv old_dir/* new"""
-    if not self.isdir(old_dir):
-      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (old_dir,))
-    if not self.exists(new_dir):
-      self.mkdir(new_dir)
-    elif not self.isdir(new_dir):
-      raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (new_dir,))
-    ls = self.listdir(old_dir)
-    for dirent in ls:
-      self.rename(HadoopFileSystem.join(old_dir, dirent),
-                  HadoopFileSystem.join(new_dir, dirent))
-
-  @_coerce_exceptions
-  def exists(self, path):
-    stat = self._hadoop_stat(path)
-    return stat is not None
-
-  @_coerce_exceptions
-  def isfile(self, path):
-    stat = self._hadoop_stat(path)
-    if stat is None:
-      return False
-    return not stat.isDir
-
-  @_coerce_exceptions
-  def isdir(self, path):
-    stat = self._hadoop_stat(path)
-    if stat is None:
-      return False
-    return stat.isDir
-
-  @_coerce_exceptions
-  def stats(self, path, raise_on_fnf=True):
-    stat = self._hadoop_stat(path)
-    if not stat:
-      if raise_on_fnf:
-        raise IOError(errno.ENOENT, "File %s not found" % (path,))
-      else:
-        return None
-    ret = self._unpack_stat(stat)
-    return ret
-
-  @_coerce_exceptions
-  def chmod(self, path, mode):
-    path = encode_fs_path(path)
-    self.nn_client.chmod(self.request_context, normpath(path), mode)
-
-  @_coerce_exceptions
-  def chown(self, path, user, group):
-    path = encode_fs_path(path)
-    self.nn_client.chown(self.request_context, normpath(path), user, group)
-
-  @_coerce_exceptions
-  def get_namenode_info(self):
-    (capacity, used, available) = self.nn_client.df(self.request_context)
-    return dict(
-      usage=dict(capacity_bytes=capacity,
-                 used_bytes=used,
-                 available_bytes=available),
-      )
-
-  @_coerce_exceptions
-  def _get_blocks(self, path, offset, length):
-    """
-    Get block locations from the Name Node. Returns an array of Block
-    instances that might look like:
-      [ Block(path='/user/todd/motd', genStamp=1001, blockId=5564389078175231298,
-        nodes=[DatanodeInfo(xceiverCount=1, capacity=37265149952, name='127.0.0.1:50010',
-        thriftPort=53417, state=1, remaining=18987925504, host='127.0.0.1',
-        storageID='DS-1238582576-127.0.1.1-50010-1240968238474', dfsUsed=36864)], numBytes=424)]
-    """
-    path = encode_fs_path(path)
-    blocks = self.nn_client.getBlocks(self.request_context, normpath(path), offset, length)
-    def _fix_block(blk):
-      blk.path = decode_fs_path(blk.path)
-      return blk
-    return [_fix_block(blk) for blk in blocks]
-
-
-  def _hadoop_stat(self, path):
-    """Returns None if file does not exist."""
-    path = encode_fs_path(path)
-    try:
-      stat = self.nn_client.stat(self.request_context, normpath(path))
-      stat.path = decode_fs_path(stat.path)
-      return stat
-    except IOException, ioe:
-      if ioe.clazz == 'java.io.FileNotFoundException':
-        return None
-      raise
-
-  @_coerce_exceptions
-  def _read_block(self, block, offset, len):
-    """
-    Reads a chunk of data from the given block from the first available
-    datanode that serves it.
-
-    @param block a thrift Block object
-    @param offset offset from the beginning of the block (not file)
-    @param len the number of bytes to read
-    """
-    errs = []
-    unipath = block.path
-    block.path = encode_fs_path(block.path)
-    try:
-      for node in block.nodes:
-        dn_conn = self._connect_dn(node)
-        try:
-          try:
-            data = dn_conn.readBlock(self.request_context, block, offset, len)
-            return data.data
-          except Exception, e:
-            errs.append(e)
-        finally:
-          dn_conn.close()
-    finally:
-      block.path = unipath
-
-    raise IOError("Could not read block %s from any replicas: %s" % (block, repr(errs)))
-
-  @_coerce_exceptions
-  def set_diskspace_quota(self, path, size):
-    """
-    Set the diskspace quota of a given path.
-    @param path The path to the given hdfs resource
-    @param size The amount of bytes that a given subtree of files can grow to.
-    """
-    path = encode_fs_path(path)
-    if normpath(path) == '/':
-      raise ValueError('Cannot set quota for "/"')
-
-    if size < 0:
-      raise ValueError("The size quota should be 0 or positive or unset")
-
-    self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, size)
-
-
-  @_coerce_exceptions
-  def set_namespace_quota(self, path, num_files):
-    """
-    Set the maximum number of files of a given path.
-    @param path The path to the given hdfs resource
-    @param num_files The amount of files that can exist within that subtree.
-    """
-    path = encode_fs_path(path)
-    if normpath(path) == '/':
-      raise ValueError('Cannot set quota for "/"')
-
-    if num_files < 0:
-      raise ValueError("The number of files quota should be 0 or positive or unset")
-
-    self.nn_client.setQuota(self.request_context, normpath(path), num_files, QUOTA_DONT_SET)
-
-  @_coerce_exceptions
-  def clear_diskspace_quota(self, path):
-    """
-    Remove the diskspace quota at a given path
-    """
-    path = encode_fs_path(path)
-    self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, QUOTA_RESET)
-
-  @_coerce_exceptions
-  def clear_namespace_quota(self, path):
-    """
-    Remove the namespace quota at a given path
-    """
-    path = encode_fs_path(path)
-    self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_RESET, QUOTA_DONT_SET)
-
-
-  @_coerce_exceptions
-  def get_diskspace_quota(self, path):
-    """
-    Get the current space quota in bytes for disk space. None if it is unset
-    """
-    path = encode_fs_path(path)
-    space_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).spaceQuota
-    if space_quota == QUOTA_RESET or space_quota == QUOTA_DONT_SET:
-      return None
-    else:
-      return space_quota
-
-
-  @_coerce_exceptions
-  def get_namespace_quota(self, path):
-    """
-    Get the current quota in number of files. None if it is unset
-    """
-    path = encode_fs_path(path)
-    file_count_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).quota
-    if file_count_quota == QUOTA_RESET or file_count_quota == QUOTA_DONT_SET:
-      return None
-    else:
-      return file_count_quota
-
-  @_coerce_exceptions
-  def get_usage_and_quota(self, path):
-    """
-    Returns a dictionary with "file_count", "file_quota",
-    "space_used", and "space_quota".  The quotas
-    may be None.
-    """
-    path = encode_fs_path(path)
-    summary = self.nn_client.getContentSummary(self.request_context, normpath(path))
-    ret = dict()
-    ret["file_count"] = summary.fileCount
-    ret["space_used"] = summary.spaceConsumed
-    if summary.quota in (QUOTA_RESET, QUOTA_DONT_SET):
-      ret["file_quota"] = None
-    else:
-      ret["file_quota"] = summary.quota
-    if summary.spaceQuota in (QUOTA_RESET, QUOTA_DONT_SET):
-      ret["space_quota"] = None
-    else:
-      ret["space_quota"] = summary.spaceQuota
-    return ret
-
-  @_coerce_exceptions
-  def get_delegation_token(self):
-    # TODO(atm): The second argument here should really be the Hue kerberos
-    # principal, which doesn't exist yet. Todd's working on that.
-    return self.nn_client.getDelegationToken(self.request_context, 'hadoop')
-
-  def _connect_dn(self, node):
-    dn_conf = thrift_util.ConnectionConfig(
-      Datanode.Client,
-      node.host,
-      node.thriftPort,
-      "HDFS Datanode Thrift",
-      use_sasl=self.security_enabled,
-      kerberos_principal=self.dn_kerberos_principal,
-      timeout_seconds=DN_THRIFT_TIMEOUT)
-
-    service, protocol, transport = \
-        thrift_util.connect_to_thrift(dn_conf)
-    transport.open()
-    service.close = lambda: transport.close()
-    return service
-
-  @staticmethod
-  def _unpack_stat(stat):
-    """Unpack a Thrift "Stat" object into a dictionary that looks like fs.stat"""
-    mode = stat.perms
-    if stat.isDir:
-      mode |= statconsts.S_IFDIR
-    else:
-      mode |= statconsts.S_IFREG
-
-    return {
-      'path': decode_fs_path(stat.path),
-      'size': stat.length,
-      'mtime': stat.mtime / 1000,
-      'mode': mode,
-      'user': stat.owner,
-      'group': stat.group,
-      'atime': stat.atime
-      }
-
-  @staticmethod
-  def urlsplit(url):
-    """
-    Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
-    the standard urlsplit's 5-tuple.
-    """
-    return Hdfs.urlsplit(url)
 
 
 def require_open(func):
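
The class removed above carried its own replacement notice ("Deprecated! Use WebHdfs instead"), and the WebHDFS-backed client implements the same Hdfs interface (open, listdir, listdir_stats, stats, mkdir, rename, ...) that the Thrift implementation provided. The sketch below shows what a caller of the removed class would switch to; it is a minimal illustration only, and the helper name hadoop.cluster.get_hdfs() and the exact shape of the returned stat objects are assumptions based on the surrounding Hue codebase, not part of this commit.

# Sketch, not from this commit: assumes Hue's WebHDFS client is configured in
# hue.ini and exposed through hadoop.cluster.get_hdfs().
from hadoop import cluster

fs = cluster.get_hdfs()        # default WebHdfs instance (assumed helper)
fs.setuser('hue')              # impersonated user, as setuser() did above

if not fs.exists('/user/hue/demo'):
  fs.mkdir('/user/hue/demo', 0o755)

# listdir_stats() plays the role of the removed _unpack_stat() dicts; the
# attribute names on the returned stat objects are an assumption here.
paths = [stat.path for stat in fs.listdir_stats('/user/hue')]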