
HUE-8737 [core] Futurize desktop/libs/hadoop for Python 3.5

Ying Chen, 6 years ago
commit e476ead4f2
27 changed files with 250 additions and 158 deletions
  1. desktop/libs/hadoop/src/hadoop/cluster.py (+6 -6)
  2. desktop/libs/hadoop/src/hadoop/conf.py (+5 -5)
  3. desktop/libs/hadoop/src/hadoop/core_site.py (+4 -2)
  4. desktop/libs/hadoop/src/hadoop/fs/__init__.py (+18 -7)
  5. desktop/libs/hadoop/src/hadoop/fs/fs_test.py (+3 -3)
  6. desktop/libs/hadoop/src/hadoop/fs/fsutils.py (+1 -1)
  7. desktop/libs/hadoop/src/hadoop/fs/fsutils_tests.py (+1 -1)
  8. desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py (+20 -10)
  9. desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py (+43 -38)
  10. desktop/libs/hadoop/src/hadoop/fs/upload.py (+5 -4)
  11. desktop/libs/hadoop/src/hadoop/fs/webhdfs.py (+33 -21)
  12. desktop/libs/hadoop/src/hadoop/fs/webhdfs_types.py (+7 -3)
  13. desktop/libs/hadoop/src/hadoop/hdfs_site.py (+4 -3)
  14. desktop/libs/hadoop/src/hadoop/mini_cluster.py (+29 -17)
  15. desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py (+25 -23)
  16. desktop/libs/hadoop/src/hadoop/ssl_client_site.py (+4 -3)
  17. desktop/libs/hadoop/src/hadoop/test_base.py (+1 -0)
  18. desktop/libs/hadoop/src/hadoop/test_hdfs_site.py (+2 -1)
  19. desktop/libs/hadoop/src/hadoop/test_ssl_client_site.py (+2 -1)
  20. desktop/libs/hadoop/src/hadoop/tests.py (+9 -3)
  21. desktop/libs/hadoop/src/hadoop/yarn/clients.py (+10 -2)
  22. desktop/libs/hadoop/src/hadoop/yarn/history_server_api.py (+1 -0)
  23. desktop/libs/hadoop/src/hadoop/yarn/mapreduce_api.py (+2 -0)
  24. desktop/libs/hadoop/src/hadoop/yarn/node_manager_api.py (+1 -0)
  25. desktop/libs/hadoop/src/hadoop/yarn/resource_manager_api.py (+2 -1)
  26. desktop/libs/hadoop/src/hadoop/yarn/spark_history_server_api.py (+10 -2)
  27. desktop/libs/hadoop/src/hadoop/yarn/tests.py (+2 -1)

+ 6 - 6
desktop/libs/hadoop/src/hadoop/cluster.py

@@ -43,7 +43,7 @@ def rm_ha(funct):
   def decorate(api, *args, **kwargs):
     try:
       return funct(api, *args, **kwargs)
-    except Exception, ex:
+    except Exception as ex:
       ex_message = str(ex)
       if 'Connection refused' in ex_message or 'Connection aborted' in ex_message or 'standby RM' in ex_message:
         LOG.info('Resource Manager not available, trying another RM: %s.' % ex)
@@ -79,7 +79,7 @@ def get_all_hdfs():
     return FS_CACHE
 
   FS_CACHE = {}
-  for identifier in conf.HDFS_CLUSTERS.keys():
+  for identifier in list(conf.HDFS_CLUSTERS.keys()):
    FS_CACHE[identifier] = _make_filesystem(identifier)
  return FS_CACHE
 
@@ -108,7 +108,7 @@ def get_yarn():
  if MR_NAME_CACHE in conf.YARN_CLUSTERS and conf.YARN_CLUSTERS[MR_NAME_CACHE].SUBMIT_TO.get():
    return conf.YARN_CLUSTERS[MR_NAME_CACHE]
 
-  for name in conf.YARN_CLUSTERS.keys():
+  for name in list(conf.YARN_CLUSTERS.keys()):
    yarn = conf.YARN_CLUSTERS[name]
    if yarn.SUBMIT_TO.get():
      return yarn
@@ -121,9 +121,9 @@ def get_next_ha_yarncluster(current_user=None):
  from hadoop.yarn.resource_manager_api import ResourceManagerApi
  global MR_NAME_CACHE
 
-  has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in conf.YARN_CLUSTERS.keys()]) >= 2
+  has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in list(conf.YARN_CLUSTERS.keys())]) >= 2
 
-  for name in conf.YARN_CLUSTERS.keys():
+  for name in list(conf.YARN_CLUSTERS.keys()):
    config = conf.YARN_CLUSTERS[name]
    if config.SUBMIT_TO.get():
      rm = ResourceManagerApi(config.RESOURCE_MANAGER_API_URL.get(), config.SECURITY_ENABLED.get(), config.SSL_CERT_CA_VERIFY.get())
@@ -143,7 +143,7 @@ def get_next_ha_yarncluster(current_user=None):
            return (config, rm)
          else:
            LOG.info('RM %s is not RUNNING, skipping it: %s' % (name, cluster_info))
-        except Exception, ex:
+        except Exception as ex:
          LOG.exception('RM %s is not available, skipping it: %s' % (name, ex))
      else:
        return (config, rm)
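
Note: the two changes in this file recur throughout the commit. A minimal standalone sketch (not part of the patch) of both patterns, using a hypothetical dict in place of Hue's conf objects:

# Hypothetical data standing in for conf.YARN_CLUSTERS / conf.HDFS_CLUSTERS.
clusters = {'default': 'http://rm1:8088', 'ha': 'http://rm2:8088'}

# Python 3 returns a view from dict.keys(); wrapping in list() restores the
# Python 2 list semantics, which matters if the dict is mutated while iterating.
for name in list(clusters.keys()):
    if name != 'default':
        del clusters[name]

try:
    raise IOError('Connection refused')
except Exception as ex:  # 'except Exception, ex:' is a SyntaxError on Python 3
    print('Resource Manager not available, trying another RM: %s.' % ex)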

+ 5 - 5
desktop/libs/hadoop/src/hadoop/conf.py

@@ -55,7 +55,7 @@ UPLOAD_CHUNK_SIZE = Config(
 
 
 def has_hdfs_enabled():
-  return HDFS_CLUSTERS.keys()
+  return list(HDFS_CLUSTERS.keys())
 
 def get_hadoop_conf_dir_default():
   """ get from environment variable HADOOP_CONF_DIR or "/etc/hadoop/conf" """
@@ -213,7 +213,7 @@ def config_validator(user):
 
   # HDFS_CLUSTERS
   has_default = False
-  for name in HDFS_CLUSTERS.keys():
+  for name in list(HDFS_CLUSTERS.keys()):
    cluster = HDFS_CLUSTERS[name]
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
@@ -222,7 +222,7 @@ def config_validator(user):
    res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))
 
  # YARN_CLUSTERS
-  for name in YARN_CLUSTERS.keys():
+  for name in list(YARN_CLUSTERS.keys()):
    cluster = YARN_CLUSTERS[name]
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)
@@ -259,13 +259,13 @@ def test_yarn_configurations(user):
 
  try:
    from jobbrowser.api import get_api # Required for cluster HA testing
-  except Exception, e:
+  except Exception as e:
    LOG.warn('Jobbrowser is disabled, skipping test_yarn_configurations')
    return result
 
  try:
    get_api(user, None).get_jobs(user, username=user.username, state='all', text='')
-  except Exception, e:
+  except Exception as e:
    msg = 'Failed to contact an active Resource Manager: %s' % e
    LOG.exception(msg)
    result.append(('Resource Manager', msg))

+ 4 - 2
desktop/libs/hadoop/src/hadoop/core_site.py

@@ -15,10 +15,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import absolute_import
 import errno
 import logging
 
-import confparse
+from hadoop import conf
+from hadoop import confparse
 
 from desktop.lib.paths import get_config_root_hadoop
 
@@ -65,7 +67,7 @@ def _parse_core_site():
   try:
     _CORE_SITE_PATH = get_config_root_hadoop('core-site.xml')
     data = file(_CORE_SITE_PATH, 'r').read()
-  except IOError, err:
+  except IOError as err:
     if err.errno != errno.ENOENT:
       LOG.error('Cannot read from "%s": %s' % (_CORE_SITE_PATH, err))
       return

+ 18 - 7
desktop/libs/hadoop/src/hadoop/fs/__init__.py

@@ -29,8 +29,14 @@ We maintain this usage of paths as arguments.
 When possible, the interfaces here have fidelity to the
 native python interfaces.
 """
-
-import __builtin__
+from __future__ import division
+from future import standard_library
+from functools import reduce
+standard_library.install_aliases()
+from builtins import map
+from builtins import range
+from past.utils import old_div
+from builtins import object
 import errno
 import grp
 import logging
@@ -42,6 +48,11 @@ import shutil
 import stat
 import sys
 
+if sys.version_info[0] > 2:
+  from builtins import open as builtins_open
+else:
+  from __builtin__ import open as builtins_open
+
 SEEK_SET, SEEK_CUR, SEEK_END = os.SEEK_SET, os.SEEK_CUR, os.SEEK_END
 
 
@@ -150,7 +161,7 @@ class LocalSubFileSystem(object):
     if paths is None and 0 not in users and 0 not in groups:
       paths = [0]
     # complicated way of taking the intersection of three lists.
-    assert not reduce(set.intersection, map(set, [paths, users, groups]))
+    assert not reduce(set.intersection, list(map(set, [paths, users, groups])))
     def wrapped(*args):
       self = args[0]
       newargs = list(args[1:])
@@ -166,7 +177,7 @@ class LocalSubFileSystem(object):
     return wrapped
 
   # These follow their namesakes.
-  open = _wrap(__builtin__.open)
+  open = _wrap(builtins_open)
   remove = _wrap(os.remove)
   mkdir = _wrap(os.mkdir)
   rmdir = _wrap(os.rmdir)
@@ -189,7 +200,7 @@ class LocalSubFileSystem(object):
     path = self._resolve_path(path)
     try:
       statobj = os.stat(path)
-    except OSError, ose:
+    except OSError as ose:
       if ose.errno == errno.ENOENT and not raise_on_fnf:
         return None
       raise
@@ -240,9 +251,9 @@ class FakeStatus(object):
     o = dict()
     GB = 1024*1024*1024
     o["bytesTotal"] = 5*GB
-    o["bytesUsed"] = 5*GB/2
+    o["bytesUsed"] = old_div(5*GB,2)
     o["bytesRemaining"] = 2*GB
-    o["bytesNonDfs"] = GB/2
+    o["bytesNonDfs"] = old_div(GB,2)
     o["liveDataNodes"] = 13
     o["deadDataNodes"] = 2
     o["upgradeStatus"] = dict(version=13, percentComplete=100, finalized=True)

+ 3 - 3
desktop/libs/hadoop/src/hadoop/fs/fs_test.py

@@ -107,7 +107,7 @@ def test_hdfs_copy():
   copy_test_dst = minicluster.fs_prefix + '/copy_test_dst'
   try:
     data = "I will not make flatulent noises in class\n" * 2000
-    minifs.create(copy_test_src, permission=0646, data=data)
+    minifs.create(copy_test_src, permission=0o646, data=data)
     minifs.create(copy_test_dst, data="some initial data")
 
     minifs.copyfile(copy_test_src, copy_test_dst)
@@ -115,7 +115,7 @@ def test_hdfs_copy():
     assert_equal(data, actual)
 
     sb = minifs.stats(copy_test_dst)
-    assert_equal(0646, stat.S_IMODE(sb.mode))
+    assert_equal(0o646, stat.S_IMODE(sb.mode))
   finally:
     minifs.do_as_superuser(minifs.rmtree, copy_test_src)
     minifs.do_as_superuser(minifs.rmtree, copy_test_dst)
@@ -137,7 +137,7 @@ def test_hdfs_full_copy():
     # File to directory copy.
     # No guarantees on file permissions at the moment.
     data = "I will not make flatulent noises in class\n" * 2000
-    minifs.create(prefix + '/src/file.txt', permission=0646, data=data)
+    minifs.create(prefix + '/src/file.txt', permission=0o646, data=data)
     minifs.copy(prefix + '/src/file.txt', prefix + '/dest')
     assert_true(minifs.exists(prefix + '/dest/file.txt'))
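
For reference, a tiny standalone sketch of the octal-literal change used here and in the other files: 0646 no longer parses on Python 3, while 0o646 is accepted by Python 2.6+ and 3 and denotes the same mode bits.

import stat

mode = 0o646                        # rw-r--rw-, same value the old literal 0646 had
assert mode == 422                  # decimal equivalent
assert stat.S_IMODE(mode) == 0o646  # permission bits survive a round trip
print('%o' % mode)                  # -> 646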

+ 1 - 1
desktop/libs/hadoop/src/hadoop/fs/fsutils.py

@@ -29,7 +29,7 @@ def do_overwrite_save(fs, path, data):
         try:
             fs.create(path_dest, overwrite=False, data=data)
             logging.info("Wrote to " + path_dest)
-        except Exception, e:
+        except Exception as e:
             # An error occurred in writing, we should clean up
             # the tmp file if it exists, before re-raising
             try:

+ 1 - 1
desktop/libs/hadoop/src/hadoop/fs/fsutils_tests.py

@@ -43,7 +43,7 @@ class FsUtilsTests(unittest.TestCase):
   def tearDown(self):
     try:
       self.cluster.fs.purge_trash()
-    except Exception, e:
+    except Exception as e:
       LOG.error('Could not clean up trash: %s', e)
 
   def test_remove_header(self):

+ 20 - 10
desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py

@@ -22,13 +22,19 @@ Only some utils and Hdfs are still used.
 
 Interfaces for Hadoop filesystem access via the HADOOP-4707 Thrift APIs.
 """
+from __future__ import division
+from past.builtins import cmp
+from future import standard_library
+standard_library.install_aliases()
+from past.utils import old_div
+from builtins import object
 import errno
 import logging
 import os
 import posixpath
 import random
 import subprocess
-import urlparse
+import sys
 
 from django.utils.encoding import smart_str, force_unicode
 from django.utils.translation import ugettext as _
@@ -39,6 +45,10 @@ import hadoop.conf
 from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
 from hadoop.fs.exceptions import PermissionDeniedException
 
+if sys.version_info[0] > 2:
+  from urllib.parse import urlsplit as lib_urlsplit
+else:
+  from urlparse import urlsplit as lib_urlsplit
 
 LOG = logging.getLogger(__name__)
 
@@ -78,7 +88,7 @@ def _coerce_exceptions(function):
   def wrapper(*args, **kwargs):
     try:
       return function(*args, **kwargs)
-    except Exception, e:
+    except Exception as e:
       e.msg = force_unicode(e.msg, errors='replace')
       e.stack = force_unicode(e.stack, errors='replace')
       LOG.exception("Exception in Hadoop FS call " + function.__name__)
@@ -139,7 +149,7 @@ class Hdfs(object):
     schema = url[:i]
     if schema not in ('hdfs', 'viewfs'):
       # Default to standard for non-hdfs
-      return urlparse.urlsplit(url)
+      return lib_urlsplit(url)
     url = url[i+3:]
     i = url.find('/')
     if i == -1:
@@ -184,7 +194,7 @@ class Hdfs(object):
       finally:
         self.setuser(user)
 
-  def copyFromLocal(self, local_src, remote_dst, mode=0755):
+  def copyFromLocal(self, local_src, remote_dst, mode=0o755):
     remote_dst = remote_dst.endswith(posixpath.sep) and remote_dst[:-1] or remote_dst
     local_src = local_src.endswith(posixpath.sep) and local_src[:-1] or local_src
 
@@ -194,7 +204,7 @@ class Hdfs(object):
       (basename, filename) = os.path.split(local_src)
       self._copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)
 
-  def _copy_dir(self, local_dir, remote_dir, mode=0755):
+  def _copy_dir(self, local_dir, remote_dir, mode=0o755):
     self.mkdir(remote_dir, mode=mode)
 
     for f in os.listdir(local_dir):
@@ -217,7 +227,7 @@ class Hdfs(object):
       src = file(local_src)
       try:
         try:
-          self.create(remote_dst, permission=0755)
+          self.create(remote_dst, permission=0o755)
           chunk = src.read(chunk_size)
           while chunk:
             self.append(remote_dst, chunk)
@@ -435,7 +445,7 @@ class FileUpload(object):
 
     self.subprocess_env = i18n.make_utf8_env()
 
-    if self.subprocess_env.has_key('HADOOP_CLASSPATH'):
+    if 'HADOOP_CLASSPATH' in self.subprocess_env:
       self.subprocess_env['HADOOP_CLASSPATH'] += ':' + hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
     else:
       self.subprocess_env['HADOOP_CLASSPATH'] = hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
@@ -460,7 +470,7 @@ class FileUpload(object):
   def close(self):
     try:
       (stdout, stderr) = self.putter.communicate()
-    except IOError, ioe:
+    except IOError as ioe:
       logging.debug("Saw IOError writing %r" % self.path, exc_info=1)
       if ioe.errno == errno.EPIPE:
         stdout, stderr = self.putter.communicate()
@@ -509,7 +519,7 @@ class BlockCache(object):
     if _max_idx < _min_idx:
       return None
 
-    pivot_idx = (_max_idx + _min_idx) / 2
+    pivot_idx = old_div((_max_idx + _min_idx), 2)
     pivot_block = self.blocks[pivot_idx]
     if pos < pivot_block.startOffset:
       return self.find_block(pos, _min_idx, pivot_idx - 1)
@@ -536,7 +546,7 @@ class BlockCache(object):
       blocks_dict[nb.blockId] = nb
 
     # Convert back to sorted list
-    block_list = blocks_dict.values()
+    block_list = list(blocks_dict.values())
    block_list.sort(cmp=lambda a,b: cmp(a.startOffset, b.startOffset))
 
    # Update cache with new data
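
The guarded import above is the commit's standard treatment of the urllib/urlparse reshuffle. A self-contained sketch (made-up HDFS URL, not from the patch):

import sys

if sys.version_info[0] > 2:
    from urllib.parse import urlsplit as lib_urlsplit  # Python 3 location
else:
    from urlparse import urlsplit as lib_urlsplit      # Python 2 location

parts = lib_urlsplit('hdfs://namenode.example.com:8020/user/hue/data.txt')
print(parts.scheme)  # -> hdfs
print(parts.netloc)  # -> namenode.example.com:8020
print(parts.path)    # -> /user/hue/data.txt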

+ 43 - 38
desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py

@@ -16,6 +16,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import map
+from builtins import zip
+from builtins import range
+from builtins import object
 import logging
 import os
 import random
@@ -30,6 +34,7 @@ from hadoop import pseudo_hdfs4
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.pseudo_hdfs4 import is_live_cluster
+from functools import reduce
 
 
 LOG = logging.getLogger(__name__)
@@ -46,7 +51,7 @@ class WebhdfsTests(unittest.TestCase):
 
     cls.cluster.fs.setuser('test')
     cls.cluster.fs.mkdir(cls.prefix)
-    cls.cluster.fs.chmod(cls.prefix, 01777)
+    cls.cluster.fs.chmod(cls.prefix, 0o1777)
 
   def setUp(self):
     self.cluster.fs.setuser('test')
@@ -117,10 +122,10 @@ class WebhdfsTests(unittest.TestCase):
       f.write(data)
       f.close()
 
-      for i in xrange(1, 10):
+      for i in range(1, 10):
        f = fs.open(test_file, "r")
 
-        for j in xrange(1, 100):
+        for j in range(1, 100):
          offset = random.randint(0, len(data) - 1)
          f.seek(offset, os.SEEK_SET)
          assert_equals(data[offset:offset+50], f.read(50))
@@ -138,7 +143,7 @@ class WebhdfsTests(unittest.TestCase):
    f = fs.open(test_file, "w")
    f.write("foo")
    f.close()
-    fs.chmod(test_file, 0400)
+    fs.chmod(test_file, 0o400)
    fs.setuser("notsuperuser")
    f = fs.open(test_file)
 
@@ -149,7 +154,7 @@ class WebhdfsTests(unittest.TestCase):
 
    prefix = self.prefix + '/test_umask'
    fs_umask = fs._umask
-    fs._umask = 01022
+    fs._umask = 0o1022
 
    try:
      test_dir = prefix + '/umask_test_dir'
@@ -167,7 +172,7 @@ class WebhdfsTests(unittest.TestCase):
      fs._umask = fs_umask
 
    fs_umask = fs._umask
-    fs._umask = 0077
+    fs._umask = 0o077
    prefix += '/2'
 
    try:
@@ -189,14 +194,14 @@ class WebhdfsTests(unittest.TestCase):
 
    prefix = self.prefix + '/test_umask_overriden'
    fs_umask = fs._umask
-    fs._umask = 01022
+    fs._umask = 0o1022
 
    try:
      test_dir = prefix + '/umask_test_dir'
-      fs.mkdir(test_dir, 0333)
+      fs.mkdir(test_dir, 0o333)
 
      test_file = prefix + '/umask_test.txt'
-      fs.create(test_file, permission=0333)
+      fs.create(test_file, permission=0o333)
 
      assert_equals('40333', '%o' % fs.stats(test_dir).mode)
      assert_equals('100333', '%o' % fs.stats(test_file).mode)
@@ -209,7 +214,7 @@ class WebhdfsTests(unittest.TestCase):
 
    prefix = self.prefix + '/test_umask_without_sticky'
    fs_umask = fs._umask
-    fs._umask = 022
+    fs._umask = 0o22
 
    try:
      test_dir = prefix + '/umask_test_dir'
@@ -239,7 +244,7 @@ class WebhdfsTests(unittest.TestCase):
    new_owner = 'testcopy'
    new_owner_dir = self.prefix  + '/' + new_owner + '/test-copy'
 
-    fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0755, owner=new_owner)
+    fs.copy_remote_dir(src_dir, new_owner_dir, dir_mode=0o755, owner=new_owner)
 
    dir_stat = fs.stats(new_owner_dir)
    assert_equals(new_owner, dir_stat.user)
@@ -332,12 +337,12 @@ class WebhdfsTests(unittest.TestCase):
      # Test exception can handle non-ascii characters
      try:
        self.cluster.fs.rmtree(dir_path)
-      except IOError, ex:
+      except IOError as ex:
        LOG.info('Successfully caught error: %s' % ex)
    finally:
      try:
        self.cluster.fs.rmtree(prefix)
-      except Exception, ex:
+      except Exception as ex:
        LOG.error('Failed to cleanup %s: %s' % (prefix, ex))
 
      # Reset encoding
@@ -360,22 +365,22 @@ class WebhdfsTests(unittest.TestCase):
      f.close()
 
      # Check currrent permissions are not 777 (666 for file)
-      fs.chmod(dir1, 01000, recursive=True)
-      assert_equals(041000, fs.stats(dir1).mode)
-      assert_equals(041000, fs.stats(subdir1).mode)
-      assert_equals(0101000, fs.stats(file1).mode)
+      fs.chmod(dir1, 0o1000, recursive=True)
+      assert_equals(0o41000, fs.stats(dir1).mode)
+      assert_equals(0o41000, fs.stats(subdir1).mode)
+      assert_equals(0o101000, fs.stats(file1).mode)
 
      # Chmod non-recursive
-      fs.chmod(dir1, 01222, recursive=False)
-      assert_equals(041222, fs.stats(dir1).mode)
-      assert_equals(041000, fs.stats(subdir1).mode)
-      assert_equals(0101000, fs.stats(file1).mode)
+      fs.chmod(dir1, 0o1222, recursive=False)
+      assert_equals(0o41222, fs.stats(dir1).mode)
+      assert_equals(0o41000, fs.stats(subdir1).mode)
+      assert_equals(0o101000, fs.stats(file1).mode)
 
      # Chmod recursive
-      fs.chmod(dir1, 01444, recursive=True)
-      assert_equals(041444, fs.stats(dir1).mode)
-      assert_equals(041444, fs.stats(subdir1).mode)
-      assert_equals(0101444, fs.stats(file1).mode)
+      fs.chmod(dir1, 0o1444, recursive=True)
+      assert_equals(0o41444, fs.stats(dir1).mode)
+      assert_equals(0o41444, fs.stats(subdir1).mode)
+      assert_equals(0o101444, fs.stats(file1).mode)
    finally:
      fs.rmtree(dir1, skip_trash=True)
      fs.setuser('test')
@@ -429,9 +434,9 @@ class WebhdfsTests(unittest.TestCase):
      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path(PATH)))
      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path(PATH))
      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path(PATH), trash_dir, PATH[1:]) for trash_dir in trash_dirs]
-      exists = map(self.cluster.fs.exists, trash_paths)
+      exists = list(map(self.cluster.fs.exists, trash_paths))
      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
-      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+      trash_path = reduce(lambda a, b: a[0] and a or b, list(zip(exists, trash_paths)))[1]
 
      # Restore
      self.cluster.fs.restore(trash_path)
@@ -440,7 +445,7 @@ class WebhdfsTests(unittest.TestCase):
    finally:
      try:
        self.cluster.fs.rmtree(PATH)
-      except Exception, ex:
+      except Exception as ex:
        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
 
  def test_trash_and_purge(self):
@@ -455,9 +460,9 @@ class WebhdfsTests(unittest.TestCase):
      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path(PATH)))
      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path(PATH))
      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path(PATH), trash_dir, PATH[1:]) for trash_dir in trash_dirs]
-      exists = map(self.cluster.fs.exists, trash_paths)
+      exists = list(map(self.cluster.fs.exists, trash_paths))
      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
-      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+      trash_path = reduce(lambda a, b: a[0] and a or b, list(zip(exists, trash_paths)))[1]
 
      # Purge
      self.cluster.fs.purge_trash()
@@ -466,7 +471,7 @@ class WebhdfsTests(unittest.TestCase):
    finally:
      try:
        self.cluster.fs.rmtree(PATH)
-      except Exception, ex:
+      except Exception as ex:
        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
 
  def test_restore_error(self):
@@ -481,9 +486,9 @@ class WebhdfsTests(unittest.TestCase):
      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path(PATH)))
      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path(PATH))
      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path(PATH), trash_dir, PATH[1:]) for trash_dir in trash_dirs]
-      exists = map(self.cluster.fs.exists, trash_paths)
+      exists = list(map(self.cluster.fs.exists, trash_paths))
      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
-      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+      trash_path = reduce(lambda a, b: a[0] and a or b, list(zip(exists, trash_paths)))[1]
 
      # Purge
      self.cluster.fs.purge_trash()
@@ -495,7 +500,7 @@ class WebhdfsTests(unittest.TestCase):
    finally:
      try:
        self.cluster.fs.rmtree(PATH)
-      except Exception, ex:
+      except Exception as ex:
        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
 
  def test_trash_permissions(self):
@@ -510,16 +515,16 @@ class WebhdfsTests(unittest.TestCase):
      assert_true(self.cluster.fs.exists(self.cluster.fs.trash_path(PATH)))
      trash_dirs = self.cluster.fs.listdir(self.cluster.fs.trash_path(PATH))
      trash_paths = [self.cluster.fs.join(self.cluster.fs.trash_path(PATH), trash_dir, PATH[1:]) for trash_dir in trash_dirs]
-      exists = map(self.cluster.fs.exists, trash_paths)
+      exists = list(map(self.cluster.fs.exists, trash_paths))
      assert_true(reduce(lambda a, b: a or b, exists), trash_paths)
-      trash_path = reduce(lambda a, b: a[0] and a or b, zip(exists, trash_paths))[1]
+      trash_path = reduce(lambda a, b: a[0] and a or b, list(zip(exists, trash_paths)))[1]
 
      # Restore
      assert_raises(IOError, self.cluster.fs.do_as_user, 'nouser', self.cluster.fs.restore, trash_path)
    finally:
      try:
        self.cluster.fs.rmtree(PATH)
-      except Exception, ex:
+      except Exception as ex:
        LOG.error('Failed to cleanup %s: %s' % (PATH, ex))
 
  def test_trash_users(self):
@@ -560,7 +565,7 @@ class WebhdfsTests(unittest.TestCase):
      for directory in CLEANUP:
        try:
          self.cluster.fs.rmtree(dir)
-        except Exception, ex:
+        except Exception as ex:
          LOG.error('Failed to cleanup %s: %s' % (directory, ex))
 
  def test_check_access(self):
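
The trash tests above repeat one pattern: map() and zip() are lazy iterators on Python 3 and reduce() moved to functools, so the results are wrapped in list() before being reduced. A standalone sketch with hypothetical trash paths (not taken from a real cluster):

from functools import reduce  # no longer a builtin on Python 3

trash_paths = ['/user/test/.Trash/Current/a', '/user/test/.Trash/190101/a']
exists = list(map(lambda p: p.endswith('190101/a'), trash_paths))  # [False, True]

assert reduce(lambda a, b: a or b, exists)  # at least one candidate "exists"
trash_path = reduce(lambda a, b: a[0] and a or b, list(zip(exists, trash_paths)))[1]
print(trash_path)  # -> /user/test/.Trash/190101/a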

+ 5 - 4
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -25,6 +25,7 @@ which is triggered by a magic prefix ("HDFS") in the field name.
 See http://docs.djangoproject.com/en/1.2/topics/http/file-uploads/
 See http://docs.djangoproject.com/en/1.2/topics/http/file-uploads/
 """
 """
 
 
+from builtins import object
 import errno
 import errno
 import logging
 import logging
 import time
 import time
@@ -74,7 +75,7 @@ class HDFStemporaryUploadedFile(object):
     # Check access permissions before attempting upload
     # Check access permissions before attempting upload
     try:
     try:
       self._fs.check_access(destination, 'rw-')
       self._fs.check_access(destination, 'rw-')
-    except WebHdfsException, e:
+    except WebHdfsException as e:
       LOG.exception(e)
       LOG.exception(e)
       raise HDFSerror(_('User %s does not have permissions to write to path "%s".') % (request.user.username, destination))
       raise HDFSerror(_('User %s does not have permissions to write to path "%s".') % (request.user.username, destination))
 
 
@@ -97,7 +98,7 @@ class HDFStemporaryUploadedFile(object):
     try:
     try:
       self.size = size
       self.size = size
       self.close()
       self.close()
-    except Exception, ex:
+    except Exception as ex:
       LOG.exception('Error uploading file to %s' % (self._path,))
       LOG.exception('Error uploading file to %s' % (self._path,))
       raise
       raise
 
 
@@ -105,7 +106,7 @@ class HDFStemporaryUploadedFile(object):
     try:
     try:
       self._fs.remove(self._path, True)
       self._fs.remove(self._path, True)
       self._do_cleanup = False
       self._do_cleanup = False
-    except IOError, ex:
+    except IOError as ex:
       if ex.errno != errno.ENOENT:
       if ex.errno != errno.ENOENT:
         LOG.exception('Failed to remove temporary upload file "%s". '
         LOG.exception('Failed to remove temporary upload file "%s". '
                       'Please cleanup manually: %s' % (self._path, ex))
                       'Please cleanup manually: %s' % (self._path, ex))
@@ -159,7 +160,7 @@ class HDFSfileUploadHandler(FileUploadHandler):
         LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
         LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
         self._activated = True
         self._activated = True
         self._starttime = time.time()
         self._starttime = time.time()
-      except Exception, ex:
+      except Exception as ex:
         LOG.error("Not using HDFS upload handler: %s" % (ex,))
         LOG.error("Not using HDFS upload handler: %s" % (ex,))
         self.request.META['upload_failed'] = ex
         self.request.META['upload_failed'] = ex
 
 

+ 33 - 21
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -19,18 +19,23 @@
 Interfaces for Hadoop filesystem access via HttpFs/WebHDFS
 """
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import oct
+from builtins import object
 import errno
 import logging
 import posixpath
 import stat
+import sys
 import threading
 import time
-import urllib
+import urllib.request, urllib.error
 
-from urlparse import urlparse
 from django.utils.encoding import smart_str
 from django.utils.translation import ugettext as _
 from desktop.lib.rest import http_client, resource
+from past.builtins import long
 from hadoop.fs import normpath as fs_normpath, SEEK_SET, SEEK_CUR, SEEK_END
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
@@ -41,6 +46,13 @@ from hadoop.hdfs_site import get_nn_sentry_prefixes, get_umask_mode, get_supergr
 import hadoop.conf
 import desktop.conf
 
+if sys.version_info[0] > 2:
+  from urllib.parse import unquote as urllib_quote
+  from urllib.parse import urlparse
+else:
+  from urllib import unquote as urllib_quote
+  from urlparse import urlparse
+
 DEFAULT_HDFS_SUPERUSER = desktop.conf.DEFAULT_HDFS_SUPERUSER.get()
 
 # The number of bytes to read if not specified
@@ -63,7 +75,7 @@ class WebHdfs(Hdfs):
                security_enabled=False,
                ssl_cert_ca_verify=True,
                temp_dir="/tmp",
-               umask=01022,
+               umask=0o1022,
                hdfs_supergroup=None):
     self._url = url
     self._superuser = hdfs_superuser
@@ -157,7 +169,7 @@ class WebHdfs(Hdfs):
           # The owner of '/' is usually the superuser
           sb = self.stats('/')
           self._superuser = sb.user
-        except Exception, ex:
+        except Exception as ex:
           LOG.exception('Failed to determine superuser of %s: %s' % (self, ex))
           self._superuser = DEFAULT_HDFS_SUPERUSER
 
@@ -182,7 +194,7 @@ class WebHdfs(Hdfs):
 
       json = self._root.get(path, params, headers)
       trash_path = json['Path']
-    except WebHdfsException, e:
+    except WebHdfsException as e:
       exceptions = ['IllegalArgumentException', 'UnsupportedOperationException']
       if any(x in e.message for x in exceptions):
         LOG.warn('WebHDFS operation GETTRASHROOT is not implemented, returning default trash path: %s' % trash_path)
@@ -284,7 +296,7 @@ class WebHdfs(Hdfs):
     try:
       json = self._root.get(path, params, headers)
       return WebHdfsStat(json['FileStatus'], path)
-    except WebHdfsException, ex:
+    except WebHdfsException as ex:
       if ex.server_exc == 'FileNotFoundException' or ex.code == 404:
         return None
       raise ex
@@ -519,7 +531,7 @@ class WebHdfs(Hdfs):
     params['op'] = 'GETHOMEDIRECTORY'
     headers = self._getheaders()
     res = self._root.get(params=params, headers=headers)
-    for key, value in res.iteritems():
+    for key, value in res.items():
       if key.lower() == "path":
         return self.normpath(value)
 
@@ -550,7 +562,7 @@ class WebHdfs(Hdfs):
           del params['doas']
         if 'user.name' in params:
           del params['user.name']
-    quoted_path = urllib.quote(smart_str(path))
+    quoted_path = urllib_quote(smart_str(path))
     return self._client._make_url(quoted_path, params)
 
   def read(self, path, offset, length, bufsize=None):
@@ -569,7 +581,7 @@ class WebHdfs(Hdfs):
     headers = self._getheaders()
     try:
       return self._root.get(path, params, headers)
-    except WebHdfsException, ex:
+    except WebHdfsException as ex:
       if "out of the range" in ex.message:
         return ""
       raise ex
@@ -587,11 +599,11 @@ class WebHdfs(Hdfs):
 
 
   def getDefaultFilePerms(self):
-    return 0666 & (01777 ^ self._umask)
+    return 0o666 & (0o1777 ^ self._umask)
 
 
   def getDefaultDirPerms(self):
-    return 01777 & (01777 ^ self._umask)
+    return 0o1777 & (0o1777 ^ self._umask)
 
 
   def create(self, path, overwrite=False, blocksize=None, replication=None, permission=None, data=None):
@@ -689,7 +701,7 @@ class WebHdfs(Hdfs):
     headers = self._getheaders()
     try:
       return self._root.get(path, params, headers)
-    except WebHdfsException, ex:
+    except WebHdfsException as ex:
       if ex.code == 500 or ex.code == 400:
         LOG.warn('Failed to check access to path %s, CHECKACCESS operation may not be supported.' % path)
         return None
@@ -839,7 +851,7 @@ class WebHdfs(Hdfs):
     try:
       # Do not pass data in the first leg.
       self._root.invoke(method, path, params, headers=headers)
-    except WebHdfsException, ex:
+    except WebHdfsException as ex:
       # This is expected. We get a 307 redirect.
       # The following call may throw.
       next_url = self._get_redirect_url(ex)
@@ -870,7 +882,7 @@ class WebHdfs(Hdfs):
         LOG.error("Response is not a redirect: %s" % webhdfs_ex)
         raise webhdfs_ex
       return http_error.response.headers['location']
-    except Exception, ex:
+    except Exception as ex:
       LOG.exception("Failed to read redirect from response: %s (%s)" % (webhdfs_ex, ex))
       raise webhdfs_ex
 
@@ -945,7 +957,7 @@ class File(object):
       self._stat = fs.stats(path)
       if self._stat.isDir:
         raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path)
-    except IOError, ex:
+    except IOError as ex:
       if ex.errno == errno.ENOENT and 'w' in self._mode:
         self._fs.create(self._path)
         self.stat()
@@ -999,9 +1011,9 @@ def safe_octal(octal_value):
   This correctly handles octal values specified as a string or as a numeric.
   """
   try:
-    return oct(octal_value)
+    return oct(octal_value).replace('o', '') # fix futurized octal value with 0o prefix
   except TypeError:
-    return str(octal_value)
+    return str(octal_value).replace('o', '')
 
 
 def _get_service_url(hdfs_config):
@@ -1028,7 +1040,7 @@ def test_fs_configuration(fs_config):
     statbuf = fs.stats('/')
     if statbuf.user != DEFAULT_HDFS_SUPERUSER:
       return [(fs_config.WEBHDFS_URL, _("Filesystem root '/' should be owned by '%s'") % DEFAULT_HDFS_SUPERUSER)]
-  except Exception, ex:
+  except Exception as ex:
     LOG.info("%s -- Validation error: %s" % (fs, ex))
     return [(fs_config.WEBHDFS_URL, _('Failed to access filesystem root'))]
 
@@ -1036,7 +1048,7 @@ def test_fs_configuration(fs_config):
   tmpname = fs.mktemp(prefix='hue_config_validation')
   try:
     fs.create(tmpname)
-  except Exception, ex:
+  except Exception as ex:
     LOG.info("%s -- Validation error: %s" % (fs, ex))
     return [(fs_config.WEBHDFS_URL, _('Failed to create temporary file "%s"') % tmpname)]
 
@@ -1044,7 +1056,7 @@ def test_fs_configuration(fs_config):
   try:
     try:
       fs.chown(tmpname, fs.superuser)
-    except Exception, ex:
+    except Exception as ex:
       LOG.info("%s -- Validation error: %s" % (fs, ex))
       return [(fs_config.WEBHDFS_URL,
               'Failed to chown file. Please make sure that the filesystem root '
@@ -1052,7 +1064,7 @@ def test_fs_configuration(fs_config):
   finally:
     try:
       fs.remove(tmpname, skip_trash=True)
-    except Exception, ex:
+    except Exception as ex:
       LOG.error("Failed to remove '%s': %s" % (tmpname, ex))
       return [(fs_config.WEBHDFS_URL, _('Failed to remove temporary file "%s"') % tmpname)]
 

+ 7 - 3
desktop/libs/hadoop/src/hadoop/fs/webhdfs_types.py

@@ -18,7 +18,11 @@
 """
 Return types from WebHDFS api calls.
 """
+from __future__ import division
 
+from builtins import oct
+from builtins import object
+from past.utils import old_div
 import stat
 
 from django.utils.encoding import smart_str
@@ -36,8 +40,8 @@ class WebHdfsStat(object):
     self.path = Hdfs.join(parent_path, self.name)
     self.isDir = file_status['type'] == 'DIRECTORY'
     self.type = file_status['type']
-    self.atime = file_status['accessTime'] / 1000
-    self.mtime = file_status['modificationTime'] / 1000
+    self.atime = old_div(file_status['accessTime'], 1000)
+    self.mtime = old_div(file_status['modificationTime'], 1000)
     self.user = file_status['owner']
     self.group = file_status['group']
     self.size = file_status['length']
@@ -86,7 +90,7 @@ class WebHdfsContentSummary(object):
   def __init__(self, summary):
     self.summary = summary
 
-    for k, v in summary.iteritems():
+    for k, v in summary.items():
       setattr(self, k, v)
 
   def __str__(self):
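
A small sketch of the dict-iteration change above: iteritems() is gone on Python 3, while items() exists on both (a list on 2, a view on 3), which is all plain iteration needs. The class name and summary payload below are made up for illustration:

class ContentSummary(object):
    def __init__(self, summary):
        self.summary = summary
        for k, v in summary.items():  # works unchanged on Python 2 and 3
            setattr(self, k, v)

summary = ContentSummary({'length': 4096, 'fileCount': 2, 'directoryCount': 1})
print(summary.fileCount)  # -> 2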

+ 4 - 3
desktop/libs/hadoop/src/hadoop/hdfs_site.py

@@ -15,12 +15,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import absolute_import
 import errno
 import logging
 import os.path
 
-import conf
-import confparse
+from . import conf
+from . import confparse
 from hadoop.conf import DEFAULT_NN_HTTP_PORT
 
 LOG = logging.getLogger(__name__)
@@ -84,7 +85,7 @@ def _parse_hdfs_site():
     data = file(hdfs_site_path, 'r').read()
   except KeyError:
     data = ""
-  except IOError, err:
+  except IOError as err:
     if err.errno != errno.ENOENT:
       LOG.error('Cannot read from "%s": %s' % (hdfs_site_path, err))
       return

+ 29 - 17
desktop/libs/hadoop/src/hadoop/mini_cluster.py

@@ -37,6 +37,10 @@
 #   echo "GET /" | nc -w 1 localhost $p
 #   echo "GET /" | nc -w 1 localhost $p
 # done
 # done
 
 
+from __future__ import print_function
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
 import atexit
 import atexit
 import subprocess
 import subprocess
 import os
 import os
@@ -49,13 +53,21 @@ import time
 import tempfile
 import tempfile
 import json
 import json
 import lxml.etree
 import lxml.etree
-import urllib2
 
 
 from desktop.lib import python_util
 from desktop.lib import python_util
 from desktop.lib.test_utils import clear_sys_caches, restore_sys_caches
 from desktop.lib.test_utils import clear_sys_caches, restore_sys_caches
 
 
 import hadoop.cluster
 import hadoop.cluster
 
 
+if sys.version_info[0] > 2:
+  from urllib.request import Request as lib_Request
+  from urllib.error import URLError as lib_URLError
+  from urllib.request import urlopen as lib_urlopen
+else:
+  from urllib2 import Request as lib_Request
+  from urllib2 import URLError as lib_URLError
+  from urllib2 import urlopen as lib_urlopen
+
 # Starts mini cluster suspended until a debugger attaches to it.
 # Starts mini cluster suspended until a debugger attaches to it.
 DEBUG_HADOOP=False
 DEBUG_HADOOP=False
 # Redirects mini cluster stderr to stderr.  (Default is to put it in a file.)
 # Redirects mini cluster stderr to stderr.  (Default is to put it in a file.)
@@ -179,7 +191,7 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
         "-D", "hadoop.policy.file=%s/hadoop-policy.xml" % in_conf_dir,
         "-D", "hadoop.policy.file=%s/hadoop-policy.xml" % in_conf_dir,
       ]
       ]
 
 
-      for key,value in extra_configs.iteritems():
+      for key,value in extra_configs.items():
         args.append("-D")
         args.append("-D")
         args.append(key + "=" + value)
         args.append(key + "=" + value)
 
 
@@ -248,7 +260,7 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
     LOGGER.debug("Successfully started minicluster")
     LOGGER.debug("Successfully started minicluster")
 
 
     # Place all the details as attributes on self.
     # Place all the details as attributes on self.
-    for k, v in details.iteritems():
+    for k, v in details.items():
       setattr(self, k, v)
       setattr(self, k, v)
 
 
     # Parse the configuration using XPath and place into self.config.
     # Parse the configuration using XPath and place into self.config.
@@ -291,9 +303,9 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
 
 
     while True:
     while True:
       try:
       try:
-        response = urllib2.urlopen(urllib2.Request('http://' +
+        response = lib_urlopen(lib_Request('http://' +
           self.config['dfs.secondary.http.address']))
           self.config['dfs.secondary.http.address']))
-      except urllib2.URLError:
+      except lib_URLError:
         # If we should abort startup.
         # If we should abort startup.
         if self.secondary_proc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
         if self.secondary_proc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
           LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
           LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
@@ -374,13 +386,13 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
 
 
     @param fd: a file-like writable object
     @param fd: a file-like writable object
     """
     """
-    print >>fd, "[hadoop]"
-    print >>fd, "[[hdfs_clusters]]"
-    print >>fd, "[[[default]]]"
-    print >>fd, "thrift_port=%d" % self.namenode_thrift_port
-    print >>fd, "[[mapred_clusters]]"
-    print >>fd, "[[[default]]]"
-    print >>fd, "thrift_port=%d" % self.jobtracker_thrift_port
+    print("[hadoop]", file=fd)
+    print("[[hdfs_clusters]]", file=fd)
+    print("[[[default]]]", file=fd)
+    print("thrift_port=%d" % self.namenode_thrift_port, file=fd)
+    print("[[mapred_clusters]]", file=fd)
+    print("[[[default]]]", file=fd)
+    print("thrift_port=%d" % self.jobtracker_thrift_port, file=fd)
 
 
 
 
 # Shared global cluster returned by shared_cluster context manager.
 # Shared global cluster returned by shared_cluster context manager.
@@ -436,7 +448,7 @@ def write_config(config, path, variables=None):
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <configuration>
 <configuration>
 """)
 """)
-    keys = (variables and (variables,) or (config.keys(),))[0]
+    keys = (variables and (variables,) or (list(config.keys()),))[0]
     for name in keys:
     for name in keys:
       value = config[name]
       value = config[name]
       f.write("  <property>\n")
       f.write("  <property>\n")
@@ -454,7 +466,7 @@ def _write_static_group_mapping(user_group_mapping, path):
   """
   """
   f = file(path, 'w')
   f = file(path, 'w')
   try:
   try:
-    for user, groups in user_group_mapping.iteritems():
+    for user, groups in user_group_mapping.items():
       f.write('%s = %s\n' % (user, ','.join(groups)))
       f.write('%s = %s\n' % (user, ','.join(groups)))
   finally:
   finally:
     f.close()
     f.close()
@@ -482,9 +494,9 @@ if __name__ == '__main__':
   if True:
   if True:
     cluster = MiniHadoopCluster(num_datanodes=5, num_tasktrackers=5)
     cluster = MiniHadoopCluster(num_datanodes=5, num_tasktrackers=5)
     cluster.start()
     cluster.start()
-    print cluster.namenode_port
-    print cluster.jobtracker_port
-    print cluster.config.get("dfs.thrift.address")
+    print(cluster.namenode_port)
+    print(cluster.jobtracker_port)
+    print(cluster.config.get("dfs.thrift.address"))
     cluster.dump_ini(sys.stdout)
     cluster.dump_ini(sys.stdout)
 
 
     from IPython.Shell import IPShellEmbed
     from IPython.Shell import IPShellEmbed
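
The mini_cluster.py changes above bundle three of the recurring futurize idioms in this commit: a sys.version_info guard that puts the urllib2 / urllib.request pieces behind common lib_* names, dict.items() in place of iteritems(), and the print() function instead of the Python 2 "print >>fd" statement. A minimal self-contained sketch of the same idioms (the URL and config values are placeholders, not taken from the commit):

    from __future__ import print_function

    import sys

    # Alias the Py2/Py3 urllib pieces under common local names, as the hunk does.
    if sys.version_info[0] > 2:
      from urllib.request import Request as lib_Request, urlopen as lib_urlopen
      from urllib.error import URLError as lib_URLError
    else:
      from urllib2 import Request as lib_Request, urlopen as lib_urlopen
      from urllib2 import URLError as lib_URLError


    def dump_ini(config, fd=sys.stdout):
      # print(..., file=fd) replaces the Python 2-only "print >>fd, ..." form.
      for key, value in config.items():        # .items() exists on both 2 and 3
        print("%s=%s" % (key, value), file=fd)


    def probe(url='http://localhost:50070/'):  # placeholder URL, not from the commit
      try:
        return lib_urlopen(lib_Request(url)).read()
      except lib_URLError as ex:
        print("not reachable yet: %s" % ex, file=sys.stderr)
        return None


    dump_ini({'thrift_port': 10090})           # placeholder config value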

+ 25 - 23
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from __future__ import print_function
+from builtins import object
 import atexit
 import atexit
 import getpass
 import getpass
 import logging
 import logging
@@ -51,7 +53,7 @@ def is_live_cluster():
 
 
 def get_fs_prefix(fs):
 def get_fs_prefix(fs):
   prefix = '/tmp/hue_tests_%s' % str(time.time())
   prefix = '/tmp/hue_tests_%s' % str(time.time())
-  fs.mkdir(prefix, 0777)
+  fs.mkdir(prefix, 0o777)
   return prefix
   return prefix
 
 
 def get_db_prefix(name='hive'):
 def get_db_prefix(name='hive'):
@@ -61,7 +63,7 @@ def get_db_prefix(name='hive'):
     return 'default'
     return 'default'
 
 
 
 
-class LiveHdfs():
+class LiveHdfs(object):
   def __init__(self):
   def __init__(self):
     self.fs = cluster.get_hdfs('default')
     self.fs = cluster.get_hdfs('default')
     # Assumes /tmp exists and is 1777
     # Assumes /tmp exists and is 1777
@@ -84,7 +86,7 @@ class PseudoHdfs4(object):
 
 
   def __init__(self):
   def __init__(self):
     self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_', dir=TEST_HDFS_TMP_DIR)
     self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_', dir=TEST_HDFS_TMP_DIR)
-    os.chmod(self._tmpdir, 0755)
+    os.chmod(self._tmpdir, 0o755)
     self._superuser = getpass.getuser()
     self._superuser = getpass.getuser()
     self.fs_prefix = None
     self.fs_prefix = None
 
 
@@ -179,7 +181,7 @@ class PseudoHdfs4(object):
           os.kill(proc.pid, signal.SIGKILL)
           os.kill(proc.pid, signal.SIGKILL)
           LOG.info('Stopping %s pid %s' % (name, proc.pid,))
           LOG.info('Stopping %s pid %s' % (name, proc.pid,))
           time.sleep(0.5)
           time.sleep(0.5)
-      except Exception, ex:
+      except Exception as ex:
         LOG.exception('Failed to stop pid %s. You may want to do it manually: %s' % (proc.pid, ex))
         LOG.exception('Failed to stop pid %s. You may want to do it manually: %s' % (proc.pid, ex))
 
 
     _kill_proc('NameNode', self._nn_proc)
     _kill_proc('NameNode', self._nn_proc)
@@ -272,26 +274,26 @@ class PseudoHdfs4(object):
 
 
     # Create HDFS directories
     # Create HDFS directories
     if not self.fs.exists('/tmp'):
     if not self.fs.exists('/tmp'):
-      self.fs.do_as_superuser(self.mkdir, '/tmp', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp', 01777)
+      self.fs.do_as_superuser(self.mkdir, '/tmp', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done/2015', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done/2015', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done/2015', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done/2015', 0o1777)
 
 
-    self.fs.do_as_superuser(self.fs.mkdir, '/var/log/hadoop-yarn/apps', 01777)
-    self.fs.do_as_superuser(self.fs.chmod, '/var/log/hadoop-yarn/apps', 01777)
+    self.fs.do_as_superuser(self.fs.mkdir, '/var/log/hadoop-yarn/apps', 0o1777)
+    self.fs.do_as_superuser(self.fs.chmod, '/var/log/hadoop-yarn/apps', 0o1777)
 
 
     self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
     self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
     self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
     self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
@@ -603,11 +605,11 @@ def main():
   cluster = PseudoHdfs4()
   cluster = PseudoHdfs4()
   cluster.start()
   cluster.start()
 
 
-  print "%s running" % (cluster,)
-  print "fs.default.name=%s" % (cluster.fs_default_name,)
-  print "dfs.http.address=%s" % (cluster.dfs_http_address,)
-  print "jobtracker.thrift.port=%s" % (cluster.jt_thrift_port,)
-  print "mapred.job.tracker=%s" % (cluster.mapred_job_tracker,)
+  print("%s running" % (cluster,))
+  print("fs.default.name=%s" % (cluster.fs_default_name,))
+  print("dfs.http.address=%s" % (cluster.dfs_http_address,))
+  print("jobtracker.thrift.port=%s" % (cluster.jt_thrift_port,))
+  print("mapred.job.tracker=%s" % (cluster.mapred_job_tracker,))
 
 
   from IPython.Shell import IPShellEmbed
   from IPython.Shell import IPShellEmbed
   IPShellEmbed()()
   IPShellEmbed()()
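
The pseudo_hdfs4.py hunks are mostly the octal-literal rewrite: Python 3 drops the leading-zero form, so 01777 becomes 0o1777 with the same integer value. A small sketch, using a placeholder temp directory rather than HDFS:

    import os
    import stat
    import tempfile

    # 0o1777 is the Python 3 spelling of the old 01777 literal; the value is unchanged.
    assert 0o1777 == 1023 and 0o755 == 493

    d = tempfile.mkdtemp(prefix='tmp_hue_')      # placeholder directory, not HDFS
    os.chmod(d, 0o755)                           # was "os.chmod(d, 0755)" on Python 2
    print(oct(stat.S_IMODE(os.stat(d).st_mode)))
    os.rmdir(d)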

+ 4 - 3
desktop/libs/hadoop/src/hadoop/ssl_client_site.py

@@ -15,12 +15,13 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from __future__ import absolute_import
 import errno
 import errno
 import logging
 import logging
 import os.path
 import os.path
 
 
-import conf
-import confparse
+from hadoop import conf
+from hadoop import confparse
 
 
 
 
 _SSL_SITE_PATH = None                  # Path to ssl-client.xml
 _SSL_SITE_PATH = None                  # Path to ssl-client.xml
@@ -53,7 +54,7 @@ def _parse_ssl_client_site():
       break
       break
     except KeyError:
     except KeyError:
       data = ""
       data = ""
-    except IOError, err:
+    except IOError as err:
       if err.errno != errno.ENOENT:
       if err.errno != errno.ENOENT:
         LOG.error('Cannot read from "%s": %s' % (_SSL_SITE_PATH, err))
         LOG.error('Cannot read from "%s": %s' % (_SSL_SITE_PATH, err))
         return
         return
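
ssl_client_site.py switches from an implicit relative import ("import conf") to an absolute one plus "from __future__ import absolute_import", since Python 3 no longer searches the enclosing package for bare module names. A hypothetical two-module layout illustrating the difference (mypkg, conf.py and SSL_KEYSTORE_PATH are invented for the sketch, not part of Hue):

    # Layout sketch only:
    #
    #   mypkg/__init__.py
    #   mypkg/conf.py          defines SSL_KEYSTORE_PATH (hypothetical setting)
    #   mypkg/ssl_site.py      the module below
    #
    # mypkg/ssl_site.py
    from __future__ import absolute_import   # Python 2 now resolves imports like Python 3

    # Python 2-only implicit relative import, which breaks on Python 3:
    #   import conf
    # Portable absolute import of the sibling module:
    from mypkg import conf


    def get_keystore_path():
      return conf.SSL_KEYSTORE_PATH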

+ 1 - 0
desktop/libs/hadoop/src/hadoop/test_base.py

@@ -16,6 +16,7 @@
 # limitations under the License.
 # limitations under the License.
 
 
 
 
+from builtins import object
 from hadoop import pseudo_hdfs4
 from hadoop import pseudo_hdfs4
 
 
 
 

+ 2 - 1
desktop/libs/hadoop/src/hadoop/test_hdfs_site.py

@@ -15,7 +15,8 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
-import conf
+from __future__ import absolute_import
+from hadoop import conf
 import logging
 import logging
 import os
 import os
 import tempfile
 import tempfile

+ 2 - 1
desktop/libs/hadoop/src/hadoop/test_ssl_client_site.py

@@ -15,7 +15,8 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
-import conf
+from __future__ import absolute_import
+from hadoop import conf
 import logging
 import logging
 import os
 import os
 import tempfile
 import tempfile

+ 9 - 3
desktop/libs/hadoop/src/hadoop/tests.py

@@ -15,8 +15,10 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
-import cStringIO
+from future import standard_library
+standard_library.install_aliases()
 import os
 import os
+import sys
 
 
 from nose.tools import assert_true, assert_equal, assert_false
 from nose.tools import assert_true, assert_equal, assert_false
 from nose.plugins.attrib import attr
 from nose.plugins.attrib import attr
@@ -32,6 +34,10 @@ from hadoop import conf
 from hadoop import confparse
 from hadoop import confparse
 from hadoop import pseudo_hdfs4
 from hadoop import pseudo_hdfs4
 
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from cStringIO import StringIO as string_io
 
 
 def test_confparse():
 def test_confparse():
   data = """
   data = """
@@ -57,7 +63,7 @@ def test_confparse():
   """
   """
 
 
   cp_data = confparse.ConfParse(data)
   cp_data = confparse.ConfParse(data)
-  cp_file = confparse.ConfParse(cStringIO.StringIO(data))
+  cp_file = confparse.ConfParse(string_io(data))
 
 
   for cp in (cp_data, cp_file):
   for cp in (cp_data, cp_file):
     assert_equal(cp['fs.default.name'], 'hdfs://localhost:8020')
     assert_equal(cp['fs.default.name'], 'hdfs://localhost:8020')
@@ -70,7 +76,7 @@ def test_confparse():
     try:
     try:
       cp['bogus']
       cp['bogus']
       assert_true(False, 'Should not get here')
       assert_true(False, 'Should not get here')
-    except KeyError, kerr:
+    except KeyError as kerr:
       ex = kerr
       ex = kerr
 
 
   cp_empty = confparse.ConfParse("")
   cp_empty = confparse.ConfParse("")
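
tests.py replaces cStringIO, which has no Python 3 counterpart under that name, with a version-gated string_io alias. A minimal sketch with placeholder data:

    import sys

    if sys.version_info[0] > 2:
      from io import StringIO as string_io
    else:
      from cStringIO import StringIO as string_io

    data = "<configuration></configuration>"   # placeholder XML, not Hue's test fixture

    # The same ConfParse-style call site can pass either a string or a file-like
    # object without caring which interpreter is running.
    fobj = string_io(data)
    assert fobj.read() == data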

+ 10 - 2
desktop/libs/hadoop/src/hadoop/yarn/clients.py

@@ -15,16 +15,24 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import next
 import logging
 import logging
+import sys
 import threading
 import threading
 import time
 import time
-import urlparse
+import urllib.parse
 import heapq
 import heapq
 
 
 from desktop.lib.rest.http_client import HttpClient
 from desktop.lib.rest.http_client import HttpClient
 
 
 from hadoop import cluster
 from hadoop import cluster
 
 
+if sys.version_info[0] > 2:
+  from urllib.parse import urlsplit as lib_urlsplit
+else:
+  from urlparse import urlsplit as lib_urlsplit
 
 
 LOG = logging.getLogger(__name__)
 LOG = logging.getLogger(__name__)
 
 
@@ -41,7 +49,7 @@ def get_log_client(log_link):
   _log_client_lock.acquire()
   _log_client_lock.acquire()
 
 
   try:
   try:
-    components = urlparse.urlsplit(log_link)
+    components = lib_urlsplit(log_link)
     base_url = '%(scheme)s://%(netloc)s' % {
     base_url = '%(scheme)s://%(netloc)s' % {
       'scheme': components[0],
       'scheme': components[0],
       'netloc': components[1]
       'netloc': components[1]
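
clients.py gates the urlparse / urllib.parse rename behind a lib_urlsplit alias so get_log_client can keep deriving scheme and host from a log link on either interpreter. The sketch below mirrors that base_url construction with a placeholder NodeManager link:

    import sys

    if sys.version_info[0] > 2:
      from urllib.parse import urlsplit as lib_urlsplit
    else:
      from urlparse import urlsplit as lib_urlsplit

    # Placeholder NodeManager log link, not taken from a real cluster.
    log_link = 'http://nodemanager.example.com:8042/node/containerlogs/container_1/hue'

    components = lib_urlsplit(log_link)
    base_url = '%(scheme)s://%(netloc)s' % {
      'scheme': components[0],   # 'http'
      'netloc': components[1],   # 'nodemanager.example.com:8042'
    }
    assert base_url == 'http://nodemanager.example.com:8042'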

+ 1 - 0
desktop/libs/hadoop/src/hadoop/yarn/history_server_api.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from builtins import object
 import logging
 import logging
 import posixpath
 import posixpath
 import threading
 import threading

+ 2 - 0
desktop/libs/hadoop/src/hadoop/yarn/mapreduce_api.py

@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from past.builtins import basestring
+from builtins import object
 import logging
 import logging
 import posixpath
 import posixpath
 import threading
 import threading
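
mapreduce_api.py pulls basestring from past.builtins because the name is gone in Python 3; existing isinstance checks keep working unchanged. A sketch with a hypothetical helper (normalize_job_id is illustrative, not a function in this file):

    from past.builtins import basestring   # shipped with the "future" package

    def normalize_job_id(job_id):
      # Accept either a plain job-id string or any other value and return text;
      # the isinstance check reads exactly like the old Python 2 code.
      if isinstance(job_id, basestring):   # str on Py3, str/unicode on Py2
        return job_id
      return str(job_id)

    assert normalize_job_id('job_1550000000000_0001') == 'job_1550000000000_0001'
    assert normalize_job_id(42) == '42'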

+ 1 - 0
desktop/libs/hadoop/src/hadoop/yarn/node_manager_api.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from builtins import object
 import logging
 import logging
 import posixpath
 import posixpath
 
 

+ 2 - 1
desktop/libs/hadoop/src/hadoop/yarn/resource_manager_api.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from builtins import object
 import json
 import json
 import logging
 import logging
 import posixpath
 import posixpath
@@ -169,6 +170,6 @@ class ResourceManagerApi(object):
     response = None
     response = None
     try:
     try:
       response = function(*args, **kwargs)
       response = function(*args, **kwargs)
-    except Exception, e:
+    except Exception as e:
       raise PopupException(_('YARN RM returned a failed response: %s') % e)
       raise PopupException(_('YARN RM returned a failed response: %s') % e)
     return response
     return response
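
resource_manager_api.py only needs the exception-syntax change: the comma form "except Exception, e" is a SyntaxError on Python 3, while "except Exception as e" parses on both 2.6+ and 3. An illustrative wrapper in the same shape, with RuntimeError standing in for Hue's PopupException:

    def call_rm_api(function, *args, **kwargs):
      # Illustrative only; RuntimeError stands in for PopupException.
      try:
        return function(*args, **kwargs)
      except Exception as e:        # "except Exception, e:" would not compile on Python 3
        raise RuntimeError('YARN RM returned a failed response: %s' % e)

    try:
      call_rm_api(lambda: 1 // 0)
    except RuntimeError as err:
      print(err)                    # prints the wrapped error message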

+ 10 - 2
desktop/libs/hadoop/src/hadoop/yarn/spark_history_server_api.py

@@ -15,11 +15,15 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
 import json
 import json
 import logging
 import logging
 import posixpath
 import posixpath
+import sys
 import threading
 import threading
-import urlparse
+import urllib.parse
 
 
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.rest.http_client import HttpClient
 from desktop.lib.rest.http_client import HttpClient
@@ -30,6 +34,10 @@ from hadoop.yarn.clients import get_log_client
 
 
 from lxml import html
 from lxml import html
 
 
+if sys.version_info[0] > 2:
+  from urllib.parse import urlsplit as lib_urlsplit
+else:
+  from urlparse import urlsplit as lib_urlsplit
 
 
 LOG = logging.getLogger(__name__)
 LOG = logging.getLogger(__name__)
 
 
@@ -154,7 +162,7 @@ class SparkHistoryServerApi(object):
     if log_links and log_name in log_links:
     if log_links and log_name in log_links:
       log_link = log_links[log_name]
       log_link = log_links[log_name]
 
 
-      root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
+      root = Resource(get_log_client(log_link), lib_urlsplit(log_link)[2], urlencode=False)
       response = root.get('', params=params)
       response = root.get('', params=params)
       log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
       log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
     return log
     return log
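
spark_history_server_api.py ends up with both compatibility mechanisms at once: standard_library.install_aliases() makes "import urllib.parse" resolve on Python 2, while a separate sys.version_info guard also aliases urlsplit by hand. The duplication appears to be a byproduct of how futurize staged the change and is harmless. A minimal sketch of the install_aliases() route alone, with a placeholder history-server URL:

    from future import standard_library
    standard_library.install_aliases()   # maps Python 3 stdlib names onto Python 2

    import urllib.parse                  # importable on Python 2 once aliases are installed

    # Placeholder Spark history server URL, not from a real deployment.
    url = 'http://historyserver.example.com:18088/api/v1/applications'
    assert urllib.parse.urlsplit(url)[2] == '/api/v1/applications'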

+ 2 - 1
desktop/libs/hadoop/src/hadoop/yarn/tests.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
 
 
+from builtins import object
 import logging
 import logging
 
 
 from nose.tools import assert_true, assert_equal, assert_not_equal
 from nose.tools import assert_true, assert_equal, assert_not_equal
@@ -59,7 +60,7 @@ class MapreduceAPIMock(MapreduceApi):
     assert_equal(MapreduceAPIMock.EXPECTED_USERNAME, self.username)
     assert_equal(MapreduceAPIMock.EXPECTED_USERNAME, self.username)
 
 
 
 
-class TestMapReduceAPI():
+class TestMapReduceAPI(object):
 
 
   def setUp(self):
   def setUp(self):
     if not hasattr(self, 'originalMapReduceApi'):
     if not hasattr(self, 'originalMapReduceApi'):
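
The last hunk gives TestMapReduceAPI an explicit object base, imported from builtins so Python 2 also gets a new-style class (on Python 3 the import is just the built-in object). A small hypothetical class showing the effect:

    from builtins import object   # plain object on Python 3, future's shim on Python 2

    class TestMapReduceAPIExample(object):
      # Hypothetical stand-in for the real test class: the explicit object base
      # makes this a new-style class on Python 2, matching Python 3 semantics.
      def setUp(self):
        self.original_api = None   # placeholder attribute, not from the real test

    print(type(TestMapReduceAPIExample))   # <class 'type'> on both interpreters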