
HUE-8737 [hdfs] Fix py3 unit tests

Ying Chen, 6 years ago
Commit
f724abb72f

+ 0 - 1
desktop/core/src/desktop/lib/fs/gc/tests.py

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 import logging
 import unittest
 
-from mock import patch, Mock
 from nose.plugins.skip import SkipTest
 from nose.tools import assert_equal, assert_true, assert_not_equal
 

+ 6 - 1
desktop/libs/aws/src/aws/tests.py

@@ -16,9 +16,9 @@
 from __future__ import absolute_import
 
 import logging
+import sys
 import unittest
 
-from mock import patch, Mock
 from nose.tools import assert_equal, assert_true, assert_not_equal
 
 from aws import conf
@@ -27,6 +27,11 @@ from aws.client import Client, get_credential_provider
 from desktop.lib.fsmanager import get_client, clear_cache
 from desktop.lib.python_util import current_ms_from_utc
 
+if sys.version_info[0] > 2:
+  from unittest.mock import patch
+else:
+  from mock import patch
+
 LOG = logging.getLogger(__name__)
 
 
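For context, a minimal standalone sketch of the import guard added above (the test body below is hypothetical and not part of this commit): Python 3 ships mock as unittest.mock in the standard library, while Python 2 still needs the third-party mock backport, so binding patch once keeps the rest of the module version-agnostic. The same guard is added to desktop/libs/azure/src/azure/tests.py below.

import sys

if sys.version_info[0] > 2:
  from unittest.mock import patch   # standard library on Python 3
else:
  from mock import patch            # third-party backport on Python 2


def test_patch_example():
  # Hypothetical target: stub out time.time for the duration of the block.
  import time
  with patch('time.time', return_value=0.0):
    assert time.time() == 0.0
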

+ 6 - 1
desktop/libs/azure/src/azure/tests.py

@@ -16,9 +16,9 @@
 from __future__ import absolute_import
 
 import logging
+import sys
 import unittest
 
-from mock import patch, Mock, PropertyMock
 from nose.plugins.skip import SkipTest
 from nose.tools import assert_equal, assert_true, assert_not_equal
 
@@ -28,6 +28,11 @@ from azure.client import get_credential_provider
 from desktop.lib.fsmanager import get_client, clear_cache, is_enabled
 from desktop.lib.python_util import current_ms_from_utc
 
+if sys.version_info[0] > 2:
+  from unittest.mock import patch
+else:
+  from mock import patch
+
 LOG = logging.getLogger(__name__)
 
 

+ 1 - 1
desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py

@@ -592,7 +592,7 @@ class WebhdfsTests(unittest.TestCase):
     LOG.debug("%s" % resp)
     
     test_dir = self.prefix + "/temp2"
-    self.cluster.fs.mkdir(test_dir, 0333)
+    self.cluster.fs.mkdir(test_dir, 0o333)
     test_file2 = test_dir + "/fortest.txt"
     f = self.cluster.fs.open(test_file2, "w")
     f.write("ok")
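
A note on the one-character change above: bare octal literals such as 0333 are a SyntaxError on Python 3, while the 0o prefix parses on both Python 2.6+ and Python 3. A small standalone sketch (temporary directory, not Hue's test cluster):

import os
import stat
import tempfile

# 0o333 is the same value Python 2 spelled as 0333 (decimal 219).
assert 0o333 == 219

test_dir = os.path.join(tempfile.mkdtemp(), 'temp2')
os.mkdir(test_dir, 0o333)  # write/execute bits only, further masked by the process umask
print(oct(stat.S_IMODE(os.stat(test_dir).st_mode)))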

+ 14 - 12
desktop/libs/hadoop/src/hadoop/mini_cluster.py

@@ -63,10 +63,12 @@ if sys.version_info[0] > 2:
   from urllib.request import Request as lib_Request
   from urllib.error import URLError as lib_URLError
   from urllib.request import urlopen as lib_urlopen
+  open_file = open
 else:
   from urllib2 import Request as lib_Request
   from urllib2 import URLError as lib_URLError
   from urllib2 import urlopen as lib_urlopen
+  open_file = file
 
 # Starts mini cluster suspended until a debugger attaches to it.
 DEBUG_HADOOP=False
@@ -124,7 +126,7 @@ class MiniHadoopCluster(object):
     os.mkdir(in_conf_dir)
     self.log_dir = tmppath("logs")
     os.mkdir(self.log_dir)
-    f = file(os.path.join(in_conf_dir, "hadoop-metrics.properties"), "w")
+    f = open_file(os.path.join(in_conf_dir, "hadoop-metrics.properties"), "w")
     try:
       f.write("""
 dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
@@ -159,7 +161,7 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
       hadoop_policy_config['security.' + policy + '.protocol.acl'] = '*'
     write_config(hadoop_policy_config, tmppath('in-conf/hadoop-policy.xml'))
 
-    details_file = file(tmppath("details.json"), "w+")
+    details_file = open_file(tmppath("details.json"), "w+")
     try:
       args = [ os.path.join(hadoop.conf.HADOOP_MR1_HOME.get(), 'bin', 'hadoop'),
         "jar",
@@ -229,11 +231,11 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
       if USE_STDERR:
         stderr=sys.stderr
       else:
-        stderr=file(tmppath("stderr"), "w")
+        stderr=open_file(tmppath("stderr"), "w")
       LOGGER.debug("Starting minicluster: %s env: %s" % (repr(args), repr(env)))
       self.clusterproc = subprocess.Popen(
         args=args,
-        stdout=file(tmppath("stdout"), "w"),
+        stdout=open_file(tmppath("stdout"), "w"),
         stderr=stderr,
         env=env)
 
@@ -249,9 +251,9 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
         except ValueError:
           pass
         if self.clusterproc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
-          LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
+          LOGGER.debug("stdout:" + open_file(tmppath("stdout")).read())
           if not USE_STDERR:
-            LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
+            LOGGER.debug("stderr:" + open_file(tmppath("stderr")).read())
           self.stop()
           raise Exception("Cluster process quit or is taking too long to start.  Aborting.")
     finally:
@@ -297,8 +299,8 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
 
     self.secondary_proc = subprocess.Popen(
       args=args,
-      stdout=file(tmppath("stdout.2nn"), "w"),
-      stderr=file(tmppath("stderr.2nn"), "w"),
+      stdout=open_file(tmppath("stdout.2nn"), "w"),
+      stderr=open_file(tmppath("stderr.2nn"), "w"),
       env=env)
 
     while True:
@@ -308,9 +310,9 @@ rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
       except lib_URLError:
         # If we should abort startup.
         if self.secondary_proc.poll() is not None or (not DEBUG_HADOOP and (time.time() - start) > MAX_CLUSTER_STARTUP_TIME):
-          LOGGER.debug("stdout:" + file(tmppath("stdout")).read())
+          LOGGER.debug("stdout:" + open_file(tmppath("stdout")).read())
           if not USE_STDERR:
-            LOGGER.debug("stderr:" + file(tmppath("stderr")).read())
+            LOGGER.debug("stderr:" + open_file(tmppath("stderr")).read())
           self.stop()
           raise Exception("2nn process quit or is taking too long to start. Aborting.")
           break
@@ -442,7 +444,7 @@ def write_config(config, path, variables=None):
   from a configuration map (config), into a new file
   called path.
   """
-  f = file(path, "w")
+  f = open_file(path, "w")
   try:
     f.write("""<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
@@ -464,7 +466,7 @@ def _write_static_group_mapping(user_group_mapping, path):
   Create a Java-style .properties file to contain the static user -> group
   mapping used by tests.
   """
-  f = file(path, 'w')
+  f = open_file(path, 'w')
   try:
     for user, groups in user_group_mapping.items():
       f.write('%s = %s\n' % (user, ','.join(groups)))
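
For reference, the open_file shim this file and the following ones rely on, shown in isolation (the path and property written below are hypothetical): Python 2 exposes file as a builtin constructor, Python 3 removes it, so the module binds one callable and uses it for every file it opens.

import sys

if sys.version_info[0] > 2:
  open_file = open
else:
  open_file = file  # builtin exists only on Python 2, so this branch never runs on 3

f = open_file('hadoop-metrics.properties', 'w')  # hypothetical path, mirrors the usage above
try:
  f.write('dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext\n')
finally:
  f.close()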

+ 10 - 5
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -25,6 +25,7 @@ import shutil
 import signal
 import subprocess
 import socket
+import sys
 import tempfile
 import textwrap
 import time
@@ -37,6 +38,10 @@ import hadoop
 from hadoop import cluster
 from hadoop.mini_cluster import write_config
 
+if sys.version_info[0] > 2:
+  open_file = open
+else:
+  open_file = file
 
 _shared_cluster = None
 
@@ -342,8 +347,8 @@ class PseudoHdfs4(object):
 
   def _log_exit(self, proc_name, exit_code):
     LOG.info('%s exited with %s' % (proc_name, exit_code))
-    LOG.debug('--------------------- STDOUT:\n' + file(self._logpath(proc_name + '.stdout')).read())
-    LOG.debug('--------------------- STDERR:\n' + file(self._logpath(proc_name + '.stderr')).read())
+    LOG.debug('--------------------- STDOUT:\n' + open_file(self._logpath(proc_name + '.stdout')).read())
+    LOG.debug('--------------------- STDERR:\n' + open_file(self._logpath(proc_name + '.stderr')).read())
 
   def _is_hdfs_ready(self, env):
     if self._nn_proc.poll() is not None:
@@ -406,8 +411,8 @@ class PseudoHdfs4(object):
     args = (hadoop_bin, '--config', conf_dir, proc_name)
 
     LOG.info('Starting Hadoop cluster daemon: %s' % (args,))
-    stdout = file(self._logpath(proc_name + ".stdout"), 'w')
-    stderr = file(self._logpath(proc_name + ".stderr"), 'w')
+    stdout = open_file(self._logpath(proc_name + ".stdout"), 'w')
+    stderr = open_file(self._logpath(proc_name + ".stderr"), 'w')
 
     return subprocess.Popen(args=args, stdout=stdout, stderr=stderr, env=env)
 
@@ -533,7 +538,7 @@ class PseudoHdfs4(object):
     write_config(mapred_configs, self._tmppath('conf/mapred-site.xml'))
 
   def _write_hadoop_metrics_conf(self, conf_dir):
-    f = file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
+    f = open_file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
     try:
       f.write(textwrap.dedent("""
           dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext

+ 6 - 1
desktop/libs/hadoop/src/hadoop/test_hdfs_site.py

@@ -19,12 +19,17 @@ from __future__ import absolute_import
 from hadoop import conf
 import logging
 import os
+import sys
 import tempfile
 
 from nose.tools import assert_true, assert_equal, assert_false, assert_not_equal, assert_raises
 
 from hadoop import hdfs_site
 
+if sys.version_info[0] > 2:
+  open_file = open
+else:
+  open_file = file
 
 LOG = logging.getLogger(__name__)
 
@@ -48,7 +53,7 @@ def test_hdfs_site():
   </property>
 </configuration>
     """
-    file(os.path.join(hadoop_home, 'hdfs-site.xml'), 'w').write(xml)
+    open_file(os.path.join(hadoop_home, 'hdfs-site.xml'), 'w').write(xml)
 
     finish = conf.HDFS_CLUSTERS['default'].HADOOP_CONF_DIR.set_for_testing(hadoop_home)
     hdfs_site.reset()

+ 6 - 1
desktop/libs/hadoop/src/hadoop/test_ssl_client_site.py

@@ -19,12 +19,17 @@ from __future__ import absolute_import
 from hadoop import conf
 import logging
 import os
+import sys
 import tempfile
 
 from nose.tools import assert_true, assert_equal, assert_false, assert_not_equal, assert_raises
 
 from hadoop import ssl_client_site
 
+if sys.version_info[0] > 2:
+  open_file = open
+else:
+  open_file = file
 
 LOG = logging.getLogger(__name__)
 
@@ -56,7 +61,7 @@ def test_ssl_client_site():
 </configuration>
 
     """
-    file(os.path.join(hadoop_home, 'ssl-client.xml'), 'w').write(xml)
+    open_file(os.path.join(hadoop_home, 'ssl-client.xml'), 'w').write(xml)
 
     finish = conf.HDFS_CLUSTERS['default'].HADOOP_CONF_DIR.set_for_testing(hadoop_home)
     ssl_client_site.reset()

+ 6 - 2
desktop/libs/hadoop/src/hadoop/tests.py

@@ -35,9 +35,11 @@ from hadoop import confparse
 from hadoop import pseudo_hdfs4
 
 if sys.version_info[0] > 2:
-  from io import StringIO as string_io
+  from io import BytesIO as string_io
+  open_file = open
 else:
   from cStringIO import StringIO as string_io
+  open_file = file
 
 def test_confparse():
   data = """
@@ -63,6 +65,8 @@ def test_confparse():
   """
 
   cp_data = confparse.ConfParse(data)
+  if not isinstance(data, bytes):
+    data = data.encode()
   cp_file = confparse.ConfParse(string_io(data))
 
   for cp in (cp_data, cp_file):
@@ -87,7 +91,7 @@ def test_tricky_confparse():
   We found (experimentally) that dealing with a file
   sometimes triggered the wrong results here.
   """
-  cp_data = confparse.ConfParse(file(os.path.join(os.path.dirname(__file__),
+  cp_data = confparse.ConfParse(open_file(os.path.join(os.path.dirname(__file__),
                                                   "test_data",
                                                   "sample_conf.xml")))
   assert_equal("org.apache.hadoop.examples.SleepJob", cp_data["mapred.mapper.class"])
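
The last hunk switches the in-memory stream to BytesIO and encodes the test string first, presumably because ConfParse hands the file object to an XML parser, and on Python 3 that path expects bytes rather than text. A standalone sketch of the same idea using the standard-library parser (not Hue's ConfParse):

import sys
import xml.etree.ElementTree as ET

if sys.version_info[0] > 2:
  from io import BytesIO as string_io
else:
  from cStringIO import StringIO as string_io

data = '<configuration><property><name>fs.default.name</name></property></configuration>'
if not isinstance(data, bytes):
  data = data.encode()  # BytesIO refuses str on Python 3

root = ET.parse(string_io(data)).getroot()  # file-like parsing works on both interpreters
print(root.tag)  # -> configuration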