
HUE-2477 [fb] Run filebrowser.views_test:test_remove on a live HDFS cluster

Proof of concept: when LIVE_CLUSTER=true, run the test without booting the
mini cluster.

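As a minimal sketch (for illustration only, not part of the commit), the check below mirrors the opt-in added to shared_cluster() in the pseudo_hdfs4.py hunk: exporting LIVE_CLUSTER=true makes the tests target the configured 'default' HDFS instead of booting PseudoHdfs4.

    import os

    def use_live_cluster():
      # Same environment variable and default as the shared_cluster() change
      # in this commit: opt in by exporting LIVE_CLUSTER=true.
      return os.environ.get('LIVE_CLUSTER', 'false').lower() == 'true'
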
Add a root prefix for all HDFS paths of this test run.

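A small sketch of the path-prefixing idea (the helper name is hypothetical; the per-run root comes from _get_fs_prefix() in the pseudo_hdfs4.py hunk below):

    import posixpath

    def paths_under(fs_prefix):
      # Everything the test touches lives under <fs_prefix>/test-delete,
      # e.g. /tmp/hue_tests_<timestamp>/test-delete/1, so concurrent runs
      # are isolated and cleanup cannot stray outside the prefix.
      base = posixpath.join(fs_prefix, 'test-delete')
      return [posixpath.join(base, name) for name in ('1', '2', '3')]
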
Remove the usage of the HDFS superuser for most of the tests and use a
regular user instead.
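The regular-user setup follows the pattern visible in the views_test.py hunk below; a self-contained sketch, assuming a Hue/Django test environment with these helpers available:

    from django.contrib.auth.models import User
    from desktop.lib.django_test_util import make_logged_in_client
    from desktop.lib.test_utils import grant_access, add_to_group

    def login_as_regular_user(cluster):
      c = make_logged_in_client(is_superuser=False)  # plain 'test' user, not a superuser
      grant_access('test', 'test', 'filebrowser')    # let the user reach the filebrowser app
      add_to_group('test')
      cluster.fs.setuser('test')                     # subsequent HDFS calls run as 'test'
      return c, User.objects.get(username='test')
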
Romain Rigaux committed 10 years ago
parent
commit d1336c5
2 changed files with 58 additions and 28 deletions
  1. apps/filebrowser/src/filebrowser/views_test.py  (+8, -4)
  2. desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py  (+50, -24)

apps/filebrowser/src/filebrowser/views_test.py  (+8, -4)

@@ -23,6 +23,7 @@ import re
 import urlparse
 from avro import schema, datafile, io
 
+from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
 from django.utils.encoding import smart_str
 from nose.plugins.attrib import attr
@@ -30,7 +31,7 @@ from nose.plugins.skip import SkipTest
 from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal
 
 from desktop.lib.django_test_util import make_logged_in_client
-from desktop.lib.test_utils import grant_access
+from desktop.lib.test_utils import grant_access, add_to_group
 from hadoop import pseudo_hdfs4
 from filebrowser.views import location_to_url
 
@@ -61,12 +62,15 @@ def cleanup_file(cluster, path):
 @attr('requires_hadoop')
 def test_remove():
   cluster = pseudo_hdfs4.shared_cluster()
+  prefix = cluster.fs_prefix + '/test-delete'
 
   try:
-    c = make_logged_in_client(cluster.superuser)
-    cluster.fs.setuser(cluster.superuser)
+    c = make_logged_in_client(is_superuser=False)
+    grant_access('test', 'test', 'filebrowser')
+    add_to_group('test')
+    user = User.objects.get(username='test')
+    cluster.fs.setuser('test')
 
-    prefix = '/test-delete'
     PATH_1 = '/%s/1' % prefix
     PATH_2 = '/%s/2' % prefix
     PATH_3 = '/%s/3' % prefix

desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py  (+50, -24)

@@ -15,7 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import atexit
 import getpass
 import logging
@@ -31,6 +30,7 @@ import time
 from desktop.lib.python_util import find_unused_port
 
 import hadoop
+from hadoop import cluster
 from hadoop.mini_cluster import write_config
 from hadoop.job_tracker import LiveJobTracker
 from desktop.lib.paths import get_run_root
@@ -45,6 +45,25 @@ STARTUP_DEADLINE = 60.0
 CLEANUP_TMP_DIR = os.environ.get('MINI_CLUSTER_CLEANUP', 'true')
 
 
+def _get_fs_prefix(fs):
+  prefix = '/tmp/hue_tests_%s' % str(time.time())
+  fs.mkdir(prefix, 0777)
+  return prefix
+
+
+class LiveHdfs():
+  def __init__(self):
+    self.fs = cluster.get_hdfs('default')
+    # Assumes /tmp exists and is 1777
+
+    self.fs_prefix = _get_fs_prefix(self.fs)
+    LOG.info('Using %s as FS root' % self.fs_prefix)
+
+    # Might need more
+    self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
+    self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
+
+
 class PseudoHdfs4(object):
   """Run HDFS and MR2 locally, in pseudo-distributed mode"""
 
@@ -52,6 +71,7 @@ class PseudoHdfs4(object):
     self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_')
     os.chmod(self._tmpdir, 0755)
     self._superuser = getpass.getuser()
+    self.fs_prefix = None
 
     self._fs = None
     self._jt = None
@@ -263,6 +283,8 @@ class PseudoHdfs4(object):
     self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
     self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
 
+    self.fs_prefix = _get_fs_prefix(self.fs)
+
 
   def _start_mr2(self, env):
     LOG.info("Starting MR2")
@@ -512,37 +534,41 @@ def shared_cluster():
   global _shared_cluster
 
   if _shared_cluster is None:
-    cluster = PseudoHdfs4()
-    atexit.register(cluster.stop)
+    if os.environ.get('LIVE_CLUSTER', 'false').lower() == 'true':
+      cluster = LiveHdfs()
+    else:
+      cluster = PseudoHdfs4()
+      atexit.register(cluster.stop)
 
-    try:
-      cluster.start()
-    except Exception, ex:
-      LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))
+      try:
+        cluster.start()
+      except Exception, ex:
+        LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))
+
+      fqdn = socket.getfqdn()
+      webhdfs_url = "http://%s:%s/webhdfs/v1" % (fqdn, cluster.dfs_http_port,)
 
-    fqdn = socket.getfqdn()
-    webhdfs_url = "http://%s:%s/webhdfs/v1" % (fqdn, cluster.dfs_http_port,)
+      closers = [
+        hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
+        hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
 
-    closers = [
-      hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
-      hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
+        hadoop.conf.YARN_CLUSTERS['default'].HOST.set_for_testing(fqdn),
+        hadoop.conf.YARN_CLUSTERS['default'].PORT.set_for_testing(cluster._rm_port),
 
-      hadoop.conf.YARN_CLUSTERS['default'].HOST.set_for_testing(fqdn),
-      hadoop.conf.YARN_CLUSTERS['default'].PORT.set_for_testing(cluster._rm_port),
+        hadoop.conf.YARN_CLUSTERS['default'].RESOURCE_MANAGER_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
+        hadoop.conf.YARN_CLUSTERS['default'].PROXY_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
+        hadoop.conf.YARN_CLUSTERS['default'].HISTORY_SERVER_API_URL.set_for_testing('%s:%s' % (cluster._fqdn, cluster._jh_web_port,)),
+      ]
 
-      hadoop.conf.YARN_CLUSTERS['default'].RESOURCE_MANAGER_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
-      hadoop.conf.YARN_CLUSTERS['default'].PROXY_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
-      hadoop.conf.YARN_CLUSTERS['default'].HISTORY_SERVER_API_URL.set_for_testing('%s:%s' % (cluster._fqdn, cluster._jh_web_port,)),
-    ]
+      old = hadoop.cluster.clear_caches()
 
-    old = hadoop.cluster.clear_caches()
+      def restore_config():
+        hadoop.cluster.restore_caches(old)
+        for x in closers:
+          x()
 
-    def restore_config():
-      hadoop.cluster.restore_caches(old)
-      for x in closers:
-        x()
+      cluster.shutdown_hook = restore_config
 
-    cluster.shutdown_hook = restore_config
     _shared_cluster = cluster
 
   return _shared_cluster