Browse Source

HUE-2755 [hive] Move the test base infra to be re-entrant

Converted 3 Hive tests to check it
Make sure Impala DB cleanup failure shows up as error
Simplify FB tests
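
The re-entrancy in this change comes from giving every test run its own database name and dropping it again in a class-level teardown, instead of reusing fixed names such as `other_db`. A minimal sketch of the pattern, assuming the `get_db_prefix` helper and the nose-style `setup_class`/`teardown_class` hooks shown in the diff below (the method bodies here are illustrative only):

import time

def get_db_prefix(name='hive'):
  # One database per test run: a timestamp suffix means reruns never collide
  # with tables left behind by an earlier, aborted run.
  return 'hue_test_%s_%s' % (name, str(time.time()).replace('.', ''))

class SampleProvider(object):

  @classmethod
  def setup_class(cls):
    cls.db_name = get_db_prefix(name='hive')   # e.g. hue_test_hive_14181234567
    # ... create cls.db_name and load the sample tables into it ...

  @classmethod
  def teardown_class(cls):
    # Drop everything created above and assert the database is really gone,
    # so a cleanup failure shows up as a test error instead of being swallowed.
    pass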
Romain Rigaux 10 years ago
parent
commit
a81c4dd

+ 99 - 72
apps/beeswax/src/beeswax/test_base.py

@@ -39,6 +39,7 @@ import beeswax.conf
 
 from beeswax.server.dbms import get_query_server_config
 from beeswax.server import dbms
+from hadoop.pseudo_hdfs4 import is_live_cluster, get_db_prefix
 
 
 HIVE_SERVER_TEST_PORT = find_unused_port()
@@ -86,60 +87,69 @@ def _start_server(cluster):
   return subprocess.Popen(args=args, env=env, cwd=cluster._tmpdir, stdin=subprocess.PIPE)
 
 
-def get_shared_beeswax_server():
+def get_shared_beeswax_server(db_name='default'):
   global _SHARED_HIVE_SERVER
   global _SHARED_HIVE_SERVER_CLOSER
   if _SHARED_HIVE_SERVER is None:
 
     cluster = pseudo_hdfs4.shared_cluster()
 
-    HIVE_CONF = cluster.hadoop_conf_dir
-    finish = (
-      beeswax.conf.HIVE_SERVER_HOST.set_for_testing(get_localhost_name()),
-      beeswax.conf.HIVE_SERVER_PORT.set_for_testing(HIVE_SERVER_TEST_PORT),
-      beeswax.conf.HIVE_SERVER_BIN.set_for_testing(get_run_root('ext/hive/hive') + '/bin/hiveserver2'),
-      beeswax.conf.HIVE_CONF_DIR.set_for_testing(HIVE_CONF)
-    )
-
-    default_xml = """<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-<property>
-  <name>javax.jdo.option.ConnectionURL</name>
-  <value>jdbc:derby:;databaseName=%(root)s/metastore_db;create=true</value>
-  <description>JDBC connect string for a JDBC metastore</description>
-</property>
-
- <property>
-   <name>hive.server2.enable.impersonation</name>
-   <value>false</value>
- </property>
-
-<property>
-  <name>hive.querylog.location</name>
-  <value>%(querylog)s</value>
-</property>
-
-</configuration>
-""" % {'root': cluster._tmpdir, 'querylog': cluster.log_dir + '/hive'}
-
-    file(HIVE_CONF + '/hive-site.xml', 'w').write(default_xml)
-
-    global _SHARED_HIVE_SERVER_PROCESS
-
-    if _SHARED_HIVE_SERVER_PROCESS is None:
-      p = _start_server(cluster)
-      LOG.info("started")
-      cluster.fs.do_as_superuser(cluster.fs.chmod, '/tmp', 01777)
-
-      _SHARED_HIVE_SERVER_PROCESS = p
-      def kill():
-        LOG.info("Killing server (pid %d)." % p.pid)
-        os.kill(p.pid, 9)
-        p.wait()
-      atexit.register(kill)
+    if is_live_cluster():
+      def s():
+        pass
+    else:
+      HIVE_CONF = cluster.hadoop_conf_dir
+      finish = (
+        beeswax.conf.HIVE_SERVER_HOST.set_for_testing(get_localhost_name()),
+        beeswax.conf.HIVE_SERVER_PORT.set_for_testing(HIVE_SERVER_TEST_PORT),
+        beeswax.conf.HIVE_SERVER_BIN.set_for_testing(get_run_root('ext/hive/hive') + '/bin/hiveserver2'),
+        beeswax.conf.HIVE_CONF_DIR.set_for_testing(HIVE_CONF)
+      )
+
+      default_xml = """<?xml version="1.0"?>
+  <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+  <configuration>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:derby:;databaseName=%(root)s/metastore_db;create=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+   <property>
+     <name>hive.server2.enable.impersonation</name>
+     <value>false</value>
+   </property>
+
+  <property>
+    <name>hive.querylog.location</name>
+    <value>%(querylog)s</value>
+  </property>
+
+  </configuration>
+  """ % {'root': cluster._tmpdir, 'querylog': cluster.log_dir + '/hive'}
+
+      file(HIVE_CONF + '/hive-site.xml', 'w').write(default_xml)
+
+      global _SHARED_HIVE_SERVER_PROCESS
+
+      if _SHARED_HIVE_SERVER_PROCESS is None:
+        p = _start_server(cluster)
+        LOG.info("started")
+        cluster.fs.do_as_superuser(cluster.fs.chmod, '/tmp', 01777)
+
+        _SHARED_HIVE_SERVER_PROCESS = p
+        def kill():
+          LOG.info("Killing server (pid %d)." % p.pid)
+          os.kill(p.pid, 9)
+          p.wait()
+        atexit.register(kill)
+
+      def s():
+        for f in finish:
+          f()
+        cluster.stop()
 
       start = time.time()
       started = False
@@ -156,17 +166,12 @@ def get_shared_beeswax_server():
          started = True
          break
        except Exception, e:
-          LOG.info('HiveServer2 server status not started yet after: %s' % e)
+          LOG.info('HiveServer2 server could not be found after: %s' % e)
          time.sleep(sleep)
 
      if not started:
        raise Exception("Server took too long to come up.")
 
-    def s():
-      for f in finish:
-        f()
-      cluster.stop()
-
    _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER = cluster, s
 
  return _SHARED_HIVE_SERVER, _SHARED_HIVE_SERVER_CLOSER
@@ -319,12 +324,33 @@ class BeeswaxSampleProvider(object):
   """
   """
   @classmethod
   @classmethod
   def setup_class(cls):
   def setup_class(cls):
-    cls.cluster, shutdown = get_shared_beeswax_server()
+    cls.db_name = get_db_prefix(name='hive')
+    cls.cluster, shutdown = get_shared_beeswax_server(cls.db_name)
     cls.client = make_logged_in_client()
     cls.client = make_logged_in_client()
     # Weird redirection to avoid binding nonsense.
     # Weird redirection to avoid binding nonsense.
     cls.shutdown = [ shutdown ]
     cls.shutdown = [ shutdown ]
     cls.init_beeswax_db()
     cls.init_beeswax_db()
 
 
+  @classmethod
+  def teardown_class(cls):
+    if is_live_cluster():
+      # Delete test DB and tables
+      client = make_logged_in_client()
+      user = User.objects.get(username='test')
+      query_server = get_query_server_config()
+      db = dbms.get(user, query_server)
+      tables = db.get_tables(database=cls.db_name)
+      for table in tables:
+        make_query(client, 'DROP TABLE `%(db)s`.`%(table)s`' % {'db': cls.db_name, 'table': table}, wait=True)
+      make_query(client, 'DROP VIEW `%(db)s`.`myview`' % {'db': cls.db_name}, wait=True)
+      make_query(client, 'DROP DATABASE %(db)s' % {'db': cls.db_name}, wait=True)
+      make_query(client, 'DROP DATABASE %(db)s_other' % {'db': cls.db_name}, wait=True)
+
+      # Check the cleanup
+      databases = db.get_databases()
+      assert_false(cls.db_name in databases)
+      assert_false('%(db)s_other' % {'db': cls.db_name} in databases)
+
  @classmethod
  def init_beeswax_db(cls):
    """
@@ -334,37 +360,38 @@ class BeeswaxSampleProvider(object):
    if _INITIALIZED:
      return
 
-    make_query(cls.client, 'CREATE DATABASE other_db', wait=True)
+    make_query(cls.client, 'CREATE DATABASE %(db)s' % {'db': cls.db_name}, wait=True)
+    make_query(cls.client, 'CREATE DATABASE %(db)s_other' % {'db': cls.db_name}, wait=True)
 
-    data_file = u'/tmp/beeswax/sample_data_échantillon_%d.tsv'
+    data_file = cls.cluster.fs_prefix + u'/beeswax/sample_data_échantillon_%d.tsv'
 
    # Create a "test_partitions" table.
    CREATE_PARTITIONED_TABLE = """
-      CREATE TABLE test_partitions (foo INT, bar STRING)
+      CREATE TABLE `%(db)s`.`test_partitions` (foo INT, bar STRING)
      PARTITIONED BY (baz STRING, boom STRING)
      ROW FORMAT DELIMITED
        FIELDS TERMINATED BY '\t'
        LINES TERMINATED BY '\n'
-    """
+    """ % {'db': cls.db_name}
    make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)
    cls._make_data_file(data_file % 1)
 
    LOAD_DATA = """
-      LOAD DATA INPATH '%s'
-      OVERWRITE INTO TABLE test_partitions
+      LOAD DATA INPATH '%(data_file)s'
+      OVERWRITE INTO TABLE `%(db)s`.`test_partitions`
      PARTITION (baz='baz_one', boom='boom_two')
-    """ % (data_file % 1,)
+    """ % {'db': cls.db_name, 'data_file': data_file % 1}
    make_query(cls.client, LOAD_DATA, wait=True, local=False)
 
    # Insert additional partition data into "test_partitions" table
    ADD_PARTITION = """
-      ALTER TABLE test_partitions ADD PARTITION(baz='baz_foo', boom='boom_bar') LOCATION '/tmp/beeswax/baz_foo/boom_bar'
-    """
+      ALTER TABLE `%(db)s`.`test_partitions` ADD PARTITION(baz='baz_foo', boom='boom_bar') LOCATION '/tmp/beeswax/baz_foo/boom_bar'
+    """ % {'db': cls.db_name}
    make_query(cls.client, ADD_PARTITION, wait=True, local=False)
 
    # Create a bunch of other tables
    CREATE_TABLE = """
-      CREATE TABLE `%(name)s` (foo INT, bar STRING)
+      CREATE TABLE `%(db)s`.`%(name)s` (foo INT, bar STRING)
      COMMENT "%(comment)s"
      ROW FORMAT DELIMITED
        FIELDS TERMINATED BY '\t'
@@ -372,22 +399,22 @@ class BeeswaxSampleProvider(object):
     """
     """
 
 
     # Create a "test" table.
     # Create a "test" table.
-    table_info = dict(name='test', comment='Test table')
+    table_info = {'db': cls.db_name, 'name': 'test', 'comment': 'Test table'}
     cls._make_data_file(data_file % 2)
     cls._make_data_file(data_file % 2)
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)
 
 
     # Create a "test_utf8" table.
     # Create a "test_utf8" table.
-    table_info = dict(name='test_utf8', comment=cls.get_i18n_table_comment())
+    table_info = {'db': cls.db_name, 'name': 'test_utf8', 'comment': cls.get_i18n_table_comment()}
     cls._make_i18n_data_file(data_file % 3, 'utf-8')
     cls._make_i18n_data_file(data_file % 3, 'utf-8')
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)
 
 
     # Create a "test_latin1" table.
     # Create a "test_latin1" table.
-    table_info = dict(name='test_latin1', comment=cls.get_i18n_table_comment())
+    table_info = {'db': cls.db_name, 'name': 'test_latin1', 'comment': cls.get_i18n_table_comment()}
     cls._make_i18n_data_file(data_file % 4, 'latin1')
     cls._make_i18n_data_file(data_file % 4, 'latin1')
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)
     cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)
 
 
     # Create a "myview" view.
     # Create a "myview" view.
-    make_query(cls.client, "CREATE VIEW myview (foo, bar) as SELECT * FROM test", wait=True)
+    make_query(cls.client, "CREATE VIEW `%(db)s`.`myview` (foo, bar) as SELECT * FROM `%(db)s`.`test`" % {'db': cls.db_name}, wait=True)
 
 
     _INITIALIZED = True
     _INITIALIZED = True
 
 
@@ -399,9 +426,9 @@ class BeeswaxSampleProvider(object):
  def _make_table(cls, table_name, create_ddl, filename):
    make_query(cls.client, create_ddl, wait=True)
    LOAD_DATA = """
-      LOAD DATA INPATH '%s' OVERWRITE INTO TABLE %s
-    """ % (filename, table_name)
-    make_query(cls.client, LOAD_DATA, wait=True, local=False)
+      LOAD DATA INPATH '%(filename)s' OVERWRITE INTO TABLE `%(db)s`.`%(table_name)s`
+    """ % {'filename': filename, 'table_name': table_name, 'db': cls.db_name}
+    make_query(cls.client, LOAD_DATA, wait=True, local=False, database=cls.db_name)
 
  @classmethod
  def _make_data_file(cls, filename):

+ 14 - 29
apps/beeswax/src/beeswax/tests.py

@@ -122,31 +122,13 @@ class TestBeeswaxWithHadoop(BeeswaxSampleProvider):
 
  def test_query_with_error(self):
    # Creating a table "again" should not work; error should be displayed.
-    response = _make_query(self.client, "CREATE TABLE test (foo INT)", wait=True)
+    response = _make_query(self.client, "CREATE TABLE test (foo INT)", database=self.db_name, wait=True)
    content = json.loads(response.content)
    assert_true("AlreadyExistsException" in content.get('message'), content)
 
-  def test_configuration(self):
-    # No HS2 API
-    raise SkipTest
-
-    params = {'server': 'default'}
-
-    response = self.client.post("/beeswax/configuration", params)
-    response_verbose = self.client.post("/beeswax/configuration?include_hadoop=true", params)
-
-    assert_true("hive.exec.scratchdir" in response.content)
-    assert_true("hive.exec.scratchdir" in response_verbose.content)
-
-    assert_true("javax.jdo.option.ConnectionPassword**********" in response_verbose.content)
-    assert_true("javax.jdo.option.ConnectionPassword**********" in response_verbose.content)
-
-    assert_false("tasktracker.http.threads" in response.content)
-    assert_true("tasktracker.http.threads" in response_verbose.content)
-    assert_true("A base for other temporary directories" in response_verbose.content)
-
  def test_query_with_resource(self):
-    script = self.cluster.fs.open("/square.py", "w")
+    udf = self.cluster.fs_prefix + "/square.py"
+    script = self.cluster.fs.open(udf, "w")
    script.write(
      """#!/usr/bin/python
import sys
@@ -158,7 +140,7 @@ for x in sys.stdin:
 
    response = _make_query(self.client,
      "SELECT TRANSFORM (foo) USING 'python square.py' AS b FROM test",
-      resources=[("FILE", "/square.py")], local=False)
+      resources=[("FILE", udf)], local=False, database=self.db_name)
    response = wait_for_query_to_finish(self.client, response, max=180.0)
    content = fetch_query_result_data(self.client, response)
    assert_equal([['0'], ['1'], ['4'], ['9']], content["results"][0:4])
@@ -166,7 +148,7 @@ for x in sys.stdin:
  def test_query_with_setting(self):
    response = _make_query(self.client, "CREATE TABLE test2 AS SELECT foo+1 FROM test WHERE foo=4",
      settings=[("mapred.job.name", "test_query_with_setting"),
-        ("hive.exec.compress.output", "true")], local=False) # Run on MR, because that's how we check it worked.
+        ("hive.exec.compress.output", "true")], local=False, database=self.db_name) # Run on MR, because that's how we check it worked.
    response = wait_for_query_to_finish(self.client, response, max=180.0)
    # Check that we actually got a compressed output
    files = self.cluster.fs.listdir("/user/hive/warehouse/test2")
@@ -203,16 +185,19 @@ for x in sys.stdin:
 
  def test_basic_flow(self):
    # Minimal server operation
-    assert_equal(['default', 'other_db'], self.db.get_databases())
+    databases = self.db.get_databases()
+    assert_true('default' in databases, databases)
+    assert_true(self.db_name in databases, databases)
+    assert_true('%s_other' % self.db_name in databases, databases)
 
    # Use GROUP BY to trigger MR job
    QUERY = """
-      SELECT MIN(foo), MAX(foo), SUM(foo) FROM test;
-    """
-    response = _make_query(self.client, QUERY, local=False)
+      SELECT MIN(foo), MAX(foo), SUM(foo) FROM %(db)s.test;
+    """ % {'db': self.db_name}
+    response = _make_query(self.client, QUERY, local=False, database=self.db_name)
    content = json.loads(response.content)
    assert_true('watch_url' in content)
-    # Check that we report this query as "running". (This query takes a while.)
+    # Check that we report this query as "running" (this query should take a little while).
    self._verify_query_state(beeswax.models.QueryHistory.STATE.running)
 
    response = wait_for_query_to_finish(self.client, response, max=180.0)
@@ -226,7 +211,7 @@ for x in sys.stdin:
    QUERY = """
      SELECT * FROM test
    """
-    response = _make_query(self.client, QUERY, name='select star', local=False)
+    response = _make_query(self.client, QUERY, name='select star', local=False, database=self.db_name)
    response = wait_for_query_to_finish(self.client, response)
    content = fetch_query_result_data(self.client, response)
 

+ 2 - 33
apps/filebrowser/src/filebrowser/views_test.py

@@ -39,10 +39,8 @@ from conf import MAX_SNAPPY_DECOMPRESSION_SIZE
 from lib.rwx import expand_mode
 from views import snappy_installed
 
-
 LOG = logging.getLogger(__name__)
 
-
 def cleanup_tree(cluster, path):
   try:
     cluster.fs.rmtree(path)
@@ -50,7 +48,6 @@ def cleanup_tree(cluster, path):
     # Don't let cleanup errors mask earlier failures
     LOG.exception('failed to cleanup %s' % path)
 
-
 def cleanup_file(cluster, path):
   try:
     cluster.fs.remove(path)
@@ -58,8 +55,8 @@ def cleanup_file(cluster, path):
     # Don't let cleanup errors mask earlier failures
     LOG.exception('failed to cleanup %s' % path)
 
-
 class TestFileBrowserWithHadoop(object):
+  requires_hadoop = True
 
   def setUp(self):
     self.c = make_logged_in_client(username='test', is_superuser=False)
@@ -73,9 +70,9 @@ class TestFileBrowserWithHadoop(object):
 
   def tearDown(self):
     cleanup_tree(self.cluster, self.prefix)
+    assert_false(self.cluster.fs.exists(self.prefix))
     self.cluster.fs.setuser('test')
 
-  @attr('requires_hadoop')
   def test_remove(self):
     prefix = self.prefix + '/test-delete'
 
@@ -102,7 +99,6 @@ class TestFileBrowserWithHadoop(object):
     assert_false(self.cluster.fs.exists(PATH_3))
 
 
-  @attr('requires_hadoop')
   def test_move(self):
     prefix = self.cluster.fs_prefix + '/test-move'
 
@@ -145,7 +141,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(SUB_PATH2_3))
 
 
-  @attr('requires_hadoop')
   def test_copy(self):
     prefix = self.cluster.fs_prefix + '/test-copy'
 
@@ -188,7 +183,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(SUB_PATH2_3))
 
 
-  @attr('requires_hadoop')
   def test_mkdir_singledir(self):
     prefix = self.cluster.fs_prefix + '/test-filebrowser-mkdir'
 
@@ -212,7 +206,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(dir_listing[2]['name'], success_path)
 
 
-  @attr('requires_hadoop')
   def test_touch(self):
     prefix = self.cluster.fs_prefix + '/test-filebrowser-touch'
 
@@ -236,7 +229,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(file_listing[2]['name'], success_path)
 
 
-  @attr('requires_hadoop')
   def test_chmod(self):
     prefix = self.cluster.fs_prefix + '/test_chmod'
 
@@ -281,7 +273,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(041777, int(self.cluster.fs.stats(PATH_3)["mode"]))
 
 
-  @attr('requires_hadoop')
   def test_chmod_sticky(self):
     prefix = self.cluster.fs_prefix + '/test_chmod_sticky'
 
@@ -314,7 +305,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(False, mode[-1])
 
 
-  @attr('requires_hadoop')
   def test_chown(self):
     prefix = self.cluster.fs_prefix + '/test_chown'
     self.cluster.fs.mkdir(prefix)
@@ -356,7 +346,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal("y", self.cluster.fs.stats(PATH_3)["group"])
     assert_equal("y", self.cluster.fs.stats(PATH_3)["group"])
 
 
 
 
-  @attr('requires_hadoop')
   def test_rename(self):
   def test_rename(self):
     prefix = self.cluster.fs_prefix + '/test_rename'
     prefix = self.cluster.fs_prefix + '/test_rename'
     self.cluster.fs.mkdir(prefix)
     self.cluster.fs.mkdir(prefix)
@@ -374,7 +363,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(PREFIX + NEW_NAME))
 
 
-  @attr('requires_hadoop')
   def test_listdir(self):
     # Delete user's home if there's already something there
     home = self.cluster.fs.do_as_user('test', self.cluster.fs.get_home_dir)
@@ -444,7 +432,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal('%s/test_dir' % home, response.context['path'])
 
 
-  @attr('requires_hadoop')
   def test_listdir_sort_and_filter(self):
     prefix = self.cluster.fs_prefix + '/test_rename'
     self.cluster.fs.mkdir(prefix)
@@ -514,7 +501,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(['..', '.', '1'], [ f['name'] for f in listing ])
 
 
-  @attr('requires_hadoop')
   def test_chooser(self):
     prefix = self.cluster.fs_prefix + '/test_chooser'
     self.cluster.fs.mkdir(prefix)
@@ -527,7 +513,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal('/', dic['path'])
 
 
-  @attr('requires_hadoop')
   def test_view_snappy_compressed(self):
     if not snappy_installed():
       raise SkipTest
@@ -575,7 +560,6 @@ class TestFileBrowserWithHadoop(object):
         done()
 
 
-  @attr('requires_hadoop')
   def test_view_snappy_compressed_avro(self):
     if not snappy_installed():
       raise SkipTest
@@ -625,7 +609,6 @@ class TestFileBrowserWithHadoop(object):
         done()
 
 
-  @attr('requires_hadoop')
   def test_view_avro(self):
     prefix = self.cluster.fs_prefix + '/test_view_avro'
     self.cluster.fs.mkdir(prefix)
@@ -675,7 +658,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true('Failed to decompress' in response.context['message'])
 
 
-  @attr('requires_hadoop')
   def test_view_parquet(self):
     prefix = self.cluster.fs_prefix + '/test_view_parquet'
     self.cluster.fs.mkdir(prefix)
@@ -692,7 +674,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true('FRANCE' in response.context['view']['contents'])
 
 
-  @attr('requires_hadoop')
   def test_view_gz(self):
     prefix = self.cluster.fs_prefix + '/test_view_gz'
     self.cluster.fs.mkdir(prefix)
@@ -730,7 +711,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true("Failed to decompress" in response.context['message'])
     assert_true("Failed to decompress" in response.context['message'])
 
 
 
 
-  @attr('requires_hadoop')
   def test_view_i18n(self):
   def test_view_i18n(self):
     # Test viewing files in different encodings
     # Test viewing files in different encodings
     content = u'pt-Olá en-hello ch-你好 ko-안녕 ru-Здравствуйте'
     content = u'pt-Olá en-hello ch-你好 ko-안녕 ru-Здравствуйте'
@@ -753,7 +733,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal("http://testserver/filebrowser/view/user/test", response["location"])
     assert_equal("http://testserver/filebrowser/view/user/test", response["location"])
 
 
 
 
-  @attr('requires_hadoop')
   def test_view_access(self):
   def test_view_access(self):
     prefix = self.cluster.fs_prefix
     prefix = self.cluster.fs_prefix
     NO_PERM_DIR = prefix + '/test-no-perm'
     NO_PERM_DIR = prefix + '/test-no-perm'
@@ -768,7 +747,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true('Cannot access' in response.context['message'])
 
 
-  @attr('requires_hadoop')
   def test_index(self):
     HOME_DIR = '/user/test'
     NO_HOME_DIR = '/user/no_home'
@@ -788,7 +766,6 @@ class TestFileBrowserWithHadoop(object):
     assert_equal(None, response.context['home_directory'])
 
 
-  @attr('requires_hadoop')
   def test_edit_i18n(self):
     prefix = self.cluster.fs_prefix + '/test_view_gz'
     self.cluster.fs.mkdir(prefix)
@@ -815,7 +792,6 @@ class TestFileBrowserWithHadoop(object):
     edit_i18n_helper(self.c, self.cluster, 'johab', pass_1, pass_2)
 
 
-  @attr('requires_hadoop')
   def test_upload_file(self):
     prefix = self.cluster.fs_prefix + '/test_upload_file'
     self.cluster.fs.mkdir(prefix)
@@ -870,7 +846,6 @@ class TestFileBrowserWithHadoop(object):
       pass
 
 
-  @attr('requires_hadoop')
   def test_upload_zip(self):
     prefix = self.cluster.fs_prefix + '/test_upload_zip'
     self.cluster.fs.mkdir(prefix)
@@ -902,7 +877,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(HDFS_ZIP_FILE))
 
 
-  @attr('requires_hadoop')
   def test_upload_tgz(self):
     prefix = self.cluster.fs_prefix + '/test_upload_tgz'
     self.cluster.fs.mkdir(prefix)
@@ -935,7 +909,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(HDFS_TGZ_FILE))
 
 
-  @attr('requires_hadoop')
   def test_upload_bz2(self):
     prefix = self.cluster.fs_prefix + '/test_upload_bz2'
 
@@ -965,7 +938,6 @@ class TestFileBrowserWithHadoop(object):
     assert_true(self.cluster.fs.exists(HDFS_BZ2_FILE))
 
 
-  @attr('requires_hadoop')
   def test_trash(self):
     prefix = self.cluster.fs_prefix + '/test_trash'
     self.cluster.fs.mkdir(prefix)
@@ -996,7 +968,6 @@ class TestFileBrowserWithHadoop(object):
     response = self.c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
     assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)
 
-
 def view_i18n_helper(c, cluster, encoding, content):
   """
   Write the content in the given encoding directly into the filesystem.
@@ -1020,7 +991,6 @@ def view_i18n_helper(c, cluster, encoding, content):
   finally:
     cleanup_file(cluster, filename)
 
-
 def edit_i18n_helper(c, cluster, encoding, contents_pass_1, contents_pass_2):
   """
   Put the content into the file with a specific encoding.
@@ -1072,7 +1042,6 @@ def edit_i18n_helper(c, cluster, encoding, contents_pass_1, contents_pass_2):
   finally:
     cleanup_file(cluster, filename)
 
-
 def test_location_to_url():
   assert_equal('/filebrowser/view/var/lib/hadoop-hdfs', location_to_url('/var/lib/hadoop-hdfs', False))
   assert_equal('/filebrowser/view/var/lib/hadoop-hdfs', location_to_url('hdfs://localhost:8020/var/lib/hadoop-hdfs'))

+ 32 - 28
apps/impala/src/impala/tests.py

@@ -36,7 +36,7 @@ from beeswax.models import SavedQuery, QueryHistory
 from beeswax.server import dbms
 from beeswax.test_base import get_query_server_config, wait_for_query_to_finish, fetch_query_result_data
 from beeswax.tests import _make_query
-from hadoop.pseudo_hdfs4 import get_db_prefix, is_live_cluser
+from hadoop.pseudo_hdfs4 import get_db_prefix, is_live_cluster
 
 from impala.conf import SERVER_HOST
 
@@ -103,21 +103,22 @@ class TestMockedImpala:
 
 class TestImpalaIntegration:
 
-  def setUp(self):
-    self.finish = []
+  @classmethod
+  def setup_class(cls):
+    cls.finish = []
 
     # We need a real Impala cluster currently
-    if (not 'impala' in sys.argv and not os.environ.get('TEST_IMPALAD_HOST')) or not is_live_cluser():
+    if (not 'impala' in sys.argv and not os.environ.get('TEST_IMPALAD_HOST')) or not is_live_cluster():
       raise SkipTest
 
     if os.environ.get('TEST_IMPALAD_HOST'):
-      self.finish.append(SERVER_HOST.set_for_testing(os.environ.get('TEST_IMPALAD_HOST')))
+      cls.finish.append(SERVER_HOST.set_for_testing(os.environ.get('TEST_IMPALAD_HOST')))
 
-    self.client = make_logged_in_client()
-    self.user = User.objects.get(username='test')
+    cls.client = make_logged_in_client()
+    cls.user = User.objects.get(username='test')
     add_to_group('test')
-    self.db = dbms.get(self.user, get_query_server_config(name='impala'))
-    self.DATABASE = get_db_prefix()
+    cls.db = dbms.get(cls.user, get_query_server_config(name='impala'))
+    cls.DATABASE = get_db_prefix(name='impala')
 
     hql = """
       USE default;
@@ -126,10 +127,10 @@ class TestImpalaIntegration:
       CREATE DATABASE %(db)s;
 
       USE %(db)s;
-    """ % {'db': self.DATABASE}
+    """ % {'db': cls.DATABASE}
 
-    resp = _make_query(self.client, hql, database='default', local=False, server_name='impala')
-    resp = wait_for_query_to_finish(self.client, resp, max=30.0)
+    resp = _make_query(cls.client, hql, database='default', local=False, server_name='impala')
+    resp = wait_for_query_to_finish(cls.client, resp, max=30.0)
 
     hql = """
       CREATE TABLE tweets (row_num INTEGER, id_str STRING, text STRING) STORED AS PARQUET;
@@ -141,23 +142,26 @@ class TestImpalaIntegration:
       INSERT INTO TABLE tweets VALUES (5, "531091827949309000", "i think @WWERollins was robbed of the IC title match this week on RAW also i wonder if he will get a rematch i hope so @WWE");
     """
 
-    resp = _make_query(self.client, hql, database=self.DATABASE, local=False, server_name='impala')
-    resp = wait_for_query_to_finish(self.client, resp, max=30.0)
+    resp = _make_query(cls.client, hql, database=cls.DATABASE, local=False, server_name='impala')
+    resp = wait_for_query_to_finish(cls.client, resp, max=30.0)
 
-  def tearDown(self):
-    try:
-      # We need to drop tables before dropping the database
-      hql = """
-      USE default;
-      DROP TABLE IF EXISTS %(db)s.tweets;
-      DROP DATABASE %(db)s;
-      """ % {'db': self.DATABASE}
-      resp = _make_query(self.client, hql, database='default', local=False, server_name='impala')
-      resp = wait_for_query_to_finish(self.client, resp, max=30.0)
-    except Exception, e:
-      LOG.exception('Problem deleting Impala integration test DB')
-
-    for f in self.finish:
+  @classmethod
+  def teardown_class(cls):
+    # We need to drop tables before dropping the database
+    hql = """
+    USE default;
+    DROP TABLE IF EXISTS %(db)s.tweets;
+    DROP DATABASE %(db)s;
+    """ % {'db': cls.DATABASE}
+    resp = _make_query(cls.client, hql, database='default', local=False, server_name='impala')
+    resp = wait_for_query_to_finish(cls.client, resp, max=30.0)
+
+    # Check the cleanup
+    databases = cls.db.get_databases()
+    assert_false(cls.DATABASE in databases)
+    assert_false('%(db)s_other' % {'db': cls.DATABASE} in databases)
+
+    for f in cls.finish:
       f()
 
   def test_basic_flow(self):

+ 4 - 4
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -45,7 +45,7 @@ STARTUP_DEADLINE = 60.0
 CLEANUP_TMP_DIR = os.environ.get('MINI_CLUSTER_CLEANUP', 'true')
 
 
-def is_live_cluser():
+def is_live_cluster():
   return os.environ.get('LIVE_CLUSTER', 'false').lower() == 'true'
 
 def get_fs_prefix(fs):
@@ -53,8 +53,8 @@ def get_fs_prefix(fs):
   fs.mkdir(prefix, 0777)
   return prefix
 
-def get_db_prefix():
-  return 'hue_test__%s' % str(time.time()).replace('.', '')
+def get_db_prefix(name='hive'):
+  return 'hue_test_%s_%s' % (name, str(time.time()).replace('.', ''))
 
 
 class LiveHdfs():
@@ -544,7 +544,7 @@ def shared_cluster():
   global _shared_cluster
 
   if _shared_cluster is None:
-    if is_live_cluser():
+    if is_live_cluster():
       cluster = LiveHdfs()
     else:
       cluster = PseudoHdfs4()
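
For reference, the live-cluster switch introduced here is driven entirely by environment variables. A rough usage sketch, not part of the commit: `is_live_cluster()` only reads `LIVE_CLUSTER`, and the Impala integration test additionally looks at `TEST_IMPALAD_HOST`; the host name below is a hypothetical placeholder.

import os

os.environ['LIVE_CLUSTER'] = 'true'                      # shared_cluster() then returns LiveHdfs()
os.environ['TEST_IMPALAD_HOST'] = 'impalad.example.com'  # hypothetical Impala daemon host

from hadoop.pseudo_hdfs4 import is_live_cluster, get_db_prefix

assert is_live_cluster()
print get_db_prefix(name='impala')                       # e.g. hue_test_impala_14181234567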