
[beeswax] Add support to run beeswax tests in Hive on Spark mode

Jenny Kim 9 years ago
commit 60aba81
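
This change gates all Spark-specific behaviour on a single environment variable: the new is_hive_on_spark() helper in test_base.py returns True when ENABLE_HIVE_ON_SPARK is set to "true", setup_class() then issues SET hive.execution.engine=spark, and the tests relax their MapReduce-only assertions. A minimal sketch of opting a test run into this mode (only ENABLE_HIVE_ON_SPARK comes from the commit; the rest is illustrative):

# Sketch only: enable Hive on Spark mode before the beeswax tests import test_base.
import os
os.environ['ENABLE_HIVE_ON_SPARK'] = 'true'

from beeswax.test_base import is_hive_on_spark
assert is_hive_on_spark()  # setup_class() will now run SET hive.execution.engine=spark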

+ 24 - 4
apps/beeswax/src/beeswax/test_base.py

@@ -35,7 +35,7 @@ from desktop.lib.python_util import find_unused_port
 from desktop.lib.exceptions import StructuredThriftTransportException
 from desktop.lib.security_util import get_localhost_name
 from desktop.lib.test_utils import add_to_group, grant_access
-from hadoop import pseudo_hdfs4
+from hadoop import cluster, pseudo_hdfs4
 from hadoop.pseudo_hdfs4 import is_live_cluster, get_db_prefix

 import beeswax.conf
@@ -55,6 +55,10 @@ _SHARED_HIVE_SERVER_CLOSER = None
 LOG = logging.getLogger(__name__)


+def is_hive_on_spark():
+  return os.environ.get('ENABLE_HIVE_ON_SPARK', 'false').lower() == 'true'
+
+
 def _start_server(cluster):
   args = [beeswax.conf.HIVE_SERVER_BIN.get()]

@@ -124,8 +128,6 @@ def get_shared_beeswax_server(db_name='default'):
             raise
         except Exception, e:
           LOG.exception('Failed to open Hive Server session')
-          import pdb
-          pdb.set_trace()
         else:
           started = True
           break
@@ -351,6 +353,7 @@ class BeeswaxSampleProvider(object):
   def setup_class(cls):
     cls.db_name = get_db_prefix(name='hive')
     cls.cluster, shutdown = get_shared_beeswax_server(cls.db_name)
+    cls.set_execution_engine()
     cls.client = make_logged_in_client(username='test', is_superuser=False)
     add_to_group('test', 'test')
     grant_access('test', 'test', 'beeswax')
@@ -363,11 +366,17 @@ class BeeswaxSampleProvider(object):
   def teardown_class(cls):
     if is_live_cluster():
       # Delete test DB and tables
+      query_server = get_query_server_config()
       client = make_logged_in_client()
       user = User.objects.get(username='test')
-      query_server = get_query_server_config()
+
       db = dbms.get(user, query_server)

+      # Kill Spark context if running
+      if is_hive_on_spark() and cluster.is_yarn():
+        # TODO: We should clean up the running Hive on Spark job here
+        pass
+
       for db_name in [cls.db_name, '%s_other' % cls.db_name]:
         databases = db.get_databases()

@@ -385,6 +394,17 @@ class BeeswaxSampleProvider(object):
       global _INITIALIZED
       _INITIALIZED = False

+  @classmethod
+  def set_execution_engine(cls):
+    query_server = get_query_server_config()
+
+    if query_server['server_name'] == 'beeswax' and is_hive_on_spark():
+      user = User.objects.get(username='test')
+      db = dbms.get(user, query_server)
+
+      LOG.info("Setting Hive execution engine to Spark")
+      db.execute_statement('SET hive.execution.engine=spark')
+
   @classmethod
   def init_beeswax_db(cls):
     """

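teardown_class() above only marks the cleanup of the still-running Hive on Spark job with a TODO. One possible way to fill it in, assuming the tests run against YARN and that the ResourceManager REST API is reachable (the address below is an assumption, not taken from the commit), is to kill any leftover SPARK applications:

# Sketch only: not part of the commit. Kills RUNNING applications of type SPARK
# through the standard YARN ResourceManager REST API.
import requests

RM_URL = 'http://localhost:8088'  # assumed ResourceManager address

apps = requests.get(RM_URL + '/ws/v1/cluster/apps',
                    params={'states': 'RUNNING', 'applicationTypes': 'SPARK'}).json()

for app in ((apps.get('apps') or {}).get('app') or []):
  # Asking the RM to move the application to KILLED stops the lingering Spark context.
  requests.put('%s/ws/v1/cluster/apps/%s/state' % (RM_URL, app['id']),
               json={'state': 'KILLED'})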
+ 13 - 6
apps/beeswax/src/beeswax/tests.py

@@ -78,7 +78,7 @@ from beeswax.server.dbms import QueryServerException
 from beeswax.server.hive_server2_lib import HiveServerClient,\
   PartitionKeyCompatible, PartitionValueCompatible, HiveServerTable,\
   HiveServerTColumnValue2
-from beeswax.test_base import BeeswaxSampleProvider
+from beeswax.test_base import BeeswaxSampleProvider, is_hive_on_spark
 from beeswax.hive_site import get_metastore


@@ -218,8 +218,10 @@ for x in sys.stdin:
     response = _make_query(self.client, QUERY, local=False, database=self.db_name)
     content = json.loads(response.content)
     assert_true('watch_url' in content)
+
     # Check that we report this query as "running" (this query should take a little while).
-    self._verify_query_state(beeswax.models.QueryHistory.STATE.running)
+    if not is_hive_on_spark():
+      self._verify_query_state(beeswax.models.QueryHistory.STATE.running)
     response = wait_for_query_to_finish(self.client, response, max=180.0)
     content = fetch_query_result_data(self.client, response)
@@ -365,10 +367,12 @@ for x in sys.stdin:

     assert_equal([2.0, 4.0], content["results"][0])
     log = content['log']
-    assert_true(search_log_line('map = 100%', log), log)
-    assert_true(search_log_line('reduce = 100%', log), log)
-    # Test job extraction while we're at it
-    assert_equal(1, len(content["hadoop_jobs"]), "Should have started 1 job and extracted it.")
+
+    if not is_hive_on_spark():
+      assert_true(search_log_line('map = 100%', log), log)
+      assert_true(search_log_line('reduce = 100%', log), log)
+      # Test job extraction while we're at it
+      assert_equal(1, len(content["hadoop_jobs"]), "Should have started 1 job and extracted it.")


   def test_query_with_remote_udf(self):
@@ -1993,6 +1997,9 @@ for x in sys.stdin:
     """
     Test that the HS2 logs send back the ql.Driver log output with JobID
     """
+    if is_hive_on_spark():
+      raise SkipTest
+
     hql = "SELECT foo FROM `%(db)s`.`test` GROUP BY foo" % {'db': self.db_name}  # GROUP BY forces the MR job
     response = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.db_name)
     content = fetch_query_result_data(self.client, response)