
HUE-8758 [impala] First end to end via connector interface only

Romain 6 years ago
parent revision 8e18eba6e0

+ 102 - 87
apps/beeswax/src/beeswax/server/dbms.py

@@ -91,102 +91,117 @@ def get(user, query_server=None, cluster=None):
     DBMS_CACHE_LOCK.release()
 
 
-def get_query_server_config(name='beeswax', server=None, cluster=None):
-  LOG.debug("Query cluster %s: %s" % (name, cluster))
-
-  cluster_config = get_cluster_config(cluster)
-  if name == "llap":
-    activeEndpoint = cache.get('llap')
-    if activeEndpoint is None:
-      if HIVE_DISCOVERY_LLAP.get():
-        LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
-        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
-        zk.start()
-        if HIVE_DISCOVERY_LLAP_HA.get():
-          znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
-          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
-          if zk.exists(znode):
-            hiveservers = zk.get_children(znode)
-            for server in hiveservers:
-              llap_servers= json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
-              if llap_servers["api"] == "activeEndpoint":
-                cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
+def get_query_server_config(name='beeswax', connector=None):
+  LOG.debug("Query cluster %s: %s" % (name, connector))
+
+  if connector:
+    query_server = {
+        'server_name': connector['type'],
+        'server_host': connector['options']['server_host'],
+        'server_port': connector['options']['server_port'],
+        'principal': 'TODO',
+        'auth_username': AUTH_USERNAME.get(),
+        'auth_password': AUTH_PASSWORD.get(),
+
+        'impersonation_enabled': False, # TODO, Impala only, to add to connector class
+        'SESSION_TIMEOUT_S': 15 * 60,
+        'querycache_rows': 1000,
+        'QUERY_TIMEOUT_S': 15 * 60,
+    }
+  else:
+    cluster_config = get_cluster_config(cluster)
+    if name == "llap":
+      activeEndpoint = cache.get('llap')
+      if activeEndpoint is None:
+        if HIVE_DISCOVERY_LLAP.get():
+          LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
+          zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+          zk.start()
+          if HIVE_DISCOVERY_LLAP_HA.get():
+            znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+            LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+            if zk.exists(znode):
+              hiveservers = zk.get_children(znode)
+              for server in hiveservers:
+                llap_servers= json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
+                if llap_servers["api"] == "activeEndpoint":
+                  cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
+            else:
+              LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
+              cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}), CACHE_TIMEOUT.get())
           else:
-            LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
-            cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}), CACHE_TIMEOUT.get())
+            znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+            LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+            if zk.exists(znode):
+              hiveservers = zk.get_children(znode)
+              for server in hiveservers:
+                cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
+          zk.stop()
         else:
-          znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
-          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+          LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
+          cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_THRIFT_PORT.get()}), CACHE_TIMEOUT.get())
+      activeEndpoint = json.loads(cache.get("llap"))
+    elif name != 'hms' and name != 'impala':
+      activeEndpoint = cache.get("hiveserver2")
+      if activeEndpoint is None:
+        if HIVE_DISCOVERY_HS2.get():
+          zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+          zk.start()
+          znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
+          LOG.info("Setting up Hive with the following node {0}".format(znode))
           if zk.exists(znode):
             hiveservers = zk.get_children(znode)
-            for server in hiveservers:
-              cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
-        zk.stop()
-      else:
-        LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
-        cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_THRIFT_PORT.get()}), CACHE_TIMEOUT.get())
-    activeEndpoint = json.loads(cache.get("llap"))
-  elif name != 'hms' and name != 'impala':
-    activeEndpoint = cache.get("hiveserver2")
-    if activeEndpoint is None:
-      if HIVE_DISCOVERY_HS2.get():
-        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
-        zk.start()
-        znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
-        LOG.info("Setting up Hive with the following node {0}".format(znode))
-        if zk.exists(znode):
-          hiveservers = zk.get_children(znode)
-          server_to_use = 0 # if CONF.HIVE_SPREAD.get() randint(0, len(hiveservers)-1) else 0
-          cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
+            server_to_use = 0 # if CONF.HIVE_SPREAD.get() randint(0, len(hiveservers)-1) else 0
+            cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
+          else:
+            cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
+          zk.stop()
         else:
           cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
-        zk.stop()
-      else:
-        cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
-    activeEndpoint = json.loads(cache.get("hiveserver2"))
-
-  if name == 'impala':
-    from impala.dbms import get_query_server_config as impala_query_server_config
-    query_server = impala_query_server_config(cluster_config=cluster_config)
-  elif name == 'hms':
-    kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
-    query_server = {
-        'server_name': 'hms',
-        'server_host': HIVE_METASTORE_HOST.get() if not cluster_config else cluster_config.get('server_host'),
-        'server_port': HIVE_METASTORE_PORT.get(),
-        'principal': kerberos_principal,
-        'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
-        'auth_username': AUTH_USERNAME.get(),
-        'auth_password': AUTH_PASSWORD.get()
-    }
-  else:
-    kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
-    query_server = {
-        'server_name': 'beeswax',
-        'server_host': activeEndpoint["host"],
-        'server_port': LLAP_SERVER_PORT.get() if name == 'llap' else HIVE_SERVER_PORT.get(),
-        'principal': kerberos_principal,
-        'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
-            'protocol': 'https' if hiveserver2_use_ssl() else 'http',
-            'host': activeEndpoint["host"],
-            'port': activeEndpoint["port"],
-            'end_point': hive_site.hiveserver2_thrift_http_path()
-          },
-        'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
-        'auth_username': AUTH_USERNAME.get(),
-        'auth_password': AUTH_PASSWORD.get()
+      activeEndpoint = json.loads(cache.get("hiveserver2"))
+
+    if name == 'impala':
+      from impala.dbms import get_query_server_config as impala_query_server_config
+      query_server = impala_query_server_config(cluster_config=cluster_config)
+    elif name == 'hms':
+      kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
+      query_server = {
+          'server_name': 'hms',
+          'server_host': HIVE_METASTORE_HOST.get() if not cluster_config else cluster_config.get('server_host'),
+          'server_port': HIVE_METASTORE_PORT.get(),
+          'principal': kerberos_principal,
+          'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
+          'auth_username': AUTH_USERNAME.get(),
+          'auth_password': AUTH_PASSWORD.get()
       }
-  if name == 'sparksql': # Spark SQL is almost the same as Hive
-    from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT
-
-    query_server.update({
-        'server_name': 'sparksql',
-        'server_host': SPARK_SERVER_HOST.get(),
-        'server_port': SPARK_SERVER_PORT.get()
-    })
+    else:
+      kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
+      query_server = {
+          'server_name': 'beeswax',
+          'server_host': activeEndpoint["host"],
+          'server_port': LLAP_SERVER_PORT.get() if name == 'llap' else HIVE_SERVER_PORT.get(),
+          'principal': kerberos_principal,
+          'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
+              'protocol': 'https' if hiveserver2_use_ssl() else 'http',
+              'host': activeEndpoint["host"],
+              'port': activeEndpoint["port"],
+              'end_point': hive_site.hiveserver2_thrift_http_path()
+            },
+          'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
+          'auth_username': AUTH_USERNAME.get(),
+          'auth_password': AUTH_PASSWORD.get()
+        }
+    if name == 'sparksql': # Spark SQL is almost the same as Hive
+      from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT
+
+      query_server.update({
+          'server_name': 'sparksql',
+          'server_host': SPARK_SERVER_HOST.get(),
+          'server_port': SPARK_SERVER_PORT.get()
+      })
 
   debug_query_server = query_server.copy()
-  debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password'))
+  debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password', None))
   LOG.debug("Query Server: %s" % debug_query_server)
 
   return query_server
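
With the new connector branch, the query server is built directly from the connector's 'type' and 'options' instead of going through ZooKeeper or cluster discovery. A minimal sketch of how this path is exercised, assuming a hypothetical Impala connector instance (host and port values are illustrative):

    # Hypothetical connector dict, mirroring the keys read in get_query_server_config()
    connector = {
        'type': 'impala',
        'options': {'server_host': 'impala-coordinator.example.com', 'server_port': 21050}
    }
    query_server = get_query_server_config(name='impala', connector=connector)
    assert query_server['server_name'] == 'impala'
    assert query_server['server_port'] == 21050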

+ 4 - 4
apps/impala/src/impala/dbms.py

@@ -37,11 +37,11 @@ from notebook.conf import get_ordered_interpreters
 LOG = logging.getLogger(__name__)
 
 
-def get_query_server_config(cluster_config=None):
+def get_query_server_config():
   query_server = {
-      'server_name': _get_server_name(cluster_config),
-      'server_host': conf.SERVER_HOST.get() if not cluster_config else cluster_config.get('server_host'),
-      'server_port': conf.SERVER_PORT.get() if not cluster_config else 21050,
+      'server_name': 'impala',
+      'server_host': conf.SERVER_HOST.get(),
+      'server_port': conf.SERVER_PORT.get(),
       'principal': conf.IMPALA_PRINCIPAL.get(),
       'impersonation_enabled': conf.IMPERSONATION_ENABLED.get(),
       'querycache_rows': conf.QUERYCACHE_ROWS.get(),
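
The Impala helper no longer accepts a cluster_config and always resolves its settings from the Impala conf. A hedged sketch of a call site, reusing beeswax's dbms.get() as elsewhere in this commit (the user object is assumed):

    from impala.dbms import get_query_server_config as impala_query_server_config
    from beeswax.server import dbms

    query_server = impala_query_server_config()   # reads SERVER_HOST / SERVER_PORT straight from the Impala conf
    db = dbms.get(user, query_server=query_server)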

+ 6 - 0
desktop/core/src/desktop/lib/connectors/api.py

@@ -45,6 +45,7 @@ CONNECTOR_TYPES = [{
   ]
 ]
 
+
 CONNECTOR_TYPES += [
   {'name': "Hive Tez", 'type': 'hive-tez', 'interface': 'hiveserver2', 'settings': [{'name': 'server_host', 'value': ''}, {'name': 'server_port', 'value': ''},], 'id': None, 'category': 'engines', 'description': ''},
   {'name': "Hive LLAP", 'type': 'hive-llap', 'interface': 'hiveserver2', 'settings': [{'name': 'server_host', 'value': ''}, {'name': 'server_port', 'value': ''},], 'id': None, 'category': 'engines', 'description': ''},
@@ -59,6 +60,9 @@ CONNECTOR_TYPES += [
   {'name': "Oracle", 'type': 'oracle', 'interface': 'sqlalchemy', 'settings': [], 'id': None, 'category': 'engines', 'description': ''},
   {'name': "SQL Database", 'type': 'sql-alchemy', 'interface': 'sqlalchemy', 'settings': [], 'id': None, 'category': 'engines', 'description': ''},
   {'name': "SQL Database (JDBC)", 'type': 'sql-jdbc', 'interface': 'sqlalchemy', 'settings': [], 'id': None, 'category': 'engines', 'description': 'Deprecated: older way to connect to any database.'},
+  # solr
+  # hbase
+  # kafka
 
   {'name': "PySpark", 'type': 'pyspark', 'settings': [], 'id': None, 'category': 'engines', 'description': ''},
   {'name': "Spark", 'type': 'spark', 'settings': [], 'id': None, 'category': 'engines', 'description': ''},
@@ -108,6 +112,8 @@ AVAILABLE_CONNECTORS = _group_category_connectors(CONNECTOR_TYPES)
 # TODO: load back from DB and apply Category properties, e.g. defaults, interface, category, category_name...
 # TODO: connector groups: if we want one type (e.g. Hive) to show-up with multiple computes and the same saved query.
 # TODO: type --> name, type --> SQL language, e.g. mysql
+
+# connector_type: engine, engine_type: sql, language: hive, hive tez, hiveserver2 + endpoint
 CONNECTOR_INSTANCES = [
   {'name': 'Impala', 'type': Impala().TYPE + '-1', 'connector_name': Impala().TYPE, 'interface': Impala().INTERFACE, 'settings': Impala().PROPERTIES, 'id': 1, 'category': 'engines', 'description': ''},
   {'name': 'Hive', 'type': Hive().TYPE + '-2', 'connector_name': Hive().TYPE, 'interface': Hive().INTERFACE, 'settings': Hive().PROPERTIES, 'id': 2, 'category': 'engines', 'description': ''},
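
Each CONNECTOR_INSTANCES entry ties a numbered instance back to a connector type and carries the settings that get_query_server_config() later reads as 'options'. A sketch of the expected shape with illustrative values (the interface and settings shown are assumptions based on the Impala() properties referenced above):

    {
      'name': 'Impala',
      'type': 'impala-1',            # connector type + instance id suffix
      'connector_name': 'impala',    # points back to the CONNECTOR_TYPES entry
      'interface': 'hiveserver2',
      'settings': [{'name': 'server_host', 'value': ''}, {'name': 'server_port', 'value': ''}],
      'id': 1,
      'category': 'engines',
      'description': ''
    }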

+ 14 - 38
desktop/libs/notebook/src/notebook/connectors/base.py

@@ -283,11 +283,10 @@ def get_api(request, snippet):
     snippet['type'] = 'impala'
 
   interpreter = [
-    interpreter
-    for interpreter in get_ordered_interpreters(request.user) if snippet['type'] in (interpreter['type'], interpreter['interface'])
+    interpreter for interpreter in get_ordered_interpreters(request.user) if snippet['type'] == interpreter['type']
   ]
   if not interpreter:
-    if snippet['type'] == 'hbase':
+    if snippet['type'] == 'hbase': # TODO move to connectors
       interpreter = [{
         'name': 'hbase',
         'type': 'hbase',
@@ -325,36 +324,21 @@ def get_api(request, snippet):
   interpreter = interpreter[0]
   interface = interpreter['interface']
 
-  if has_connectors():
-    cluster = {
-      'connector': snippet['type'],
-      'id': interpreter['type'],
-    }
-    cluster.update(interpreter['options'])
-  elif get_cluster_config(request.user)['has_computes']:
-    cluster = json.loads(request.POST.get('cluster', '""')) # Via Catalog autocomplete API or Notebook create sessions
-    if cluster == '""' or cluster == 'undefined':
-      cluster = None
-    if not cluster and snippet.get('compute'): # Via notebook.ko.js
-      cluster = snippet['compute']
+  if get_cluster_config(request.user)['has_computes']:
+    compute = json.loads(request.POST.get('cluster', '""')) # Via Catalog autocomplete API or Notebook create sessions.
+    if compute == '""' or compute == 'undefined':
+      compute = None
+    if not compute and snippet.get('compute'): # Via notebook.ko.js
+      compute = snippet['compute']
   else:
-    cluster = None
-
-  cluster_name = cluster.get('id') if cluster else None
-
-  if cluster and 'altus:dataware:k8s' in cluster_name:
-    interface = 'hiveserver2'
-  elif cluster and 'crn:altus:dataware:' in cluster_name:
-    interface = 'altus-adb'
-  elif cluster and 'crn:altus:dataeng:' in cluster_name:
-    interface = 'dataeng'
+    compute = None
 
-  LOG.debug('Selected connector %s %s interface=%s compute=%s' % (cluster_name, cluster, interface, snippet.get('compute')))
-  snippet['interface'] = interface
+  LOG.debug('Selected interpreter %s interface=%s compute=%s' % (interpreter, interface, compute))
+  # snippet['interface'] = interface
 
-  if interface.startswith('hiveserver2') or interface == 'hms':
+  if interface == 'hiveserver2':
     from notebook.connectors.hiveserver2 import HS2Api
-    return HS2Api(user=request.user, request=request, cluster=cluster, interface=interface)
+    return HS2Api(user=request.user, request=request, interpreter=interpreter)
   elif interface == 'oozie':
     return OozieApi(user=request.user, request=request)
   elif interface == 'livy':
@@ -369,12 +353,6 @@ def get_api(request, snippet):
   elif interface == 'rdbms':
     from notebook.connectors.rdbms import RdbmsApi
     return RdbmsApi(request.user, interpreter=snippet['type'], query_server=snippet.get('query_server'))
-  elif interface == 'altus-adb':
-    from notebook.connectors.altus_adb import AltusAdbApi
-    return AltusAdbApi(user=request.user, cluster_name=cluster_name, request=request)
-  elif interface == 'dataeng':
-    from notebook.connectors.dataeng import DataEngApi
-    return DataEngApi(user=request.user, request=request, cluster_name=cluster_name)
   elif interface == 'jdbc':
     if interpreter['options'] and interpreter['options'].get('url', '').find('teradata') >= 0:
       from notebook.connectors.jdbc_teradata import JdbcApiTeradata
@@ -433,13 +411,11 @@ def _get_snippet_session(notebook, snippet):
 
 class Api(object):
 
-  def __init__(self, user, interpreter=None, request=None, cluster=None, query_server=None, interface=None):
+  def __init__(self, user, interpreter=None, request=None, query_server=None):
     self.user = user
     self.interpreter = interpreter
     self.request = request
-    self.cluster = cluster
     self.query_server = query_server
-    self.interface = interface
 
   def create_session(self, lang, properties=None):
     return {

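get_api() now matches the snippet type against the interpreter type exactly and hands the whole interpreter dict to the API class, instead of deriving a cluster name. A condensed sketch of the new flow (the snippet shape and interpreter contents are assumptions):

    snippet = {'type': 'impala', 'statement': 'SELECT 1'}
    interpreter = [
        i for i in get_ordered_interpreters(request.user) if snippet['type'] == i['type']
    ][0]
    if interpreter['interface'] == 'hiveserver2':
        api = HS2Api(user=request.user, request=request, interpreter=interpreter)
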
+ 26 - 29
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -169,7 +169,8 @@ class HS2Api(Api):
 
     reuse_session = session is not None
     if not reuse_session:
-      session = dbms.get(self.user, query_server=get_query_server_config(name=lang, cluster=self.cluster)).open_session(self.user)
+      db = dbms.get(self.user, query_server=get_query_server_config(name=lang, connector=self.interpreter))
+      session = db.open_session(self.user)
 
     response = {
       'type': lang,
@@ -240,7 +241,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def execute(self, notebook, snippet):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     statement = self._get_current_statement(notebook, snippet)
     session = self._get_session(notebook, snippet['type'])
@@ -274,7 +275,7 @@ class HS2Api(Api):
   @query_error_handler
   def check_status(self, notebook, snippet):
     response = {}
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     handle = self._get_handle(snippet)
     operation = db.get_operation_status(handle)
@@ -295,7 +296,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def fetch_result(self, notebook, snippet, rows, start_over):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     handle = self._get_handle(snippet)
     try:
@@ -343,7 +344,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def cancel(self, notebook, snippet):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     handle = self._get_handle(snippet)
     db.cancel_operation(handle)
@@ -352,7 +353,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def get_log(self, notebook, snippet, startFrom=None, size=None):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     handle = self._get_handle(snippet)
     return db.get_log(handle, start_over=startFrom == 0)
@@ -364,7 +365,7 @@ class HS2Api(Api):
       from impala import conf as impala_conf
 
     if (snippet['type'] == 'hive' and beeswax_conf.CLOSE_QUERIES.get()) or (snippet['type'] == 'impala' and impala_conf.CLOSE_QUERIES.get()):
-      db = self._get_db(snippet, cluster=self.cluster)
+      db = self._get_db(snippet, interpreter=self.interpreter)
 
       try:
         handle = self._get_handle(snippet)
@@ -381,7 +382,7 @@ class HS2Api(Api):
 
   def can_start_over(self, notebook, snippet):
     try:
-      db = self._get_db(snippet, cluster=self.cluster)
+      db = self._get_db(snippet, interpreter=self.interpreter)
       handle = self._get_handle(snippet)
       # Test handle to verify if still valid
       db.fetch(handle, start_over=True, rows=1)
@@ -440,7 +441,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
     query = None
 
     if snippet.get('query'):
@@ -453,21 +454,21 @@ class HS2Api(Api):
       query = self._get_current_statement(notebook, snippet)['statement']
       database, table = '', ''
 
-    return _autocomplete(db, database, table, column, nested, query=query, cluster=self.cluster)
+    return _autocomplete(db, database, table, column, nested, query=query, interpreter=self.interpreter)
 
 
   @query_error_handler
   def get_sample_data(self, snippet, database=None, table=None, column=None, async=False, operation=None):
     try:
-      db = self._get_db(snippet, async, cluster=self.cluster)
-      return _get_sample_data(db, database, table, column, async, operation=operation, cluster=self.cluster)
+      db = self._get_db(snippet, async, interpreter=self.interpreter)
+      return _get_sample_data(db, database, table, column, async, operation=operation, interpreter=self.interpreter)
     except QueryServerException, ex:
       raise QueryError(ex.message)
 
 
   @query_error_handler
   def explain(self, notebook, snippet):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
     response = self._get_current_statement(notebook, snippet)
     session = self._get_session(notebook, snippet['type'])
 
@@ -489,7 +490,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     handle = self._get_handle(snippet)
     max_rows = DOWNLOAD_ROW_LIMIT.get()
@@ -501,7 +502,7 @@ class HS2Api(Api):
 
 
   def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
 
     response = self._get_current_statement(notebook, snippet)
     session = self._get_session(notebook, snippet['type'])
@@ -664,11 +665,11 @@ DROP TABLE IF EXISTS `%(table)s`;
 
 
   def get_browse_query(self, snippet, database, table, partition_spec=None):
-    db = self._get_db(snippet, cluster=self.cluster)
+    db = self._get_db(snippet, interpreter=self.interpreter)
     table = db.get_table(database, table)
     if table.is_impala_only:
       snippet['type'] = 'impala'
-      db = self._get_db(snippet, cluster=self.cluster)
+      db = self._get_db(snippet, interpreter=self.interpreter)
 
     if partition_spec is not None:
       decoded_spec = urllib.unquote(partition_spec)
@@ -693,23 +694,19 @@ DROP TABLE IF EXISTS `%(table)s`;
     return HiveServerQueryHandle(**handle)
 
 
-  def _get_db(self, snippet, async=False, cluster=None):
+  def _get_db(self, snippet, async=False, interpreter=None):
     if not async and snippet['type'] == 'hive':
       name = 'beeswax'
     elif snippet['type'] == 'hive':
       name = 'hive'
-    elif snippet['type'] == 'impala':
-      name = 'impala'
     elif snippet['type'] == 'llap':
       name = 'llap'
-    elif self.interface == 'hms':
-      name = 'hms'
-    elif self.interface.startswith('hiveserver2-'):
-      name = self.interface.replace('hiveserver2-', '')
+    elif snippet['type'] == 'impala':
+      name = 'impala'
     else:
-      name = 'sparksql' # Backward compatibility until HUE-8758
+      name = 'sparksql'
 
-    return dbms.get(self.user, query_server=get_query_server_config(name=name, cluster=cluster))
+    return dbms.get(self.user, query_server=get_query_server_config(name=name, connector=interpreter)) # Note: name is not used if interpreter is present
 
 
   def _parse_job_counters(self, job_id):
@@ -827,12 +824,12 @@ DROP TABLE IF EXISTS `%(table)s`;
 
 
   def describe_column(self, notebook, snippet, database=None, table=None, column=None):
-    db = self._get_db(snippet, self.cluster)
+    db = self._get_db(snippet, self.interpreter)
     return db.get_table_columns_stats(database, table, column)
 
 
   def describe_table(self, notebook, snippet, database=None, table=None):
-    db = self._get_db(snippet, self.cluster)
+    db = self._get_db(snippet, self.interpreter)
     tb = db.get_table(database, table)
     return {
       'status': 0,
@@ -849,7 +846,7 @@ DROP TABLE IF EXISTS `%(table)s`;
     }
 
   def describe_database(self, notebook, snippet, database=None):
-    db = self._get_db(snippet, self.cluster)
+    db = self._get_db(snippet, self.interpreter)
     return db.get_database(database)
 
   def get_log_is_full_log(self, notebook, snippet):
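
Taken together, a snippet now reaches the warehouse purely through its interpreter/connector definition: HS2Api stores the interpreter, _get_db() forwards it as connector, and get_query_server_config() builds the endpoint from its options. A hedged end-to-end sketch (field names follow the code above, values are illustrative):

    api = HS2Api(user=request.user, request=request, interpreter=interpreter)
    db = api._get_db(snippet, interpreter=api.interpreter)
    # equivalent to dbms.get(user, query_server=get_query_server_config(name='impala', connector=interpreter))
    session = db.open_session(request.user)   # as in create_session() above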