
HUE-8887 [editor] Hive LLAP and Hive Service Discovery connectors (#888)

* Updating settings.py to add file-based cache

* Update conf.py

* Updates to dbms.py

Was not expecting the HMS change; modified on the fly, but I need to test it

* Updates to allow LLAP type

* updates to hiveserver2.py

This section has changed a lot as well

* Change logical order of impala, hms, beeswax

* fixing if statements

* added comment to caching strategy

* moving imports, cleaned up a stray if statement

* fixing typo on debug

* adding config for cache name

* cache key now a dynamic value

* updating settings.py, adding cache config keys

* added config for file

* adding llap instructions

* adding llap instructions

* working around race condition

* adding function to bypass race condition

* Modifying Cache Code

* reverting path change

* reverting configured path

* reverting to static, will address later

* Update conf.py

* Update dbms.py

* finalized removal of dynamic cache location
jkm, 6 years ago
parent commit 93a8c37071

+ 53 - 0
apps/beeswax/src/beeswax/conf.py

@@ -32,6 +32,59 @@ from beeswax.settings import NICE_NAME
 
 LOG = logging.getLogger(__name__)
 
+HIVE_DISCOVERY_LLAP = Config(
+  key="hive_discovery_llap",
+  help=_t("Have Hue determine Hive Server Interactive endpoint from zookeeper"),
+  default="false",
+  type=coerce_bool
+)
+
+HIVE_DISCOVERY_HS2 = Config(
+  key="hive_discovery_hs2",
+  help=_t("Determines whether we pull a random HiveServer2 from the list in zookeeper.  This HS2 instance is cached until hue is restarted."),
+  default="false",
+  type=coerce_bool
+)
+
+HIVE_DISCOVERY_LLAP_HA = Config(
+  key="hive_discovery_llap_ha",
+  help=_t("If you have more than one HSI server, it has a different znode setup.  This will trigger the code to check for the Active HSI Server"),
+  default="false",
+  type=coerce_bool
+)
+
+HIVE_DISCOVERY_LLAP_ZNODE = Config(
+  key="hive_discovery_llap_znode",
+  help=_t("If LLAP is enabled, you should be using zookeeper service discovery mode, this is the znode of the LLAP Master(s)"),
+  default="/hiveserver2-hive2"
+)
+
+HIVE_DISCOVERY_HIVESERVER2_ZNODE = Config(
+  key="hive_discovery_hiveserver2_znode",
+  help=_t("If Hive is using zookeeper service discovery mode, this is the znode of the hiveserver2(s)"),
+  default="/hiveserver2"
+)
+
+CACHE_TIMEOUT = Config(
+  key="cache_timeout",
+  help=_t("How long to pause before reaching back out to zookeeper to get the current Active HSI endpoint"),
+  default=60,
+  type=int
+)
+
+LLAP_SERVER_HOST = Config(
+  key="llap_server_host",
+  help=_t("Host where Hive Server Interactive is running. If Kerberos security is enabled, "
+          "the fully-qualified domain name (FQDN) is required."),
+  default="localhost"
+)
+
+LLAP_SERVER_PORT = Config(
+  key="llap_server_port",
+  help=_t("Configure the port Hive Server Interactive runs on."),
+  default=10501,
+  type=int
+)
 
 HIVE_SERVER_HOST = Config(
   key="hive_server_host",

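For orientation before the discovery code below: HiveServer2 registers itself in ZooKeeper, and the new code parses the registered names. A hedged sketch of that format (exact fields vary by Hive version; host and port are illustrative):

    # Child znodes under the default /hiveserver2 namespace are named like:
    #   serverUri=hs2-host.example.com:10000;version=3.1.0;sequence=0000000041
    # The discovery code extracts the host and port from the serverUri field.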
+ 66 - 11
apps/beeswax/src/beeswax/server/dbms.py

@@ -19,7 +19,9 @@ import logging
 import re
 import threading
 import time
+import json
 
+from django.core.cache import caches
 from django.urls import reverse
 from django.utils.encoding import force_unicode
 from django.utils.translation import ugettext as _
@@ -34,19 +36,25 @@ from indexer.file_format import HiveFormat
 
 from beeswax import hive_site
 from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, HIVE_METASTORE_HOST, HIVE_METASTORE_PORT, LIST_PARTITIONS_LIMIT, SERVER_CONN_TIMEOUT, \
-  AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT
+  AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT, HIVE_DISCOVERY_HIVESERVER2_ZNODE, \
+  HIVE_DISCOVERY_HS2, HIVE_DISCOVERY_LLAP, HIVE_DISCOVERY_LLAP_HA, HIVE_DISCOVERY_LLAP_ZNODE, CACHE_TIMEOUT, \
+  LLAP_SERVER_HOST, LLAP_SERVER_PORT
 from beeswax.common import apply_natural_sort
 from beeswax.design import hql_query
 from beeswax.hive_site import hiveserver2_use_ssl
 from beeswax.models import QueryHistory, QUERY_TYPES
-
+from libzookeeper import conf as libzookeeper_conf
+from kazoo.client import KazooClient
 
 LOG = logging.getLogger(__name__)
 
 
 DBMS_CACHE = {}
 DBMS_CACHE_LOCK = threading.Lock()
-
+cache = caches['hive_discovery']
+# Use a file-based cache so state is shared across eventlet threads. The cache
+# persists across restarts, so clear it on startup to make the server
+# re-resolve the HiveServer2 host.
+cache.clear()
 
 def get(user, query_server=None, cluster=None):
   global DBMS_CACHE
@@ -83,7 +91,55 @@ def get_query_server_config(name='beeswax', server=None, cluster=None):
   LOG.debug("Query cluster %s: %s" % (name, cluster))
 
   cluster_config = get_cluster_config(cluster)
-
+  if name == "llap":
+    activeEndpoint = cache.get('llap')
+    if activeEndpoint is None:
+      if HIVE_DISCOVERY_LLAP.get():
+        LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
+        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+        zk.start()
+        if HIVE_DISCOVERY_LLAP_HA.get():
+          znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+          if zk.exists(znode):
+            hiveservers = zk.get_children(znode)
+            for server in hiveservers:
+              llap_servers = json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
+              if llap_servers["api"] == "activeEndpoint":
+                cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
+          else:
+            LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
+            cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}), CACHE_TIMEOUT.get())
+        else:
+          znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+          if zk.exists(znode):
+            hiveservers = zk.get_children(znode)
+            for server in hiveservers:
+              cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
+        zk.stop()
+      else:
+        LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
+        cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_PORT.get()}), CACHE_TIMEOUT.get())
+    activeEndpoint = json.loads(cache.get("llap"))
+  elif name not in ('hms', 'impala'):
+    activeEndpoint = cache.get("hiveserver2")
+    if activeEndpoint is None:
+      if HIVE_DISCOVERY_HS2.get():
+        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+        zk.start()
+        znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
+        LOG.info("Setting up Hive with the following node {0}".format(znode))
+        if zk.exists(znode):
+          hiveservers = zk.get_children(znode)
+          server_to_use = 0  # TODO: optionally spread load, e.g. randint(0, len(hiveservers) - 1) behind a config flag
+          cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
+        else:
+          cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
+        zk.stop()
+      else:
+        cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
+    activeEndpoint = json.loads(cache.get("hiveserver2"))
   if name == 'impala':
     from impala.dbms import get_query_server_config as impala_query_server_config
     query_server = impala_query_server_config(cluster_config=cluster_config)
@@ -102,20 +158,19 @@ def get_query_server_config(name='beeswax', server=None, cluster=None):
     kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
     query_server = {
         'server_name': 'beeswax',
-        'server_host': HIVE_SERVER_HOST.get() if not cluster_config else cluster_config.get('server_host'),
-        'server_port': HIVE_SERVER_PORT.get(),
+        'server_host': activeEndpoint["host"],
+        'server_port': activeEndpoint["port"],
         'principal': kerberos_principal,
         'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
             'protocol': 'https' if hiveserver2_use_ssl() else 'http',
-            'host': HIVE_SERVER_HOST.get(),
-            'port': hive_site.hiveserver2_thrift_http_port(),
+            'host': activeEndpoint["host"],
+            'port': activeEndpoint["port"],
             'end_point': hive_site.hiveserver2_thrift_http_path()
         },
         'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
         'auth_username': AUTH_USERNAME.get(),
         'auth_password': AUTH_PASSWORD.get()
     }
-
   if name == 'sparksql': # Spark SQL is almost the same as Hive
     from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT
 
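The chained split() calls above unpack HiveServer2's znode naming scheme, serverUri=host:port;version=...;sequence=.... The same parsing as a standalone sketch (a hypothetical helper, not part of the commit):

    import json

    def parse_hs2_znode_name(child):
      # child looks like 'serverUri=hs2-host.example.com:10000;version=3.1.0;sequence=0000000041'
      server_uri = child.split(';')[0].split('=')[1]  # -> 'hs2-host.example.com:10000'
      host, port = server_uri.split(':')
      return json.dumps({'host': host, 'port': port})

    # The LLAP HA branch instead reads JSON registry entries from the znode and
    # picks the record whose 'api' field is 'activeEndpoint'.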

+ 4 - 1
apps/beeswax/src/beeswax/server/hive_server2_lib.py

@@ -603,7 +603,10 @@ class HiveServerClient:
 
     if self.query_server['server_name'] == 'beeswax': # All the time
       kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
-
+
+    if self.query_server['server_name'] == 'llap': # All the time
+      kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
+
     if self.query_server['server_name'] == 'sparksql': # All the time
       kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
 
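Since 'beeswax', 'llap', and 'sparksql' all set the same proxy-user property, a later cleanup could collapse the three branches into one membership test; a sketch, not part of the commit:

    if self.query_server['server_name'] in ('beeswax', 'llap', 'sparksql'):  # All the time
      kwargs['configuration'].update({'hive.server2.proxy.user': user.username})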

+ 36 - 0
desktop/conf.dist/hue.ini

@@ -1120,6 +1120,42 @@
   # Port where HiveServer2 Thrift server runs on.
   ## hive_server_port=10000
 
+  # Host where LLAP is running
+  ## llap_server_host=localhost
+
+  # Port where LLAP Thrift server runs on
+  ## llap_server_port=10501
+
+  # Alternatively, use Service Discovery for LLAP (Hive Server Interactive) and/or HiveServer2; this overrides the host and port settings above.
+
+  # Whether to use Service Discovery for LLAP
+  ## hive_discovery_llap=true
+
+  # Whether LLAP (Hive Server Interactive) is running in an HA configuration (more than one instance).
+  # Important, as the ZooKeeper znode structure differs between the two setups.
+  ## hive_discovery_llap_ha=false
+
+  # Shortcuts for finding the LLAP znode:
+  #   Non-HA: hiveserver-interactive-site hive.server2.zookeeper.namespace, e.g. hive2 = /hive2
+  #   HA, non-Kerberized: <llap_app_name>_llap, e.g. app name llap0 = /llap0_llap
+  #   HA, Kerberized: <llap_app_name>_llap-sasl, e.g. app name llap0 = /llap0_llap-sasl
+  ## hive_discovery_llap_znode=/hiveserver2-hive2
+
+  # Whether to use Service Discovery for HiveServer2
+  ## hive_discovery_hs2=true
+
+  # The HiveServer2 znode comes from hive-site hive.server2.zookeeper.namespace, e.g. hiveserver2 = /hiveserver2
+  ## hive_discovery_hiveserver2_znode=/hiveserver2
+
+  # To keep the load on ZooKeeper to a minimum:
+  # ---- the LLAP activeEndpoint is cached for the cache_timeout period (LLAP HA only)
+  # ---- the hiveserver2 endpoint is cached for the length of the session
+  # Note: the cache name and location are currently static (see desktop/core/src/desktop/settings.py);
+  # only the time between ZooKeeper checks is configurable.
+  ## cache_timeout=60
+
   # Host where Hive Metastore Server (HMS) is running.
   # If Kerberos security is enabled, the fully-qualified domain name (FQDN) is required.
   ## hive_metastore_host=localhost
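A concrete, illustrative [beeswax] configuration for an HA LLAP cluster whose application name is llap0 (hypothetical values, following the znode shortcuts above):

    [beeswax]
      hive_discovery_llap=true
      hive_discovery_llap_ha=true
      hive_discovery_llap_znode=/llap0_llap
      cache_timeout=60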

+ 37 - 1
desktop/conf/pseudo-distributed.ini.tmpl

@@ -1121,7 +1121,43 @@
 
   # Port where HiveServer2 Thrift server runs on.
   ## hive_server_port=10000
-
+
+  # Host where LLAP is running
+  ## llap_server_host=localhost
+
+  # Port where LLAP Thrift server runs on
+  ## llap_server_port=10501
+
+  # Alternatively, use Service Discovery for LLAP (Hive Server Interactive) and/or HiveServer2; this overrides the host and port settings above.
+
+  # Whether to use Service Discovery for LLAP
+  ## hive_discovery_llap=true
+
+  # Whether LLAP (Hive Server Interactive) is running in an HA configuration (more than one instance).
+  # Important, as the ZooKeeper znode structure differs between the two setups.
+  ## hive_discovery_llap_ha=false
+
+  # Shortcuts for finding the LLAP znode:
+  #   Non-HA: hiveserver-interactive-site hive.server2.zookeeper.namespace, e.g. hive2 = /hive2
+  #   HA, non-Kerberized: <llap_app_name>_llap, e.g. app name llap0 = /llap0_llap
+  #   HA, Kerberized: <llap_app_name>_llap-sasl, e.g. app name llap0 = /llap0_llap-sasl
+  ## hive_discovery_llap_znode=/hiveserver2-hive2
+
+  # Whether to use Service Discovery for HiveServer2
+  ## hive_discovery_hs2=true
+
+  # The HiveServer2 znode comes from hive-site hive.server2.zookeeper.namespace, e.g. hiveserver2 = /hiveserver2
+  ## hive_discovery_hiveserver2_znode=/hiveserver2
+
+  # To keep the load on ZooKeeper to a minimum:
+  # ---- the LLAP activeEndpoint is cached for the cache_timeout period (LLAP HA only)
+  # ---- the hiveserver2 endpoint is cached for the length of the session
+  # Note: the cache name and location are currently static (see desktop/core/src/desktop/settings.py);
+  # only the time between ZooKeeper checks is configurable.
+  ## cache_timeout=60
+
   # Host where Hive Metastore Server (HMS) is running.
   # If Kerberos security is enabled, the fully-qualified domain name (FQDN) is required.
   ## hive_metastore_host=localhost

+ 6 - 1
desktop/core/src/desktop/settings.py

@@ -37,7 +37,6 @@ from desktop.lib.python_util import force_dict_to_strings
 
 from aws.conf import is_enabled as is_s3_enabled
 
-
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
 BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', '..', '..'))
 
@@ -385,6 +384,12 @@ CACHES = {
         'LOCATION': 'unique-hue'
     }
 }
+CACHES_HIVE_DISCOVERY_KEY = 'hive_discovery'
+CACHES[CACHES_HIVE_DISCOVERY_KEY] = {
+    'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
+    'LOCATION': '/tmp/hive_discovery_cache',
+}
+
 CACHES_CELERY_KEY = 'celery'
 if desktop.conf.TASK_SERVER.ENABLED.get():
   CACHES[CACHES_CELERY_KEY] = json.loads(desktop.conf.TASK_SERVER.EXECUTION_STORAGE.get())
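Because the backend is file-based, entries survive process restarts and are visible to every eventlet worker, which is why dbms.py clears the cache at import time. A minimal usage sketch with hypothetical values:

    from django.core.cache import caches

    hive_cache = caches['hive_discovery']
    # Cache a resolved endpoint for 60 seconds (the cache_timeout default).
    hive_cache.set('llap', '{"host": "llap-host.example.com", "port": 10501}', 60)
    endpoint = hive_cache.get('llap')  # returns None once the entry expires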

+ 3 - 1
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -163,7 +163,7 @@ class HS2Api(Api):
 
   @query_error_handler
   def create_session(self, lang='hive', properties=None):
-    application = 'beeswax' if lang == 'hive' else lang
+    application = 'beeswax' if lang in ('hive', 'llap') else lang
 
     session = Session.objects.get_session(self.user, application=application)
 
@@ -700,6 +700,8 @@ DROP TABLE IF EXISTS `%(table)s`;
       name = 'hive'
     elif snippet['type'] == 'impala':
       name = 'impala'
+    elif snippet['type'] == 'llap':
+      name = 'llap'
     elif self.interface == 'hms':
       name = 'hms'
     elif self.interface.startswith('hiveserver2-'):
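Taken together, an 'llap' snippet reuses the beeswax session machinery while resolving to its own query server. A condensed sketch of the language-to-application mapping (hypothetical helper, names taken from the diff):

    def application_for(lang):
      # 'hive' and 'llap' both ride on the beeswax application; others map 1:1.
      return 'beeswax' if lang in ('hive', 'llap') else lang

    assert application_for('llap') == 'beeswax'
    assert application_for('impala') == 'impala'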