@@ -19,7 +19,9 @@ import logging
import re
import threading
import time
+import json

+from django.core.cache import caches
from django.urls import reverse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
@@ -34,19 +36,25 @@ from indexer.file_format import HiveFormat

from beeswax import hive_site
from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, HIVE_METASTORE_HOST, HIVE_METASTORE_PORT, LIST_PARTITIONS_LIMIT, SERVER_CONN_TIMEOUT, \
-  AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT
+  AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT, HIVE_DISCOVERY_HIVESERVER2_ZNODE, \
+  HIVE_DISCOVERY_HS2, HIVE_DISCOVERY_LLAP, HIVE_DISCOVERY_LLAP_HA, HIVE_DISCOVERY_LLAP_ZNODE, CACHE_TIMEOUT, \
+  LLAP_SERVER_HOST, LLAP_SERVER_PORT
from beeswax.common import apply_natural_sort
from beeswax.design import hql_query
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import QueryHistory, QUERY_TYPES
-
+from libzookeeper import conf as libzookeeper_conf
+from kazoo.client import KazooClient

LOG = logging.getLogger(__name__)


DBMS_CACHE = {}
DBMS_CACHE_LOCK = threading.Lock()
-
+cache = caches['hive_discovery']
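+# Assumes a 'hive_discovery' cache alias is defined in the Django CACHES setting.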
+# Use a file-backed cache so every eventlet worker resolves the same endpoint. The cache is
+# persistent across restarts, so clear it on startup to force a fresh HiveServer2 host lookup.
+cache.clear()

def get(user, query_server=None, cluster=None):
  global DBMS_CACHE
@@ -83,7 +91,55 @@ def get_query_server_config(name='beeswax', server=None, cluster=None):
  LOG.debug("Query cluster %s: %s" % (name, cluster))

  cluster_config = get_cluster_config(cluster)
-
+ if name == "llap":
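+    # Resolve the LLAP (Hive Server Interactive) endpoint: use the cached value if present,
+    # otherwise query ZooKeeper (HA or single-instance mode) or fall back to the configured host/port.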
+    activeEndpoint = cache.get('llap')
+    if activeEndpoint is None:
+      if HIVE_DISCOVERY_LLAP.get():
+ LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
+        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+        zk.start()
+        if HIVE_DISCOVERY_LLAP_HA.get():
+          znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+          if zk.exists(znode):
+ hiveservers = zk.get_children(znode)
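+            # Each child of the instances znode holds a JSON service record; cache the host/port
+            # of the record whose first "internal" endpoint is the "activeEndpoint" API.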
+            for server in hiveservers:
+              llap_servers = json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
+              if llap_servers["api"] == "activeEndpoint":
+                cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
+          else:
+            LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
+            cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}), CACHE_TIMEOUT.get())
+        else:
+          znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
+          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
+          if zk.exists(znode):
+ hiveservers = zk.get_children(znode)
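+            # Without HA the child node name itself is expected to encode the endpoint,
+            # e.g. "serverUri=host:port;version=...;sequence=...", so parse host and port from it.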
+            for server in hiveservers:
+              cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
+        zk.stop()
+      else:
+        LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
+        cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_PORT.get()}), CACHE_TIMEOUT.get())
+      activeEndpoint = json.loads(cache.get("llap"))
+ elif name != 'hms' and name != 'impala':
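+    # Resolve the HiveServer2 endpoint the same way: cached value first, then ZooKeeper dynamic
+    # service discovery if enabled, otherwise the statically configured host and Thrift HTTP port.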
+    activeEndpoint = cache.get("hiveserver2")
+    if activeEndpoint is None:
+      if HIVE_DISCOVERY_HS2.get():
+        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
+        zk.start()
+        znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
+        LOG.info("Setting up Hive with the following node {0}".format(znode))
+        if zk.exists(znode):
+ hiveservers = zk.get_children(znode)
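+          # HiveServer2 dynamic service discovery registers children named like
+          # "serverUri=host:port;version=...;sequence=..."; pick one and parse out host and port.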
+          server_to_use = 0  # TODO: spread load by picking randint(0, len(hiveservers) - 1) once a HIVE_SPREAD option is added
+          cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
+        else:
+          cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
+        zk.stop()
+      else:
+        cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
+    activeEndpoint = json.loads(cache.get("hiveserver2"))
  if name == 'impala':
    from impala.dbms import get_query_server_config as impala_query_server_config
    query_server = impala_query_server_config(cluster_config=cluster_config)
@@ -102,20 +158,19 @@ def get_query_server_config(name='beeswax', server=None, cluster=None):
    kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
    query_server = {
      'server_name': 'beeswax',
-      'server_host': HIVE_SERVER_HOST.get() if not cluster_config else cluster_config.get('server_host'),
- 'server_port': HIVE_SERVER_PORT.get(),
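+      # Host and port now come from the discovery cache (ZooKeeper result or config fallback).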
+      'server_host': activeEndpoint["host"],
+      'server_port': activeEndpoint["port"],
      'principal': kerberos_principal,
      'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
        'protocol': 'https' if hiveserver2_use_ssl() else 'http',
-        'host': HIVE_SERVER_HOST.get(),
-        'port': hive_site.hiveserver2_thrift_http_port(),
+        'host': activeEndpoint["host"],
+        'port': activeEndpoint["port"],
        'end_point': hive_site.hiveserver2_thrift_http_path()
-      },
+      },
      'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
      'auth_username': AUTH_USERNAME.get(),
      'auth_password': AUTH_PASSWORD.get()
-    }
-
+    }
  if name == 'sparksql': # Spark SQL is almost the same as Hive
    from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT