@@ -15,19 +15,58 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from builtins import object
-import logging
 import re
 import sys
-import threading
-import time
 import json
+import time
+import logging
+import threading
+from builtins import object
 
 from django.core.cache import caches
 from django.urls import reverse
+from django.utils.encoding import force_str
+from django.utils.translation import gettext as _
 from kazoo.client import KazooClient
 
-from beeswax.models import Compute
+from azure.abfs import abfspath
+from beeswax.common import apply_natural_sort, is_compute
+from beeswax.conf import (
+  APPLY_NATURAL_SORT_MAX,
+  AUTH_PASSWORD,
+  AUTH_USERNAME,
+  CACHE_TIMEOUT,
+  CLOSE_SESSIONS,
+  HIVE_DISCOVERY_HIVESERVER2_ZNODE,
+  HIVE_DISCOVERY_HS2,
+  HIVE_DISCOVERY_LLAP,
+  HIVE_DISCOVERY_LLAP_HA,
+  HIVE_DISCOVERY_LLAP_ZNODE,
+  HIVE_HTTP_THRIFT_PORT,
+  HIVE_METASTORE_HOST,
+  HIVE_METASTORE_PORT,
+  HIVE_SERVER_HOST,
+  HIVE_SERVER_PORT,
+  LIST_PARTITIONS_LIMIT,
+  LLAP_SERVER_HOST,
+  LLAP_SERVER_PORT,
+  LLAP_SERVER_THRIFT_PORT,
+  MAX_NUMBER_OF_SESSIONS,
+  QUERY_PARTITIONS_LIMIT,
+  SERVER_CONN_TIMEOUT,
+  USE_SASL as HIVE_USE_SASL,
+  ZOOKEEPER_CONN_TIMEOUT,
+  has_session_pool,
+)
+from beeswax.design import hql_query
+from beeswax.hive_site import (
+  get_hiveserver2_kerberos_principal,
+  hiveserver2_impersonation_enabled,
+  hiveserver2_thrift_http_path,
+  hiveserver2_transport_mode,
+  hiveserver2_use_ssl,
+)
+from beeswax.models import QUERY_TYPES, Compute, QueryHistory
 from desktop.conf import CLUSTER_ID, has_connectors
 from desktop.lib.django_util import format_preserving_redirect
 from desktop.lib.exceptions_renderable import PopupException
@@ -38,30 +77,6 @@ from desktop.settings import CACHES_HIVE_DISCOVERY_KEY
 from indexer.file_format import HiveFormat
 from libzookeeper import conf as libzookeeper_conf
 
-from azure.abfs import abfspath
-from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, HIVE_SERVER_HOST, HIVE_HTTP_THRIFT_PORT, HIVE_METASTORE_HOST, \
-  HIVE_METASTORE_PORT, LIST_PARTITIONS_LIMIT, SERVER_CONN_TIMEOUT, ZOOKEEPER_CONN_TIMEOUT, \
-  AUTH_USERNAME, AUTH_PASSWORD, APPLY_NATURAL_SORT_MAX, QUERY_PARTITIONS_LIMIT, HIVE_DISCOVERY_HIVESERVER2_ZNODE, \
-  HIVE_DISCOVERY_HS2, HIVE_DISCOVERY_LLAP, HIVE_DISCOVERY_LLAP_HA, HIVE_DISCOVERY_LLAP_ZNODE, CACHE_TIMEOUT, \
-  LLAP_SERVER_HOST, LLAP_SERVER_PORT, LLAP_SERVER_THRIFT_PORT, USE_SASL as HIVE_USE_SASL, CLOSE_SESSIONS, has_session_pool, \
-  MAX_NUMBER_OF_SESSIONS
-from beeswax.common import apply_natural_sort, is_compute
-from beeswax.design import hql_query
-from beeswax.hive_site import hiveserver2_use_ssl, hiveserver2_impersonation_enabled, get_hiveserver2_kerberos_principal, \
-  hiveserver2_transport_mode, hiveserver2_thrift_http_path
-from beeswax.models import QueryHistory, QUERY_TYPES
-
-
-if sys.version_info[0] > 2:
-  from django.utils.encoding import force_str
-else:
-  from django.utils.encoding import force_unicode as force_str
-
-if sys.version_info[0] > 2:
-  from django.utils.translation import gettext as _
-else:
-  from django.utils.translation import ugettext as _
-
 LOG = logging.getLogger()
 
 
@@ -70,6 +85,7 @@ DBMS_CACHE = {}
 DBMS_CACHE_LOCK = threading.Lock()
 cache = caches[CACHES_HIVE_DISCOVERY_KEY]
 
+
 # Using file cache to make sure eventlet threads are uniform, this cache is persistent on startup
 # So we clear it to make sure the server resets hiveserver2 host.
 def reset_ha():
@@ -316,7 +332,7 @@ def get_query_server_config_via_connector(connector):
   connector_name = connector['type']
   compute_name = compute['name']
   if compute.get('id'):
-    compute = Compute.objects.get(id=compute['id']).to_dict() #Reload the full compute from db
+    compute = Compute.objects.get(id=compute['id']).to_dict()  # Reload the full compute from db
   LOG.debug("Query cluster connector %s compute %s" % (connector_name, compute))
 
   if compute['options'].get('has_ssh') == 'true':
@@ -384,7 +400,6 @@ class HiveServer2Dbms(object):
     self.server_name = self.client.query_server.get('dialect') if self.client.query_server['server_name'].isdigit() \
         else self.client.query_server['server_name']
 
-
   @classmethod
   def to_matching_wildcard(cls, identifier=None):
     cleaned = "*"
@@ -392,7 +407,6 @@ class HiveServer2Dbms(object):
       cleaned = "*%s*" % identifier.strip().strip("*")
     return cleaned
 
-
   def get_databases(self, database_names='*'):
     if database_names != '*':
       database_names = self.to_matching_wildcard(database_names)
@@ -404,11 +418,9 @@ class HiveServer2Dbms(object):
 
     return databases
 
-
   def get_database(self, database):
     return self.client.get_database(database)
 
-
   def alter_database(self, database, properties):
     hql = 'ALTER database `%s` SET DBPROPERTIES (' % database
     hql += ', '.join(["'%s'='%s'" % (k, v) for k, v in list(properties.items())])
@@ -426,7 +438,6 @@ class HiveServer2Dbms(object):
 
     return self.client.get_database(database)
 
-
   def get_tables_meta(self, database='default', table_names='*', table_types=None):
     database = database.lower() # Impala is case sensitive
 
@@ -444,7 +455,6 @@ class HiveServer2Dbms(object):
       tables = apply_natural_sort(tables, key='name')
     return tables
 
-
   def get_tables(self, database='default', table_names='*', table_types=None):
     database = database.lower() # Impala is case sensitive
 
@@ -459,7 +469,6 @@ class HiveServer2Dbms(object):
       tables = apply_natural_sort(tables)
     return tables
 
-
  def _get_tables_via_sparksql(self, database, table_names='*'):
    hql = "SHOW TABLES IN %s" % database
    if table_names != '*':
@@ -486,7 +495,6 @@ class HiveServer2Dbms(object):
     else:
       return []
 
-
   def get_table(self, database, table_name):
     try:
       return self.client.get_table(database, table_name)
@@ -505,7 +513,6 @@ class HiveServer2Dbms(object):
       else:
         raise e
 
-
   def alter_table(self, database, table_name, new_table_name=None, comment=None, tblproperties=None):
     table_obj = self.get_table(database, table_name)
     if table_obj is None:
@@ -536,7 +543,6 @@ class HiveServer2Dbms(object):
 
     return self.client.get_table(database, table_name)
 
-
   def get_column(self, database, table_name, column_name):
     table = self.get_table(database, table_name)
     for col in table.cols:
@@ -544,7 +550,6 @@ class HiveServer2Dbms(object):
         return col
     return None
 
-
   def alter_column(self, database, table_name, column_name, new_column_name, column_type, comment=None, partition_spec=None, cascade=False):
     hql = 'ALTER TABLE `%s`.`%s`' % (database, table_name)
 
@@ -571,27 +576,23 @@ class HiveServer2Dbms(object):
 
     return self.get_column(database, table_name, new_column_name)
 
-
   def execute_query(self, query, design):
     return self.execute_and_watch(query, design=design)
 
-
   def select_star_from(self, database, table, limit=1000):
     if table.partition_keys: # Filter on max number of partitions for partitioned tables
-      hql = self._get_sample_partition_query(database, table, limit=limit) # Currently need a limit
+      hql = self._get_sample_partition_query(database, table, limit=limit)  # Currently need a limit
     else:
       hql = "SELECT * FROM `%s`.`%s` LIMIT %d;" % (database, table.name, limit)
     return self.execute_statement(hql)
 
-
   def get_select_star_query(self, database, table, limit=1000):
     if table.partition_keys: # Filter on max number of partitions for partitioned tables
-      hql = self._get_sample_partition_query(database, table, limit=limit) # Currently need a limit
+      hql = self._get_sample_partition_query(database, table, limit=limit)  # Currently need a limit
     else:
       hql = "SELECT * FROM `%s`.`%s` LIMIT %d;" % (database, table.name, limit)
     return hql
 
-
   def execute_statement(self, hql):
     if self.server_name.startswith('impala'):
       query = hql_query(hql, QUERY_TYPES[1])
@@ -599,7 +600,6 @@ class HiveServer2Dbms(object):
       query = hql_query(hql, QUERY_TYPES[0])
     return self.execute_and_watch(query)
 
-
   def fetch(self, query_handle, start_over=False, rows=None):
     no_start_over_support = [
       config_variable
@@ -611,15 +611,12 @@ class HiveServer2Dbms(object):
 
     return self.client.fetch(query_handle, start_over, rows)
 
-
   def close_operation(self, query_handle):
     return self.client.close_operation(query_handle)
 
-
   def open_session(self, user):
     return self.client.open_session(user)
 
-
   def close_session(self, session):
     resp = self.client.close_session(session)
 
@@ -633,14 +630,12 @@ class HiveServer2Dbms(object):
 
     return session
 
-
   def cancel_operation(self, query_handle):
     resp = self.client.cancel_operation(query_handle)
     if self.client.query_server.get('dialect') == 'impala':
       resp = self.client.close_operation(query_handle)
     return resp
 
-
   def get_sample(self, database, table, column=None, nested=None, limit=100, generate_sql_only=False, operation=None):
     result = None
     hql = None
@@ -666,15 +661,15 @@ class HiveServer2Dbms(object):
       else:
         hql = "SELECT * FROM `%s`.`%s` LIMIT %s;" % (database, table.name, limit)
     else:
+      select_clause, from_clause = self.get_nested_select(database, table.name, column, nested)
       if operation == 'distinct':
-        hql = "SELECT DISTINCT %s FROM `%s`.`%s` LIMIT %s;" % (column, database, table.name, limit)
+        hql = 'SELECT DISTINCT %s FROM %s LIMIT %s;' % (select_clause, from_clause, limit)
       elif operation == 'max':
-        hql = "SELECT max(%s) FROM `%s`.`%s`;" % (column, database, table.name)
+        hql = 'SELECT max(%s) FROM %s;' % (select_clause, from_clause)
       elif operation == 'min':
-        hql = "SELECT min(%s) FROM `%s`.`%s`;" % (column, database, table.name)
+        hql = 'SELECT min(%s) FROM %s;' % (select_clause, from_clause)
       else:
-        hql = "SELECT %s FROM `%s`.`%s` LIMIT %s;" % (column, database, table.name, limit)
-        # TODO: Add nested select support for HS2
+        hql = 'SELECT %s FROM %s LIMIT %s;' % (select_clause, from_clause, limit)
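+        # e.g. nested='item/' on an array column c1 gives:
+        #   SELECT temp_item0 FROM db.tbl LATERAL VIEW explode(c1) temp_table AS temp_item0 LIMIT 100;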
 
     if hql:
       if generate_sql_only:
@@ -689,6 +684,31 @@ class HiveServer2Dbms(object):
 
     return result
 
+  def get_nested_select(self, database, table, column, nested):
+    """
+    Given a column or nested type, return the corresponding SELECT and FROM clauses in Hive's nested-type syntax
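+
+    For example, for table 'db.tbl', column='c1' (an array of maps) and
+    nested='item/key', this returns select_clause 'temp_key1' and from_clause
+    'db.tbl LATERAL VIEW explode(c1) temp_table AS temp_item0
+    LATERAL VIEW explode(temp_item0) temp_table AS temp_key1, temp_value1'.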
+    """
+    select_clause = column
+    from_clause = f"{database}.{table}"
+
+    if nested:
+      nested_tokens = nested.strip('/').split('/')
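+      # Each path token maps to Hive nested-type syntax: 'item' explodes an array
+      # element, 'key'/'value' explode a map; any other token is a struct field access.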
+      for i, token in enumerate(nested_tokens):
+        if token == 'item':
+          from_clause += f" LATERAL VIEW explode({select_clause}) temp_table AS temp_item{i}"
+          select_clause = f"temp_item{i}"
+        elif token == 'key':
+          from_clause += f" LATERAL VIEW explode({select_clause}) temp_table AS temp_key{i}, temp_value{i}"
+          select_clause = f"temp_key{i}"
+        elif token == 'value':
+          from_clause += f" LATERAL VIEW explode({select_clause}) temp_table AS temp_key{i}, temp_value{i}"
+          select_clause = f"temp_value{i}"
+        else:
+          select_clause += f".{token}"
+    else:
+      select_clause = column
+
+    return select_clause, from_clause
 
   def _get_sample_partition_query(self, database, table, column='*', limit=100, operation=None):
     max_parts = QUERY_PARTITIONS_LIMIT.get()
@@ -714,7 +734,6 @@ class HiveServer2Dbms(object):
     return prefix + " FROM `%(database)s`.`%(table)s` %(partition_clause)s LIMIT %(limit)s" % \
         {'database': database, 'table': table.name, 'partition_clause': partition_clause, 'limit': limit}
 
-
   def analyze_table(self, database, table):
     if self.server_name.startswith('impala'):
       hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
@@ -730,7 +749,6 @@ class HiveServer2Dbms(object):
 
     return self.execute_statement(hql)
 
-
   def analyze_table_columns(self, database, table):
     if self.server_name.startswith('impala'):
       hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
@@ -743,7 +761,6 @@ class HiveServer2Dbms(object):
 
     return self.execute_statement(hql)
 
-
   def get_table_stats(self, database, table):
     stats = []
 
@@ -763,7 +780,6 @@ class HiveServer2Dbms(object):
 
     return stats
 
-
   def get_table_columns_stats(self, database, table, column):
     if self.server_name.startswith('impala'):
       hql = 'SHOW COLUMN STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
@@ -779,7 +795,7 @@ class HiveServer2Dbms(object):
       data = list(result.rows())
 
       if self.server_name.startswith('impala'):
-        if column == -1: # All the columns
+        if column == -1:  # All the columns
           return [self._extract_impala_column(col) for col in data]
         else:
           data = [col for col in data if col[0] == column][0]
@@ -823,7 +839,6 @@ class HiveServer2Dbms(object):
       self.close(handle)
     return result
 
-
   def get_table_describe(self, database, table):
     hql = 'DESCRIBE `%s`.`%s`' % (database, table)
 
@@ -835,7 +850,6 @@ class HiveServer2Dbms(object):
       self.close(handle)
     return result
 
-
   def get_top_terms(self, database, table, column, limit=30, prefix=None):
     limit = min(limit, 100)
     prefix_match = ''
@@ -852,7 +866,7 @@ class HiveServer2Dbms(object):
     }
 
     query = hql_query(hql)
-    handle = self.execute_and_wait(query, timeout_sec=60.0) # Hive is very slow
+    handle = self.execute_and_wait(query, timeout_sec=60.0)  # Hive is very slow
 
     if handle:
       result = self.fetch(handle, rows=limit)
@@ -861,7 +875,6 @@ class HiveServer2Dbms(object):
     else:
       return []
 
-
   def drop_table(self, database, table):
     if table.is_view:
       hql = "DROP VIEW `%s`.`%s`" % (database, table.name,)
@@ -870,11 +883,10 @@ class HiveServer2Dbms(object):
 
     return self.execute_statement(hql)
 
-
   def load_data(self, database, table, form_data, design, generate_ddl_only=False):
     hql = "LOAD DATA INPATH"
     source_path = "%(path)s" % form_data
-    if source_path.lower().startswith("abfs"): #this is to check if its using an ABFS path
+    if source_path.lower().startswith("abfs"):  # Check if it's an ABFS path
       source_path = abfspath(source_path)
     hql += " '%s'" % source_path
     if form_data['overwrite']:
@@ -896,7 +908,6 @@ class HiveServer2Dbms(object):
 
     return self.execute_query(query, design)
 
-
   def drop_tables(self, database, tables, design, skip_trash=False, generate_ddl_only=False):
     hql = []
 
@@ -918,11 +929,9 @@ class HiveServer2Dbms(object):
 
     return self.execute_query(query, design)
 
-
   def drop_database(self, database):
     return self.execute_statement("DROP DATABASE `%s`" % database)
 
-
   def drop_databases(self, databases, design, generate_ddl_only=False):
     hql = []
 
@@ -953,7 +962,6 @@ class HiveServer2Dbms(object):
       hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (target_dir, query)
     return self.execute_statement(hql)
 
-
   def create_table_as_a_select(self, request, query_history, target_database, target_table, result_meta):
     design = query_history.design.get_design()
     database = design.query['database']
@@ -984,7 +992,7 @@ class HiveServer2Dbms(object):
     if not delim.isdigit():
       delim = str(ord(delim))
 
-    hql = '''
+    hql = r'''
           CREATE TABLE `%s` (
             %s
           )
@@ -1016,24 +1024,19 @@ class HiveServer2Dbms(object):
 
     return query_history
 
-
   def use(self, database, session=None):
     query = hql_query('USE `%s`' % database)
     return self.client.use(query, session=session)
 
-
   def get_log(self, query_handle, start_over=True):
     return self.client.get_log(query_handle, start_over)
 
-
   def get_state(self, handle):
     return self.client.get_state(handle)
 
-
   def get_operation_status(self, handle):
     return self.client.get_operation_status(handle)
 
-
   def execute_and_wait(self, query, timeout_sec=30.0, sleep_interval=0.5):
     """
     Run query and check status until it finishes or timeouts.
@@ -1064,7 +1067,6 @@ class HiveServer2Dbms(object):
 
     raise QueryServerTimeoutException(message=msg)
 
-
   def execute_next_statement(self, query_history, hql_query):
     if query_history.is_success() or query_history.is_expired():
       # We need to go to the next statement only if the previous one passed
@@ -1084,7 +1086,6 @@ class HiveServer2Dbms(object):
 
     return self.execute_and_watch(query, query_history=query_history)
 
-
   def execute_and_watch(self, query, design=None, query_history=None):
     """
     Run query and return a QueryHistory object in order to see its progress on a Web page.
@@ -1140,24 +1141,20 @@ class HiveServer2Dbms(object):
 
     return query_history
 
-
   def get_results_metadata(self, handle):
     return self.client.get_results_metadata(handle)
 
-
   def close(self, handle):
     return self.client.close(handle)
 
-
   def get_partitions(self, db_name, table, partition_spec=None, max_parts=None, reverse_sort=True):
     if max_parts is None or max_parts > LIST_PARTITIONS_LIMIT.get():
       max_parts = LIST_PARTITIONS_LIMIT.get()
 
     return self.client.get_partitions(db_name, table.name, partition_spec, max_parts=max_parts, reverse_sort=reverse_sort)
 
-
   def get_partition(self, db_name, table_name, partition_spec, generate_ddl_only=False):
-    if partition_spec and self.server_name.startswith('impala'): # partition_spec not supported
+    if partition_spec and self.server_name.startswith('impala'):  # partition_spec not supported
       partition_query = " AND ".join(partition_spec.split(','))
     else:
       table = self.get_table(db_name, table_name)
@@ -1176,11 +1173,9 @@ class HiveServer2Dbms(object):
     else:
       return self.execute_statement(hql)
 
-
   def describe_partition(self, db_name, table_name, partition_spec):
     return self.client.get_table(db_name, table_name, partition_spec=partition_spec)
 
-
   def drop_partitions(self, db_name, table_name, partition_specs, design=None, generate_ddl_only=False):
     hql = []
 
@@ -1198,7 +1193,6 @@ class HiveServer2Dbms(object):
 
     return self.execute_query(query, design)
 
-
   def get_indexes(self, db_name, table_name):
     hql = 'SHOW FORMATTED INDEXES ON `%(table)s` IN `%(database)s`' % {'table': table_name, 'database': db_name}
 
@@ -1211,11 +1205,9 @@ class HiveServer2Dbms(object):
 
     return result
 
-
   def get_configuration(self):
     return self.client.get_configuration()
 
-
   def get_functions(self, prefix=None, database=None):
     '''
     Not using self.client.get_functions() as pretty limited. More comments there.
@@ -1242,7 +1234,6 @@ class HiveServer2Dbms(object):
 
     return rows
 
-
   def get_function(self, name):
     hql = 'DESCRIBE FUNCTION EXTENDED `%(name)s`' % {
       'name': name,
@@ -1257,7 +1248,6 @@ class HiveServer2Dbms(object):
 
     return rows
 
-
   def get_query_metadata(self, query):
     hql = 'SELECT * FROM ( %(query)s ) t LIMIT 0' % {'query': query.strip(';')}
 
@@ -1270,11 +1260,9 @@ class HiveServer2Dbms(object):
 
     return result
 
-
   def explain(self, query):
     return self.client.explain(query)
 
-
   def get_primary_keys(self, database_name, table_name, catalog_name=None):
 
     return self.client.get_primary_keys(
@@ -1283,7 +1271,6 @@ class HiveServer2Dbms(object):
       catalog_name=catalog_name
     )
 
-
   def get_foreign_keys(self, parent_catalog_name=None, parent_database_name=None, parent_table_name=None, foreign_catalog_name=None,
       foreign_database_name=None, foreign_table_name=None):
 
@@ -1296,11 +1283,9 @@ class HiveServer2Dbms(object):
       foreign_table_name=foreign_table_name
     )
 
-
   def get_status(self):
     return self.client.getStatus()
 
-
   def get_default_configuration(self, include_hadoop):
     return self.client.get_default_configuration(include_hadoop)
 
@@ -1331,7 +1316,7 @@ class SubQueryTable(object):
     self.name = 'Test'
     cols = db.get_query_metadata(query).data_table.cols()
     for col in cols:
-      col.name = re.sub('^t\.', '', col.name)
+      col.name = re.sub(r'^t\.', '', col.name)
       col.type = HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type, 'string')
     self.cols = cols
     self.hdfs_link = None