
HUE-8758 [connector] Adding check connection to livy dialect

Romain 5 years ago
parent commit 911408940b

+ 22 - 15
apps/spark/src/spark/conf.py

@@ -34,13 +34,29 @@ LOG = logging.getLogger(__name__)
 LIVY_SERVER_URL = Config(
   key="livy_server_url",
   help=_t("The Livy Server URL."),
-  default="")
+  default=""
+)
+
+SECURITY_ENABLED = Config(
+  key="security_enabled",
+  help=_t("Whether Livy requires client to perform Kerberos authentication."),
+  default=False,
+  type=coerce_bool
+)
+
+CSRF_ENABLED = Config(
+  key="csrf_enabled",
+  help=_t("Whether Livy requres client to have CSRF enabled."),
+  default=False,
+  type=coerce_bool
+)
 
 # Deprecated
 LIVY_SERVER_HOST = Config(
   key="livy_server_host",
   help=_t("Host address of the Livy Server."),
-  default="localhost")
+  default="localhost"
+)
 
 # Deprecated
 LIVY_SERVER_PORT = Config(
@@ -48,22 +64,12 @@ LIVY_SERVER_PORT = Config(
   help=_t("Port of the Livy Server."),
   default="8998")
 
+# Deprecated
 LIVY_SERVER_SESSION_KIND = Config( # Note: this one is ignored by Livy, this should match the current Spark mode
    key="livy_server_session_kind",
    help=_t("Configure livy to start in local 'process' mode, or 'yarn' workers."),
-   default="yarn")
-
-SECURITY_ENABLED = Config(
-  key="security_enabled",
-  help=_t("Whether Livy requires client to perform Kerberos authentication."),
-  default=False,
-  type=coerce_bool)
-
-CSRF_ENABLED = Config(
-  key="csrf_enabled",
-  help=_t("Whether Livy requres client to have CSRF enabled."),
-  default=False,
-  type=coerce_bool)
+   default="yarn"
+)
 
 # Spark SQL
 SQL_SERVER_HOST = Config(
@@ -99,6 +105,7 @@ def get_livy_server_url():
     url = 'http://%s:%s' % (LIVY_SERVER_HOST.get(), LIVY_SERVER_PORT.get())
   return url
 
+
 def get_spark_status(user):
   from spark.livy_client import get_api
   status = None

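For context, a minimal sketch (not part of this commit) of how the relocated options could be read together when building a Livy client. The config names and get_livy_server_url() come from conf.py above; the helper name build_livy_client_kwargs is hypothetical.

  from spark.conf import CSRF_ENABLED, SECURITY_ENABLED, get_livy_server_url

  def build_livy_client_kwargs():
    # get_livy_server_url() falls back to host:port when livy_server_url is unset
    return {
      'url': get_livy_server_url(),
      'security_enabled': SECURITY_ENABLED.get(),  # Kerberos authentication toward Livy
      'csrf_enabled': CSRF_ENABLED.get(),          # send CSRF headers when Livy requires them
    }
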
+ 10 - 6
desktop/libs/notebook/src/notebook/connectors/base.py

@@ -436,10 +436,10 @@ def get_api(request, snippet):
     return OozieApi(user=request.user, request=request)
   elif interface == 'livy':
     from notebook.connectors.spark_shell import SparkApi
-    return SparkApi(request.user)
+    return SparkApi(request.user, interpreter=interpreter)
   elif interface == 'livy-batch':
     from notebook.connectors.spark_batch import SparkBatchApi
-    return SparkBatchApi(request.user)
+    return SparkBatchApi(request.user, interpreter=interpreter)
   elif interface == 'text' or interface == 'markdown':
     from notebook.connectors.text import TextApi
     return TextApi(request.user)
@@ -559,13 +559,17 @@ class Api(object):
   def get_jobs(self, notebook, snippet, logs):
     return []
 
-  def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None): raise NotImplementedError()
+  def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
+    raise NotImplementedError()
 
-  def export_data_as_hdfs_file(self, snippet, target_file, overwrite): raise NotImplementedError()
+  def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
+    raise NotImplementedError()
 
-  def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None): raise NotImplementedError()
+  def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None):
+    raise NotImplementedError()
 
-  def export_large_data_to_hdfs(self, notebook, snippet, destination): raise NotImplementedError()
+  def export_large_data_to_hdfs(self, notebook, snippet, destination):
+    raise NotImplementedError()
 
   def statement_risk(self, interface, notebook, snippet):
     response = self._get_current_statement(notebook, snippet)

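The dispatch above now forwards the resolved interpreter to the Spark APIs, so per-connector settings can reach the Livy client. A rough caller-side sketch (the interpreter dict shape and the 'api_url' option name are illustrative assumptions, not taken from this commit):

  from notebook.connectors.spark_shell import SparkApi

  def make_livy_api(request):
    # Illustrative connector definition that get_api() would resolve for a
    # Livy interpreter and now hand through to SparkApi.
    interpreter = {
      'name': 'livy',
      'dialect': 'livy',
      'options': {'api_url': 'http://localhost:8998'},  # hypothetical per-connector option
    }
    return SparkApi(request.user, interpreter=interpreter)
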
+ 15 - 0
desktop/libs/notebook/src/notebook/connectors/spark_shell.py

@@ -375,6 +375,21 @@ class SparkApi(Api):
     else:
       return self._get_standalone_jobs(logs)
 
+  def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
+    if operation != 'hello':
+      raise NotImplementedError()
+
+    response = {}
+
+    api = get_spark_api(self.user)
+
+    api.get_status()
+
+    response['status'] = 0
+    response['rows'] = []
+
+    return response
+
   def _get_standalone_jobs(self, logs):
     job_ids = set([])
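
The new get_sample_data override is what backs the "check connection" in the commit title: for the 'hello' operation it pings Livy via get_status() and returns status 0 with no rows, while any other operation still raises NotImplementedError. A minimal caller-side sketch (assumed wrapper, not part of the commit):

  import logging

  LOG = logging.getLogger(__name__)

  def check_livy_connection(api, snippet):
    # Exercises only the connectivity path added above; a failing get_status()
    # surfaces here as an exception.
    try:
      result = api.get_sample_data(snippet, operation='hello')
      return result.get('status') == 0
    except Exception as e:
      LOG.warning('Livy connection check failed: %s' % e)
      return False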