HUE-9277 [spark] Support connectors in Livy client

Romain 5 years ago
parent commit d8ae378ae0

+ 1 - 1
apps/jobbrowser/src/jobbrowser/templates/job_browser.mako

@@ -3926,7 +3926,7 @@ ${ commonheader("Job Browser", "jobbrowser", user, request) | n,unicode }
           return self.appConfig() && self.appConfig()['scheduler'] && self.appConfig()['scheduler']['interpreter_names'].indexOf('celery-beat') != -1;
         };
         var livyInterfaceCondition = function () {
-          return '${ is_mini }' == 'False' && self.appConfig() && self.appConfig()['editor'] && self.appConfig()['editor']['interpreter_names'].indexOf('pyspark') != -1 && (!self.cluster() || self.cluster()['type'].indexOf('altus') == -1);
+          return '${ is_mini }' == 'False' && self.appConfig() && self.appConfig()['editor'] && (self.appConfig()['editor']['interpreter_names'].indexOf('pyspark') != -1 || self.appConfig()['editor']['interpreter_names'].indexOf('sparksql') != -1);
         };
         var queryInterfaceCondition = function () {
           return '${ ENABLE_QUERY_BROWSER.get() }' == 'True' && self.appConfig() && self.appConfig()['editor'] && self.appConfig()['editor']['interpreter_names'].indexOf('impala') != -1 && (!self.cluster() || self.cluster()['type'].indexOf('altus') == -1);
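
For reference, the updated livyInterfaceCondition drops the Altus cluster check and now shows the Livy interface whenever either a pyspark or a sparksql interpreter is configured in the editor. A minimal Python sketch of the equivalent predicate, applied to a hypothetical app_config dict (the names below are illustrative, not the actual Knockout view model):

# Standalone check mirroring livyInterfaceCondition above (illustrative only).
def show_livy_interface(app_config, is_mini=False):
    # app_config stands in for self.appConfig(); is_mini for the Mako '${ is_mini }' flag.
    if is_mini or not app_config or not app_config.get('editor'):
        return False
    names = app_config['editor'].get('interpreter_names', [])
    return 'pyspark' in names or 'sparksql' in names

assert show_livy_interface({'editor': {'interpreter_names': ['sparksql']}})
assert not show_livy_interface({'editor': {'interpreter_names': ['hive']}})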

+ 21 - 12
apps/spark/src/spark/livy_client.py

@@ -21,6 +21,7 @@ import json
 import posixpath
 import threading
 
+from desktop.conf import has_connectors
 from desktop.lib.rest.http_client import HttpClient
 from desktop.lib.rest.resource import Resource
 
@@ -39,17 +40,22 @@ API_CACHE = None
 API_CACHE_LOCK = threading.Lock()
 
 
-def get_api(user):
-  global API_CACHE
-  if API_CACHE is None:
-    API_CACHE_LOCK.acquire()
-    try:
-      if API_CACHE is None:
-        API_CACHE = LivyClient(get_livy_server_url())
-    finally:
-      API_CACHE_LOCK.release()
-  API_CACHE.setuser(user)
-  return API_CACHE
+def get_api(user, connector=None):
+  if connector is not None and connector.get('options'):
+    client = LivyClient(connector['options']['api_url'])
+    client.setuser(user)
+    return client
+  else:
+    global API_CACHE
+    if API_CACHE is None:
+      API_CACHE_LOCK.acquire()
+      try:
+        if API_CACHE is None:
+          API_CACHE = LivyClient(get_livy_server_url())
+      finally:
+        API_CACHE_LOCK.release()
+    API_CACHE.setuser(user)
+    return API_CACHE
 
 
 class LivyClient(object):
@@ -112,7 +118,10 @@ class LivyClient(object):
     return '\n'.join(response['log'])
 
   def create_session(self, **properties):
-    properties['proxyUser'] = self.user
+    properties['proxyUser'] = self.user.split('@')[0]
+    if has_connectors():  # Only SQL supported via connectors currently
+      properties['kind'] = 'sql'
+
     return self._root.post('sessions', data=json.dumps(properties), contenttype=_JSON_CONTENT_TYPE)
 
   def get_sessions(self):
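
A short usage sketch of the reworked get_api(): when a connector carrying options is passed, a fresh LivyClient is built from that connector's own api_url; otherwise the module-level cached client pointing at get_livy_server_url() is reused. The connector dict and user value below are illustrative (they mirror the connector type defaults and the test setup further down):

from spark.livy_client import get_api

user = 'hue_test'  # a username, as in spark_shell_tests.py

# Connector-backed call: builds a fresh, non-cached LivyClient from the
# connector's 'api_url' option.
connector = {'nice_name': 'SparkSQL', 'options': {'api_url': 'http://localhost:8998'}}
client = get_api(user, connector)

# No connector: falls back to the cached singleton created once from
# get_livy_server_url().
default_client = get_api(user)

Note that create_session() now also strips any Kerberos realm from the proxy user ('user@REALM' becomes 'user') and forces kind='sql' when connectors are enabled, since only SQL is supported through connectors for now.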

+ 4 - 4
desktop/core/src/desktop/lib/connectors/types.py

@@ -174,7 +174,7 @@ CONNECTOR_TYPES = [
       {'name': 'ssh_server_host', 'value': '127.0.0.1'},
     ],
     'category': 'editor',
-    'description': '',
+    'description': 'Via Thrift server',
     'properties': {
       'is_sql': True,
       'sql_identifier_quote': '`',
@@ -192,8 +192,8 @@ CONNECTOR_TYPES = [
     }
   },
   {
-    'nice_name': "Livy",
-    'dialect': 'livy',
+    'nice_name': "SparkSQL",
+    'dialect': 'sparksql',
     'interface': 'livy',
     'settings': [
       {'name': 'api_url', 'value': 'http://localhost:8998'},
@@ -201,7 +201,7 @@ CONNECTOR_TYPES = [
       {'name': 'ssh_server_host', 'value': '127.0.0.1'},
     ],
     'category': 'editor',
-    'description': '',
+    'description': 'Via Livy server',
     'properties': {
       'is_sql': True,
       'sql_identifier_quote': '`',
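
The Livy connector type is renamed to SparkSQL with the sparksql dialect, while keeping the livy interface, and both the Spark Thrift and Livy entries gain a short description. A hedged sketch of what a configured instance of this type could look like once its settings are filled in (the structure is an assumption based on how get_api(user, connector) reads connector['options']):

# Illustrative configured instance of the renamed connector type; only the
# fields visible in the settings above are shown.
sparksql_connector = {
    'nice_name': 'SparkSQL',
    'dialect': 'sparksql',
    'interface': 'livy',
    'options': {
        'api_url': 'http://localhost:8998',   # Livy server, from the 'api_url' setting
        'ssh_server_host': '127.0.0.1',
    },
}

# This 'options' shape is what spark.livy_client.get_api(user, connector) and
# SparkApi.get_api() in spark_shell.py consume.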

+ 3 - 1
desktop/libs/dashboard/src/dashboard/models.py

@@ -962,9 +962,11 @@ def extract_solr_exception_message(e):
     message = json.loads(e.message)
     msg = message['error'].get('msg')
     response['error'] = msg if msg else message['error']['trace']
+  except ValueError:
+    LOG.warn('Failed to parse json response: %s' % force_unicode(e))
+    response['error'] = force_unicode(e)
   except Exception as e2:
     LOG.exception('Failed to extract json message: %s' % force_unicode(e2))
-    LOG.exception('Failed to parse json response: %s' % force_unicode(e))
     response['error'] = force_unicode(e)
 
   return response
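
extract_solr_exception_message() now separates a non-JSON error payload, where json.loads() raises ValueError, from other extraction failures: the former is logged as a warning and the raw message is returned, the latter keeps the full exception log. A tiny standalone illustration of the ValueError path (plain Python, not Hue code):

import json

try:
    json.loads('<html>HTTP 500</html>')  # a non-JSON error body
except ValueError as e:  # json.JSONDecodeError is a subclass of ValueError
    # This is the branch the new 'except ValueError' clause covers: surface the
    # original message rather than a parse traceback.
    print('Failed to parse json response: %s' % e)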

+ 120 - 123
desktop/libs/notebook/src/notebook/connectors/spark_shell.py

@@ -42,123 +42,18 @@ except ImportError as e:
   LOG.exception('Spark is not enabled')
 
 
-class SparkConfiguration(object):
-
-  APP_NAME = 'spark'
-
-  PROPERTIES = [
-    {
-      "name": "conf",
-      "nice_name": _("Spark Conf"),
-      "help_text": _("Add one or more Spark conf properties to the session."),
-      "type": "settings",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    },
-    {
-      "name": "jars",
-      "nice_name": _("Jars"),
-      "help_text": _("Add one or more JAR files to the list of resources."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "files",
-      "nice_name": _("Files"),
-      "help_text": _("Files to be placed in the working directory of each executor."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "pyFiles",
-      "nice_name": _("pyFiles"),
-      "help_text": _("Python files to be placed in the working directory of each executor."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "driverMemory",
-      "nice_name": _("Driver Memory"),
-      "help_text": _("Amount of memory to use for the driver process in GB. (Default: 1). "),
-      "type": "jvm",
-      "is_yarn": False,
-      "multiple": False,
-      "defaultValue": '1G',
-      "value": '1G',
-    },
-    # YARN-only properties
-    {
-      "name": "driverCores",
-      "nice_name": _("Driver Cores"),
-      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 1,
-      "value": 1,
-    }, {
-      "name": "numExecutors",
-      "nice_name": _("numExecutors"),
-      "help_text": _("Number of executors to launch for this session (Default: 2)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 2,
-      "value": 2,
-    }, {
-      "name": "executorMemory",
-      "nice_name": _("Executor Memory"),
-      "help_text": _("Amount of memory to use per executor process in GB. (Default: 1)"),
-      "type": "jvm",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": '1G',
-      "value": '1G',
-    }, {
-      "name": "executorCores",
-      "nice_name": _("Executor Cores"),
-      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 1,
-      "value": 1,
-    }, {
-      "name": "queue",
-      "nice_name": _("Queue"),
-      "help_text": _("The YARN queue to submit to, only in cluster mode (Default: default)"),
-      "type": "string",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 'default',
-      "value": 'default',
-    }, {
-      "name": "archives",
-      "nice_name": _("Archives"),
-      "help_text": _("Archives to be extracted into the working directory of each executor, only in cluster mode."),
-      "type": "csv-hdfs-files",
-      "is_yarn": True,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }
-  ]
-
-
 class SparkApi(Api):
 
   SPARK_UI_RE = re.compile("Started SparkUI at (http[s]?://([0-9a-zA-Z-_\.]+):(\d+))")
   YARN_JOB_RE = re.compile("tracking URL: (http[s]?://.+/)")
   STANDALONE_JOB_RE = re.compile("Got job (\d+)")
 
+  def __init__(self, user, interpreter):
+    super(SparkApi, self).__init__(user=user, interpreter=interpreter)
+
+  def get_api(self):
+    return get_spark_api(self.user, self.interpreter)
+
   @staticmethod
   def get_livy_props(lang, properties=None):
     props = dict([(p['name'], p['value']) for p in SparkConfiguration.PROPERTIES])
@@ -182,14 +77,14 @@ class SparkApi(Api):
       LOG.debug("Check List type: {} was not a list".format(key))
       _tmp = props[key]
       props[key] = _tmp.split(",")
-    
+
     # Convert the conf list to a dict for Livy
     LOG.debug("Property Spark Conf kvp list from UI is: " + str(props['conf']))
     props['conf'] = {conf.get('key'): conf.get('value') for i, conf in enumerate(props['conf'])}
     LOG.debug("Property Spark Conf dictionary is: " + str(props['conf']))
-    
-    props['kind'] = lang
-      
+
+    props['kind'] = 'sql' if lang == 'sparksql' else lang
+
     return props
 
   @staticmethod
@@ -226,7 +121,7 @@ class SparkApi(Api):
 
     if status['state'] != 'idle':
       info = '\n'.join(status['log']) if status['log'] else 'timeout'
-      raise QueryError(_('The Spark session could not be created in the cluster: %s') % info)
+      raise QueryError(_('The Spark session is %s and could not be created in the cluster: %s') % (status['state'], info))
 
     return {
         'type': lang,
@@ -235,7 +130,7 @@ class SparkApi(Api):
     }
 
   def execute(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
 
     try:
@@ -253,7 +148,7 @@ class SparkApi(Api):
         raise e
 
   def check_status(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     cell = snippet['result']['handle']['id']
 
@@ -270,7 +165,7 @@ class SparkApi(Api):
         raise e
 
   def fetch_result(self, notebook, snippet, rows, start_over):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     cell = snippet['result']['handle']['id']
 
@@ -336,14 +231,14 @@ class SparkApi(Api):
       raise QueryError(msg)
 
   def cancel(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     response = api.cancel(session['id'])
 
     return {'status': 0}
 
   def get_log(self, notebook, snippet, startFrom=0, size=None):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
 
     return api.get_log(session['id'], startFrom=startFrom, size=size)
@@ -352,7 +247,7 @@ class SparkApi(Api):
     pass
 
   def close_session(self, session):
-    api = get_spark_api(self.user)
+    api = self.get_api()
 
     if session['id'] is not None:
       try:
@@ -389,7 +284,7 @@ class SparkApi(Api):
 
     response = {}
 
-    api = get_spark_api(self.user)
+    api = self.get_api()
 
     api.get_status()
 
@@ -439,3 +334,105 @@ class SparkApi(Api):
 
   def _is_yarn_mode(self):
     return LIVY_SERVER_SESSION_KIND.get() == "yarn"
+
+
+class SparkConfiguration(object):
+
+  APP_NAME = 'spark'
+
+  PROPERTIES = [
+    {
+      "name": "conf",
+      "nice_name": _("Spark Conf"),
+      "help_text": _("Add one or more Spark conf properties to the session."),
+      "type": "settings",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    },
+    {
+      "name": "jars",
+      "nice_name": _("Jars"),
+      "help_text": _("Add one or more JAR files to the list of resources."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "files",
+      "nice_name": _("Files"),
+      "help_text": _("Files to be placed in the working directory of each executor."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "pyFiles",
+      "nice_name": _("pyFiles"),
+      "help_text": _("Python files to be placed in the working directory of each executor."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "driverMemory",
+      "nice_name": _("Driver Memory"),
+      "help_text": _("Amount of memory to use for the driver process in GB. (Default: 1). "),
+      "type": "jvm",
+      "is_yarn": False,
+      "multiple": False,
+      "defaultValue": '1G',
+      "value": '1G',
+    },
+    # YARN-only properties
+    {
+      "name": "driverCores",
+      "nice_name": _("Driver Cores"),
+      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
+      "type": "number",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 1,
+      "value": 1,
+    }, {
+      "name": "executorMemory",
+      "nice_name": _("Executor Memory"),
+      "help_text": _("Amount of memory to use per executor process in GB. (Default: 1)"),
+      "type": "jvm",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": '1G',
+      "value": '1G',
+    }, {
+      "name": "executorCores",
+      "nice_name": _("Executor Cores"),
+      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
+      "type": "number",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 1,
+      "value": 1,
+    }, {
+      "name": "queue",
+      "nice_name": _("Queue"),
+      "help_text": _("The YARN queue to submit to, only in cluster mode (Default: default)"),
+      "type": "string",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 'default',
+      "value": 'default',
+    }, {
+      "name": "archives",
+      "nice_name": _("Archives"),
+      "help_text": _("Archives to be extracted into the working directory of each executor, only in cluster mode."),
+      "type": "csv-hdfs-files",
+      "is_yarn": True,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }
+  ]
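
Beyond moving SparkConfiguration below the API class, SparkApi now receives the interpreter at construction time and resolves its Livy client through get_spark_api(self.user, self.interpreter), and get_livy_props() translates the sparksql dialect into Livy's sql session kind. A hedged sketch of both, with an illustrative interpreter dict shaped like the one in the test setup below:

from notebook.connectors.spark_shell import SparkApi

interpreter = {'name': 'sparksql', 'options': {'api_url': 'http://localhost:8998'}}

api = SparkApi('hue_test', interpreter)
client = api.get_api()  # LivyClient bound to the interpreter's api_url

props = SparkApi.get_livy_props('sparksql')  # default session properties
assert props['kind'] == 'sql'  # 'sparksql' is mapped to Livy's 'sql' kind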

+ 16 - 1
desktop/libs/notebook/src/notebook/connectors/spark_shell_tests.py

@@ -32,7 +32,22 @@ class TestSparkApi(object):
 
   def setUp(self):
     self.user = 'hue_test'
-    self.api = SparkApi(self.user)
+    self.interpreter = {
+        'name': 'livy',
+        'options': {
+          'api_url': 'http://gethue.com:8998'
+        },
+      }
+    self.api = SparkApi(self.user, self.interpreter)
+
+
+  def test_get_api(self):
+    lang = 'pyspark'
+    properties = None
+
+    # with patch('notebook.connectors.spark_shell.get_spark_api') as get_spark_api:
+    spark_api = self.api.get_api()
+    assert_equal(spark_api.__class__.__name__, 'LivyClient')
 
   def test_get_livy_props_method(self):
     test_properties = [{