@@ -42,123 +42,18 @@ except ImportError as e:
   LOG.exception('Spark is not enabled')
 
 
-class SparkConfiguration(object):
-
-  APP_NAME = 'spark'
-
-  PROPERTIES = [
-    {
-      "name": "conf",
-      "nice_name": _("Spark Conf"),
-      "help_text": _("Add one or more Spark conf properties to the session."),
-      "type": "settings",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    },
-    {
-      "name": "jars",
-      "nice_name": _("Jars"),
-      "help_text": _("Add one or more JAR files to the list of resources."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "files",
-      "nice_name": _("Files"),
-      "help_text": _("Files to be placed in the working directory of each executor."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "pyFiles",
-      "nice_name": _("pyFiles"),
-      "help_text": _("Python files to be placed in the working directory of each executor."),
-      "type": "csv-hdfs-files",
-      "is_yarn": False,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }, {
-      "name": "driverMemory",
-      "nice_name": _("Driver Memory"),
-      "help_text": _("Amount of memory to use for the driver process in GB. (Default: 1). "),
-      "type": "jvm",
-      "is_yarn": False,
-      "multiple": False,
-      "defaultValue": '1G',
-      "value": '1G',
-    },
-    # YARN-only properties
-    {
-      "name": "driverCores",
-      "nice_name": _("Driver Cores"),
-      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 1,
-      "value": 1,
-    }, {
-      "name": "numExecutors",
-      "nice_name": _("numExecutors"),
-      "help_text": _("Number of executors to launch for this session (Default: 2)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 2,
-      "value": 2,
-    }, {
-      "name": "executorMemory",
-      "nice_name": _("Executor Memory"),
-      "help_text": _("Amount of memory to use per executor process in GB. (Default: 1)"),
-      "type": "jvm",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": '1G',
-      "value": '1G',
-    }, {
-      "name": "executorCores",
-      "nice_name": _("Executor Cores"),
-      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
-      "type": "number",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 1,
-      "value": 1,
-    }, {
-      "name": "queue",
-      "nice_name": _("Queue"),
-      "help_text": _("The YARN queue to submit to, only in cluster mode (Default: default)"),
-      "type": "string",
-      "is_yarn": True,
-      "multiple": False,
-      "defaultValue": 'default',
-      "value": 'default',
-    }, {
-      "name": "archives",
-      "nice_name": _("Archives"),
-      "help_text": _("Archives to be extracted into the working directory of each executor, only in cluster mode."),
-      "type": "csv-hdfs-files",
-      "is_yarn": True,
-      "multiple": True,
-      "defaultValue": [],
-      "value": [],
-    }
-  ]
-
-
 class SparkApi(Api):
 
   SPARK_UI_RE = re.compile("Started SparkUI at (http[s]?://([0-9a-zA-Z-_\.]+):(\d+))")
   YARN_JOB_RE = re.compile("tracking URL: (http[s]?://.+/)")
   STANDALONE_JOB_RE = re.compile("Got job (\d+)")
 
+  def __init__(self, user, interpreter):
+    super(SparkApi, self).__init__(user=user, interpreter=interpreter)
+
+  def get_api(self):
+    return get_spark_api(self.user, self.interpreter)
+
   @staticmethod
   def get_livy_props(lang, properties=None):
     props = dict([(p['name'], p['value']) for p in SparkConfiguration.PROPERTIES])
@@ -182,14 +77,14 @@ class SparkApi(Api):
         LOG.debug("Check List type: {} was not a list".format(key))
         _tmp = props[key]
         props[key] = _tmp.split(",")
-
+
     # Convert the conf list to a dict for Livy
     LOG.debug("Property Spark Conf kvp list from UI is: " + str(props['conf']))
     props['conf'] = {conf.get('key'): conf.get('value') for i, conf in enumerate(props['conf'])}
     LOG.debug("Property Spark Conf dictionary is: " + str(props['conf']))
-
-    props['kind'] = lang
-
+
+    props['kind'] = 'sql' if lang == 'sparksql' else lang
+
     return props
 
   @staticmethod
@@ -226,7 +121,7 @@ class SparkApi(Api):
 
     if status['state'] != 'idle':
       info = '\n'.join(status['log']) if status['log'] else 'timeout'
-      raise QueryError(_('The Spark session could not be created in the cluster: %s') % info)
+      raise QueryError(_('The Spark session is %s and could not be created in the cluster: %s') % (status['state'], info))
 
     return {
       'type': lang,
@@ -235,7 +130,7 @@ class SparkApi(Api):
     }
 
   def execute(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
 
     try:
@@ -253,7 +148,7 @@ class SparkApi(Api):
       raise e
 
   def check_status(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     cell = snippet['result']['handle']['id']
 
@@ -270,7 +165,7 @@ class SparkApi(Api):
       raise e
 
   def fetch_result(self, notebook, snippet, rows, start_over):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     cell = snippet['result']['handle']['id']
 
@@ -336,14 +231,14 @@ class SparkApi(Api):
       raise QueryError(msg)
 
   def cancel(self, notebook, snippet):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
     response = api.cancel(session['id'])
 
     return {'status': 0}
 
   def get_log(self, notebook, snippet, startFrom=0, size=None):
-    api = get_spark_api(self.user)
+    api = self.get_api()
     session = _get_snippet_session(notebook, snippet)
 
     return api.get_log(session['id'], startFrom=startFrom, size=size)
@@ -352,7 +247,7 @@ class SparkApi(Api):
     pass
 
   def close_session(self, session):
-    api = get_spark_api(self.user)
+    api = self.get_api()
 
     if session['id'] is not None:
       try:
@@ -389,7 +284,7 @@ class SparkApi(Api):
 
     response = {}
 
-    api = get_spark_api(self.user)
+    api = self.get_api()
 
     api.get_status()
 
@@ -439,3 +334,105 @@ class SparkApi(Api):
 
   def _is_yarn_mode(self):
     return LIVY_SERVER_SESSION_KIND.get() == "yarn"
+
+
+class SparkConfiguration(object):
+
+  APP_NAME = 'spark'
+
+  PROPERTIES = [
+    {
+      "name": "conf",
+      "nice_name": _("Spark Conf"),
+      "help_text": _("Add one or more Spark conf properties to the session."),
+      "type": "settings",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    },
+    {
+      "name": "jars",
+      "nice_name": _("Jars"),
+      "help_text": _("Add one or more JAR files to the list of resources."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "files",
+      "nice_name": _("Files"),
+      "help_text": _("Files to be placed in the working directory of each executor."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "pyFiles",
+      "nice_name": _("pyFiles"),
+      "help_text": _("Python files to be placed in the working directory of each executor."),
+      "type": "csv-hdfs-files",
+      "is_yarn": False,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }, {
+      "name": "driverMemory",
+      "nice_name": _("Driver Memory"),
+      "help_text": _("Amount of memory to use for the driver process in GB. (Default: 1). "),
+      "type": "jvm",
+      "is_yarn": False,
+      "multiple": False,
+      "defaultValue": '1G',
+      "value": '1G',
+    },
+    # YARN-only properties
+    {
+      "name": "driverCores",
+      "nice_name": _("Driver Cores"),
+      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
+      "type": "number",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 1,
+      "value": 1,
+    }, {
+      "name": "executorMemory",
+      "nice_name": _("Executor Memory"),
+      "help_text": _("Amount of memory to use per executor process in GB. (Default: 1)"),
+      "type": "jvm",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": '1G',
+      "value": '1G',
+    }, {
+      "name": "executorCores",
+      "nice_name": _("Executor Cores"),
+      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
+      "type": "number",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 1,
+      "value": 1,
+    }, {
+      "name": "queue",
+      "nice_name": _("Queue"),
+      "help_text": _("The YARN queue to submit to, only in cluster mode (Default: default)"),
+      "type": "string",
+      "is_yarn": True,
+      "multiple": False,
+      "defaultValue": 'default',
+      "value": 'default',
+    }, {
+      "name": "archives",
+      "nice_name": _("Archives"),
+      "help_text": _("Archives to be extracted into the working directory of each executor, only in cluster mode."),
+      "type": "csv-hdfs-files",
+      "is_yarn": True,
+      "multiple": True,
+      "defaultValue": [],
+      "value": [],
+    }
+  ]