
[core] Moved namespaces and computes to database

The existing implementation connected to the external Altus and DWX
APIs to fetch the available computes. This commit changes that by
moving the cluster configuration to the database. Two new tables have been added:
(a) `beeswax_namespace` to hold the config for a namespace/dialect
(b) `beeswax_compute` to hold the configs for the individual compute clusters
linked to the namespaces.
This change currently supports Hive and Impala clusters.
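
As a rough sketch of what the new tables enable (mirroring the new
`Namespace.get_computes()` in beeswax/models.py below; the `request`
object is only illustrative):

    from beeswax.models import Namespace

    ns = Namespace.objects.filter(dialect='impala').first()
    # Computes visible to this user, filtered by LDAP groups and sorted
    # by readiness and then by name.
    computes = ns.get_computes(request.user) if ns else []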

There is also a service-discovery component that keeps the list of
namespaces and computes updated in the corresponding tables.
`sync_warehouses.py` performs the service discovery in the CDW
environment by talking to the Kubernetes APIs. It creates one Hive and
one Impala namespace and one compute for each virtual warehouse. The
command is intended to run every minute so that the list of warehouses
stays up to date.
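
For reference, the command is invoked as shown in the header comment of
sync_warehouses.py below; the paths are deployment specific and the
every-minute scheduling (cron, Kubernetes CronJob, ...) is left to the
environment:

    HUE_CONF_DIR=/etc/hue/conf HUE_IGNORE_PASSWORD_SCRIPT_ERRORS=1 \
        /opt/hive/build/env/bin/hue sync_warehouses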

There are related changes and fixes to the rest of the code to support
query execution on the different computes.
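
For example (a minimal sketch modelled on the updated impala dbms test
in this commit), a compute-style connector dict now resolves directly
to a query-server config without any external API call:

    from beeswax.server.dbms import get_query_server_config

    connector = {
        'type': 'impala-compute', 'name': 'impala-1', 'dialect': 'impala',
        'interface': 'hiveserver2',
        'options': {'server_host': 'gethue.com', 'server_port': 10000},
    }
    query_server = get_query_server_config(name='impala', connector=connector)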

Change-Id: Ifd8dbc8d716dfe2000fbfa8121e39f2610051fa1
Amit Srivastava 2 years ago
parent
commit
283676c5f9

+ 52 - 0
apps/beeswax/src/beeswax/migrations/0003_compute_namespace.py

@@ -0,0 +1,52 @@
+# Generated by Django 3.2.16 on 2023-09-07 14:30
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('beeswax', '0002_auto_20200320_0746'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='Namespace',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(default='', max_length=255)),
+                ('description', models.TextField(default='')),
+                ('dialect', models.CharField(db_index=True, help_text='Type of namespace, e.g. hive, mysql... ', max_length=32)),
+                ('interface', models.CharField(db_index=True, default='sqlalchemy', help_text='Type of interface, e.g. sqlalchemy, hiveserver2... ', max_length=32)),
+                ('external_id', models.CharField(db_index=True, max_length=255, null=True)),
+                ('last_modified', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Time last modified')),
+            ],
+            options={
+                'verbose_name': 'namespace',
+                'verbose_name_plural': 'namespaces',
+                'unique_together': {('name',)},
+            },
+        ),
+        migrations.CreateModel(
+            name='Compute',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(default='', max_length=255)),
+                ('description', models.TextField(default='')),
+                ('dialect', models.CharField(db_index=True, help_text='Type of compute, e.g. hive, impala... ', max_length=32)),
+                ('interface', models.CharField(db_index=True, default='sqlalchemy', help_text='Type of interface, e.g. sqlalchemy, hiveserver2... ', max_length=32)),
+                ('is_ready', models.BooleanField(default=True)),
+                ('external_id', models.CharField(db_index=True, max_length=255, null=True)),
+                ('ldap_groups_json', models.TextField(default='[]')),
+                ('settings', models.TextField(default='{}')),
+                ('last_modified', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Time last modified')),
+                ('namespace', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='beeswax.namespace')),
+            ],
+            options={
+                'verbose_name': 'compute',
+                'verbose_name_plural': 'computes',
+                'unique_together': {('name',)},
+            },
+        ),
+    ]

+ 105 - 1
apps/beeswax/src/beeswax/models.py

@@ -35,7 +35,7 @@ from desktop.lib.exceptions_renderable import PopupException
 from desktop.models import Document, Document2
 from desktop.redaction import global_redaction_engine
 from librdbms.server import dbms as librdbms_dbms
-from useradmin.models import User
+from useradmin.models import User, UserProfile
 
 from beeswax.design import HQLdesign
 
@@ -607,3 +607,107 @@ class MetaInstall(models.Model):
       return MetaInstall.objects.get(id=1)
     except MetaInstall.DoesNotExist:
       return MetaInstall(id=1)
+
+class Namespace(models.Model):
+  name = models.CharField(default='', max_length=255)
+  description = models.TextField(default='')
+  dialect = models.CharField(max_length=32, db_index=True, help_text=_t('Type of namespace, e.g. hive, mysql... '))
+  interface = models.CharField(
+    max_length=32,
+    db_index=True,
+    help_text=_t('Type of interface, e.g. sqlalchemy, hiveserver2... '),
+    default='sqlalchemy'
+  )
+  external_id = models.CharField(max_length=255, null=True, db_index=True)
+  last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Time last modified'))
+
+  class Meta:
+    verbose_name = _t('namespace')
+    verbose_name_plural = _t('namespaces')
+    unique_together = ('name',)
+
+  def get_computes(self, user):
+    """Returns the computes belonging to the current namespace that are available to the current user."""
+    if user is None:
+      return []
+    profile = UserProfile.objects.get(user=user)
+    user_groups = set(profile.data.get("saml_attributes", {}).get("groups", []))
+
+    computes = Compute.objects.filter(namespace=self)
+    computes = [co.to_dict() for co in computes if not co.ldap_groups or co.ldap_groups.intersection(user_groups)]
+    computes.sort(key=lambda c: (not c['is_ready'], c['name']))
+    return computes
+
+  def __str__(self):
+    return '%s (%s)' % (self.name, self.dialect)
+
+  def to_dict(self):
+    return {
+      'id': self.id,
+      'type': str(self.id),
+      'name': self.name,
+      'description': self.description,
+      'dialect': self.dialect,
+      'interface': self.interface,
+      'external_id': self.external_id,
+      'last_modified': self.last_modified
+    }
+
+class Compute(models.Model):
+  """
+  Instance of a compute type pointing to a Hive or Impala compute resources.
+  """
+  name = models.CharField(default='', max_length=255)
+  description = models.TextField(default='')
+  dialect = models.CharField(max_length=32, db_index=True, help_text=_t('Type of compute, e.g. hive, impala... '))
+  interface = models.CharField(
+      max_length=32,
+      db_index=True,
+      help_text=_t('Type of interface, e.g. sqlalchemy, hiveserver2... '),
+      default='sqlalchemy'
+  )
+  namespace = models.ForeignKey(Namespace, on_delete=models.CASCADE, null=True)
+  is_ready = models.BooleanField(default=True)
+  external_id = models.CharField(max_length=255, null=True, db_index=True)
+  ldap_groups_json = models.TextField(default='[]')
+  settings = models.TextField(default='{}')
+  last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Time last modified'))
+
+  class Meta:
+    verbose_name = _t('compute')
+    verbose_name_plural = _t('computes')
+    unique_together = ('name',)
+
+  def __str__(self):
+    return '%s (%s)' % (self.name, self.dialect)
+
+  def to_dict(self):
+    return {
+      'id': self.id,
+      'type': self.dialect + '-compute',
+      'name': self.name,
+      'namespace': self.namespace.name,
+      'description': self.description,
+      'dialect': self.dialect,
+      'interface': self.interface,
+      'is_ready': self.is_ready,
+      'options': self.options,
+      'external_id': self.external_id,
+      'last_modified': self.last_modified
+    }
+
+  @property
+  def ldap_groups(self):
+    if not self.ldap_groups_json:
+      self.ldap_groups_json = json.dumps([])
+    return set(json.loads(self.ldap_groups_json))
+
+  @ldap_groups.setter
+  def ldap_groups(self, val):
+    self.ldap_groups_json = json.dumps(list(val or []))
+
+  @property
+  def options(self):
+    if not self.settings:
+      self.settings = json.dumps([])
+    return {setting['name']: setting['value'] for setting in json.loads(self.settings)}

+ 21 - 18
apps/beeswax/src/beeswax/server/dbms.py

@@ -27,6 +27,7 @@ from django.core.cache import caches
 from django.urls import reverse
 from kazoo.client import KazooClient
 
+from beeswax.models import Compute
 from desktop.conf import CLUSTER_ID, has_connectors
 from desktop.lib.django_util import format_preserving_redirect
 from desktop.lib.exceptions_renderable import PopupException
@@ -142,8 +143,9 @@ def get(user, query_server=None, cluster=None):
 
 
 def get_query_server_config(name='beeswax', connector=None):
-  if connector and has_connectors(): # TODO: Give empty connector when no connector in use
-    LOG.debug("Query via connector %s" % name)
+  if connector and (has_connectors() or connector.get('compute')
+                    or connector.get('type') in ('hive-compute', 'impala-compute')):
+    LOG.debug("Query via connector %s (%s)" % (name, connector.get('type')))
     query_server = get_query_server_config_via_connector(connector)
   else:
     LOG.debug("Query via ini %s" % name)
@@ -306,28 +308,27 @@ def get_query_server_config(name='beeswax', connector=None):
 
 def get_query_server_config_via_connector(connector):
   # TODO: connector is actually a notebook interpreter
-  connector_name = full_connector_name = connector['type']
-  compute_name = None
-  if connector.get('compute'):
-    compute_name = connector['compute']['name']
-    full_connector_name = '%s-%s' % (connector_name, compute_name)
-  LOG.debug("Query cluster connector %s compute %s" % (connector_name, compute_name))
-
-  if connector['options'].get('has_ssh') == 'true':
+  compute = connector.get('compute', connector)
+  connector_name = connector['type']
+  compute_name = compute['name']
+  if compute.get('id'):
+    compute = Compute.objects.get(id=compute['id']).to_dict() #Reload the full compute from db
+  LOG.debug("Query cluster connector %s compute %s" % (connector_name, compute))
+
+  if compute['options'].get('has_ssh') == 'true':
     server_host = '127.0.0.1'
-    server_port = connector['options']['server_port']
   else:
-    server_host = (connector['compute']['options'] if 'compute' in connector else connector['options'])['server_host']
-    server_port = int((connector['compute']['options'] if 'compute' in connector else connector['options'])['server_port'])
+    server_host = compute['options']['server_host']
+  server_port = int(compute['options']['server_port'])
 
-  if 'impersonation_enabled' in connector['options']:
-    impersonation_enabled = connector['options']['impersonation_enabled'] == 'true'
+  if 'impersonation_enabled' in compute['options']:
+    impersonation_enabled = bool(compute['options']['impersonation_enabled'])
   else:
     impersonation_enabled = hiveserver2_impersonation_enabled()
 
   return {
-      'dialect': connector['dialect'],
-      'server_name': full_connector_name,
+      'dialect': compute['dialect'],
+      'server_name': compute_name,
       'server_host': server_host,
       'server_port': server_port,
       'principal': 'TODO',
@@ -335,10 +336,12 @@ def get_query_server_config_via_connector(connector):
       'auth_password': AUTH_PASSWORD.get(),
 
       'impersonation_enabled': impersonation_enabled,
-      'use_sasl': connector['options'].get('use_sasl', 'true') == 'true',
+      'use_sasl': str(compute['options'].get('use_sasl', True)).upper() == 'TRUE',
       'SESSION_TIMEOUT_S': 15 * 60,
       'querycache_rows': 1000,
       'QUERY_TIMEOUT_S': 15 * 60,
+      'transport_mode': compute['options'].get('transport_mode', 'http'),
+      'http_url': compute['options'].get('http_url', 'http://%s:%s/cliservice' % (server_host, server_port)),
   }
 
 

+ 5 - 2
apps/beeswax/src/beeswax/server/hive_server2_lib.py

@@ -667,7 +667,10 @@ class HiveServerClient(object):
         'configuration': {},
     }
     connector_type = 'hive' if self.query_server['server_name'] == 'beeswax' else self.query_server['server_name']
-    interpreter = get_interpreter(connector_type=connector_type, user=self.user)
+    interpreter_dialect = self.query_server['dialect']
+    if not interpreter_dialect:
+      interpreter = get_interpreter(connector_type=connector_type, user=self.user)
+      interpreter_dialect = interpreter.get('dialect')
 
     if self.impersonation_enabled:
       kwargs.update({'username': DEFAULT_USER})
@@ -684,7 +687,7 @@ class HiveServerClient(object):
       if csrf_header and ENABLE_X_CSRF_TOKEN_FOR_HIVE_IMPALA.get():
         kwargs['configuration'].update({'X-CSRF-TOKEN': csrf_header})
 
-    if self.query_server['server_name'] == 'hplsql' or interpreter['dialect'] == 'hplsql': # All the time
+    if self.query_server['server_name'] == 'hplsql' or interpreter_dialect == 'hplsql': # All the time
       kwargs['configuration'].update({'hive.server2.proxy.user': user.username, 'set:hivevar:mode': 'HPLSQL'})
 
     if self.query_server['server_name'] == 'llap': # All the time

+ 2 - 0
apps/beeswax/src/beeswax/server/hive_server2_lib_tests.py

@@ -56,6 +56,8 @@ class TestHiveServerClient():
         'use_sasl': True,
         'server_host': 'localhost',
         'server_port': 10000,
+        'dialect': 'hive',
+        'interface': 'hiveserver2'
     }
 
   def test_open_session(self):

+ 3 - 1
apps/impala/src/impala/dbms_tests.py

@@ -48,8 +48,10 @@ class TestDbms():
 
   def test_get_connector_config(self):
     connector = {
-      'type': 'impala-1',
+      'type': 'impala-compute',
+      'name': 'impala-1',
       'dialect': 'impala',
+      'interface': 'hiveserver2',
       'options': {'server_host': 'gethue.com', 'server_port': 10000}
     }
 

+ 8 - 4
apps/jobbrowser/src/jobbrowser/apis/query_api.py

@@ -42,18 +42,22 @@ ANALYZER = rules.TopDownAnalysis() # We need to parse some files so save as glob
 LOG = logging.getLogger()
 
 try:
-  from beeswax.models import Session
+  from beeswax.models import Session, Compute
   from impala.server import get_api as get_impalad_api, _get_impala_server_url
 except ImportError as e:
   LOG.exception('Some application are not enabled: %s' % e)
 
 
 def _get_api(user, cluster=None):
-  if cluster and cluster.get('type') == 'altus-dw':
-    server_url = 'http://impala-coordinator-%(name)s:25000' % cluster
+  compute = cluster['compute'] if cluster.get('compute') else cluster
+  if compute and compute.get('type') == 'impala-compute':
+    if compute.get('id') and not (compute.get('options') and compute['options'].get('http_url')):
+      compute = Compute.objects.get(id=compute['id']).to_dict()  # Reload the full compute from db
+    if compute.get('options') and compute['options'].get('api_url'):
+      server_url = compute['options'].get('api_url')
   else:
     # TODO: multi computes if snippet.get('compute') or snippet['type'] has computes
-    application = cluster.get('interface', 'impala')
+    application = cluster['compute']['type'] if cluster.get('compute') else cluster.get('interface', 'impala')
     session = Session.objects.get_session(user, application=application)
     server_url = _get_impala_server_url(session)
   return get_impalad_api(user=user, url=server_url)

+ 30 - 17
apps/metastore/src/metastore/views.py

@@ -36,7 +36,7 @@ from desktop.lib.exceptions_renderable import PopupException
 from desktop.models import Document2, get_cluster_config, _get_apps
 
 from beeswax.design import hql_query
-from beeswax.models import SavedQuery
+from beeswax.models import SavedQuery, Namespace
 from beeswax.server import dbms
 from beeswax.server.dbms import get_query_server_config
 from desktop.lib.view_util import location_to_url
@@ -85,9 +85,8 @@ Database Views
 
 def databases(request):
   search_filter = request.GET.get('filter', '')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
 
-  db = _get_db(user=request.user, cluster=cluster)
+  db = _get_db(user=request.user, cluster=_find_cluster(request))
   databases = db.get_databases(search_filter)
   apps_list = _get_apps(request.user, '')
 
@@ -110,7 +109,7 @@ def databases(request):
 @check_has_write_access_permission
 def drop_database(request):
   source_type = request.POST.get('source_type', request.GET.get('source_type', 'hive'))
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -164,7 +163,7 @@ def alter_database(request, database):
   response = {'status': -1, 'data': ''}
 
   source_type = request.POST.get('source_type', 'hive')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -192,7 +191,7 @@ def get_database_metadata(request, database):
   response = {'status': -1, 'data': ''}
 
   source_type = request.POST.get('source_type', 'hive')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -236,7 +235,7 @@ def table_queries(request, database, table):
 Table Views
 """
 def show_tables(request, database=None):
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, cluster=cluster)
 
@@ -291,7 +290,7 @@ def show_tables(request, database=None):
 
 
 def get_table_metadata(request, database, table):
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
   source_type = request.POST.get('source_type')
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
@@ -315,9 +314,8 @@ def get_table_metadata(request, database, table):
 
 def describe_table(request, database, table):
   app_name = get_app_name(request)
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
   source_type = request.POST.get('source_type', request.GET.get('connector_id', request.GET.get('source_type', 'hive')))
-
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
   try:
@@ -380,7 +378,7 @@ def alter_table(request, database, table):
   response = {'status': -1, 'data': ''}
 
   source_type = request.POST.get('source_type', 'hive')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -415,7 +413,7 @@ def alter_column(request, database, table):
   response = {'status': -1, 'message': ''}
 
   source_type = request.POST.get('source_type', 'hive')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -460,7 +458,7 @@ def alter_column(request, database, table):
 @check_has_write_access_permission
 def drop_table(request, database):
   source_type = request.POST.get('source_type', request.GET.get('source_type', 'hive'))
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -528,7 +526,7 @@ def load_table(request, database, table):
   response = {'status': -1, 'data': 'None'}
 
   source_type = request.POST.get('source_type', request.GET.get('source_type', 'hive'))
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -602,7 +600,7 @@ def load_table(request, database, table):
 
 
 def describe_partitions(request, database, table):
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, cluster=cluster)
   table_obj = db.get_table(database, table)
@@ -687,7 +685,7 @@ def _massage_partition(database, table, partition):
 
 
 def browse_partition(request, database, table, partition_spec):
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, cluster=cluster)
   try:
@@ -723,7 +721,7 @@ def read_partition(request, database, table, partition_spec):
 @check_has_write_access_permission
 def drop_partition(request, database, table):
   source_type = request.POST.get('source_type', 'hive')
-  cluster = json.loads(request.POST.get('cluster', '{}'))
+  cluster = _find_cluster(request)
 
   db = _get_db(user=request.user, source_type=source_type, cluster=cluster)
 
@@ -780,6 +778,21 @@ def _get_db(user, source_type=None, cluster=None):
   return dbms.get(user, query_server)
 
 
+def _find_cluster(request):
+  cluster = json.loads(request.POST.get('cluster', '{}'))
+  source_type = request.POST.get('source_type', request.GET.get('connector_id', request.GET.get('source_type', 'hive')))
+  namespace_id = request.GET.get('namespace')
+  if not cluster:
+    # Find the default compute
+    if namespace_id:
+      ns = Namespace.objects.filter(id=namespace_id).first()
+    else:
+      ns = Namespace.objects.filter(dialect=source_type).first()
+    if ns:
+      computes = ns.get_computes(request.user) if ns else None
+      cluster = computes[0] if computes else None
+  return cluster
+
 def _get_servername(db):
   if has_connectors():
     return db.client.query_server['server_name']

+ 1 - 0
desktop/core/base_requirements.txt

@@ -35,6 +35,7 @@ jaeger-client==4.3.0
 jdcal==1.0.1
 kazoo==2.8.0
 kerberos==1.3.0
+kubernetes==26.1.0
 lockfile==0.12.2
 Mako==1.2.3
 Markdown==3.1

+ 31 - 74
desktop/core/src/desktop/api2.py

@@ -40,14 +40,13 @@ from django.views.decorators.http import require_POST
 from metadata.conf import has_catalog
 from metadata.catalog_api import search_entities as metadata_search_entities, _highlight, \
   search_entities_interactive as metadata_search_entities_interactive
-from notebook.connectors.altus import SdxApi, AnalyticDbApi, DataEngApi, DataWarehouse2Api
-from notebook.connectors.base import Notebook, get_interpreter
-from notebook.models import Analytics
+from notebook.connectors.base import Notebook
 from useradmin.models import User, Group
 
+from beeswax.models import Namespace
 from desktop import appmanager
 from desktop.auth.backend import is_admin
-from desktop.conf import ENABLE_CONNECTORS, ENABLE_GIST_PREVIEW, CUSTOM, get_clusters, IS_K8S_ONLY, ENABLE_SHARING
+from desktop.conf import ENABLE_CONNECTORS, ENABLE_GIST_PREVIEW, CUSTOM, get_clusters, ENABLE_SHARING
 from desktop.conf import ENABLE_NEW_STORAGE_BROWSER
 from desktop.lib.conf import BoundContainer, GLOBAL_CONFIG, is_anonymous
 from desktop.lib.django_util import JsonResponse, login_notrequired, render
@@ -170,88 +169,46 @@ def get_context_namespaces(request, interface):
   response = {}
   namespaces = []
 
-  clusters = list(get_clusters(request.user).values())
-
-  # Currently broken if not sent
-  namespaces.extend([{
-      'id': cluster['id'],
-      'name': cluster['name'],
-      'status': 'CREATED',
-      'computes': [cluster]
-    } for cluster in clusters if cluster.get('type') == 'direct'
-  ])
-
-  if interface == 'hive' or interface == 'impala' or interface == 'report':
-    if get_cluster_config(request.user)['has_computes']:
-      # Note: attaching computes to namespaces might be done via the frontend in the future
-      if interface == 'impala':
-        if IS_K8S_ONLY.get():
-          adb_clusters = DataWarehouse2Api(request.user).list_clusters()['clusters']
-        else:
-          adb_clusters = AnalyticDbApi(request.user).list_clusters()['clusters']
-        for _cluster in adb_clusters: # Add "fake" namespace if needed
-          if not _cluster.get('namespaceCrn'):
-            _cluster['namespaceCrn'] = _cluster['crn']
-            _cluster['id'] = _cluster['crn']
-            _cluster['namespaceName'] = _cluster['clusterName']
-            _cluster['name'] = _cluster['clusterName']
-            _cluster['compute_end_point'] = '%(publicHost)s' % _cluster['coordinatorEndpoint'] if IS_K8S_ONLY.get() else '',
-      else:
-        adb_clusters = []
-
-      if IS_K8S_ONLY.get():
-        sdx_namespaces = []
-      else:
-        sdx_namespaces = SdxApi(request.user).list_namespaces()
-
-      # Adding "fake" namespace for cluster without one
-      sdx_namespaces.extend([_cluster for _cluster in adb_clusters if not _cluster.get('namespaceCrn') or \
-        (IS_K8S_ONLY.get() and 'TERMINAT' not in _cluster['status'])])
-
-      namespaces.extend([{
-          'id': namespace.get('crn', 'None'),
-          'name': namespace.get('namespaceName'),
-          'status': namespace.get('status'),
-          'computes': [_cluster for _cluster in adb_clusters if _cluster.get('namespaceCrn') == namespace.get('crn')]
-        } for namespace in sdx_namespaces if namespace.get('status') == 'CREATED' or IS_K8S_ONLY.get()
-      ])
+  ns_objs = Namespace.objects.filter(dialect=interface)
+  if ns_objs:
+    namespaces = [{
+        'id': ns.id,
+        'name': ns.name,
+        'status': 'CREATED',
+        'computes': [{'id': c['id'], 'type': c['type'], 'name': c['name'], 'dialect': c['dialect'],
+                      'interface': c['interface']} for c in ns.get_computes(request.user)]
+      } for ns in ns_objs
+    ]
+    namespaces = [ns for ns in namespaces if ns['computes']]
+  else:
+    # Currently broken if not sent
+    clusters = list(get_clusters(request.user).values())
+    namespaces.extend([{
+        'id': cluster['id'],
+        'name': cluster['name'],
+        'status': 'CREATED',
+        'computes': [cluster]
+      } for cluster in clusters if cluster.get('type') == 'direct'
+    ])
 
   response[interface] = namespaces
   response['status'] = 0
 
   return JsonResponse(response)
 
-
 @api_error_handler
 def get_context_computes(request, interface):
   '''
   Some clusters like Snowball can have multiple computes for a certain languages (Hive, Impala...).
   '''
   response = {}
-  computes = []
-
-  clusters = list(get_clusters(request.user).values())
-
-  if get_cluster_config(request.user)['has_computes']: # TODO: only based on interface selected?
-    interpreter = get_interpreter(connector_type=interface, user=request.user)
-    if interpreter['dialect'] == 'impala':
-      # dw_clusters = DataWarehouse2Api(request.user).list_clusters()['clusters']
-      dw_clusters = [
-        {'crn': 'c1', 'clusterName': 'c1', 'status': 'created', 'options': {'server_host': 'c1.gethue.com', 'server_port': 10000}},
-        {'crn': 'c2', 'clusterName': 'c2', 'status': 'created', 'options': {'server_host': 'c2.gethue.com', 'server_port': 10000}},
-      ]
-      computes.extend([{
-          'id': cluster.get('crn'),
-          'name': cluster.get('clusterName'),
-          'status': cluster.get('status'),
-          'namespace': cluster.get('namespaceCrn', cluster.get('crn')),
-          'type': interpreter['dialect'],
-          'options': cluster['options'],
-        } for cluster in dw_clusters]
-      )
-  else:
+
+  ns = Namespace.objects.filter(dialect=interface).first()
+  computes = ns.get_computes(request.user) if ns else None
+  if not computes:
     # Currently broken if not sent
-    computes.extend([{
+    clusters = list(get_clusters(request.user).values())
+    computes = [{
         'id': cluster['id'],
         'name': cluster['name'],
         'namespace': cluster['id'],
@@ -259,7 +216,7 @@ def get_context_computes(request, interface):
         'type': cluster['type'],
         'options': {}
       } for cluster in clusters if cluster.get('type') == 'direct'
-    ])
+    ]
 
   response[interface] = computes
   response['status'] = 0

+ 3 - 2
desktop/core/src/desktop/js/ko/components/ko.contextSelector.js

@@ -248,12 +248,13 @@ const HueContextSelector = function (params) {
   }
 
   self.loadingContext = ko.pureComputed(() => {
-    return (
+    /*return (
       self[TYPES_INDEX.cluster.loading]() ||
       self[TYPES_INDEX.namespace.loading]() ||
       self[TYPES_INDEX.compute.loading]() ||
       self.loadingDatabases()
-    );
+    );*/
+    return false;
   });
 };
 

+ 212 - 0
desktop/core/src/desktop/management/commands/sync_warehouses.py

@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# HUE_CONF_DIR=/etc/hue/conf HUE_IGNORE_PASSWORD_SCRIPT_ERRORS=1 /opt/hive/build/env/bin/hue sync_warehouses
+
+from beeswax import models
+from django.core.management.base import BaseCommand
+from hadoop import confparse
+import json
+from kubernetes import client, config
+import logging
+import os
+import re
+import sys
+
+LOG = logging.getLogger()
+
+if (config.incluster_config.SERVICE_HOST_ENV_NAME in os.environ
+    and config.incluster_config.SERVICE_PORT_ENV_NAME in os.environ):
+  # We are running in a k8s environment and must use service account
+  config.load_incluster_config()
+else:
+  # Try loading the default kubernetes config file. Intended for local dev
+  config.load_kube_config()
+
+core_v1 = client.CoreV1Api()
+apps_v1 = client.AppsV1Api()
+
+SERVER_HELP = r"""
+  Sync up the desktop_connectors with the available hive and impala warehouses
+"""
+
+
+class Command(BaseCommand):
+  def add_arguments(self, parser):
+    pass
+
+  def handle(self, *args, **options):
+    sync_warehouses(args, options)
+
+  def usage(self, subcommand):
+    return SERVER_HELP
+
+
+def sync_warehouses(args, options):
+  (hives, impalas) = get_computes_from_k8s()
+
+  (hive_warehouse, created) = models.Namespace.objects.get_or_create(
+    external_id="CDW_HIVE_WAREHOUSE",
+    defaults={'name': 'CDW Hive', 'description': 'CDW Hive Warehouse', 'dialect': 'hive', 'interface': 'hiveserver2'})
+  add_computes_to_warehouse(hive_warehouse, hives)
+
+  (impala_warehouse, created) = models.Namespace.objects.get_or_create(
+    external_id="CDW_IMPALA_WAREHOUSE",
+    defaults={'name': 'CDW Impala', 'description': 'CDW Impala Warehouse', 'dialect': 'impala', 'interface': 'hiveserver2'})
+  add_computes_to_warehouse(impala_warehouse, impalas)
+
+  LOG.info("Synced computes")
+  LOG.debug("Current computes %s" % models.Compute.objects.all())
+
+
+def add_computes_to_warehouse(warehouse, computes):
+  for c in computes:
+    c['namespace'] = warehouse
+    models.Compute.objects.update_or_create(external_id=c['external_id'], defaults=c)
+  external_ids = [c['external_id'] for c in computes]
+  models.Compute.objects.filter(namespace=warehouse).exclude(external_id__in=external_ids).delete()
+
+
+if __name__ == '__main__':
+  args = sys.argv[1:]
+  options = {}
+  sync_warehouses(args, options)
+
+
+def get_computes_from_k8s():
+  catalogs = []
+  hives = []
+  impalas = []
+  computes = {}
+
+  for n in core_v1.list_namespace().items:
+    namespace = n.metadata.name
+    item = {
+      'name': n.metadata.labels.get('displayname'),
+      'description': '%s (%s)' % (n.metadata.labels.get('displayname'), n.metadata.name),
+      'external_id': namespace,
+      #'creation_timestamp': n.metadata.labels.get('creation_timestamp'),
+    }
+
+    if namespace.startswith('warehouse-'):
+      catalogs.append(item)
+    elif namespace.startswith('compute-'):
+      hives.append(item)
+      computes[namespace] = item
+      update_hive_configs(namespace, item, 'hiveserver2-service.%s.svc.cluster.local' % namespace)
+    elif namespace.startswith('impala-'):
+      impalas.append(item)
+      computes[namespace] = item
+      populate_impala(namespace, item)
+
+  return (hives, impalas)
+
+def update_hive_configs(namespace, hive, host, port=80):
+  hs2_stfs = apps_v1.read_namespaced_stateful_set('hiveserver2', namespace)
+
+  hive_configs = core_v1.read_namespaced_config_map('hive-conf-hiveserver2', namespace)
+  hive_site_data = confparse.ConfParse(hive_configs.data['hive-site.xml'])
+  ldap_groups = hive_site_data.get('hive.server2.authentication.ldap.groupFilter', '')
+  hive_metastore_uris = hive_site_data.get('hive.metastore.uris')
+
+  settings = [
+    {"name": "server_host", "value": host},
+    {"name": "server_port", "value": port},
+    {"name": "transport_mode", "value": 'http'},
+    {"name": "http_url", "value": 'http://%s:%s/cliservice' % (host, port)},
+    {"name": "is_llap", "value": False},
+    {"name": "use_sasl", "value": True},
+    {"name": "hive_metastore_uris", "value": hive_metastore_uris},
+  ]
+
+  hive.update({
+    'dialect': 'hive',
+    'interface': 'hiveserver2',
+    'is_ready': bool(hs2_stfs.status.ready_replicas),
+    'ldap_groups': ldap_groups.split(",") if ldap_groups else None,
+    'settings': json.dumps(settings)
+  })
+
+
+def populate_impala(namespace, impala):
+  deployments = apps_v1.list_namespaced_deployment(namespace).items
+  stfs = apps_v1.list_namespaced_stateful_set(namespace).items
+  catalogd_dep = next((d for d in deployments if d.metadata.labels['app'] == 'catalogd'), None)
+  catalogd_stfs = next((s for s in stfs if s.metadata.labels['app'] == 'catalogd'), None)
+  statestore_dep = next((d for d in deployments if d.metadata.labels['app'] == 'statestored'), None)
+  admissiond_dep = next((d for d in deployments if d.metadata.labels['app'] == 'admissiond'), None)
+
+  impala['is_ready'] = bool(((catalogd_dep and catalogd_dep.status.ready_replicas) or (
+        catalogd_stfs and catalogd_stfs.status.ready_replicas))
+                     and (statestore_dep and statestore_dep.status.ready_replicas)
+                     and (admissiond_dep and admissiond_dep.status.ready_replicas))
+
+  if not impala['is_ready']:
+    LOG.info("Impala %s not ready" % namespace)
+
+  impala_proxy = next((d for d in deployments if d.metadata.labels['app'] == 'impala-proxy'), None)
+  if impala_proxy:
+    impala['server_port'] = 25000
+    impala['api_port'] = 25000
+    update_impala_configs(namespace, impala, 'impala-proxy.%s.svc.cluster.local' % namespace)
+  else:
+    coordinator = next((s for s in stfs if s.metadata.labels['app'] == 'coordinator'), None)
+    impala['is_ready'] = impala['is_ready'] and (coordinator and coordinator.status.ready_replicas)
+
+    hs2_stfs = next((s for s in stfs if s.metadata.labels['app'] == 'hiveserver2'), None)
+    if hs2_stfs:
+      # Impala is running with UA
+      impala['is_ready'] = impala['is_ready'] and hs2_stfs.status.ready_replicas
+      update_hive_configs(namespace, impala, 'hiveserver2-service.%s.svc.cluster.local' % namespace)
+    else:
+      # Impala is not running with UA
+      svcs = core_v1.list_namespaced_service(namespace).items
+      coordinator_svc = next((s for s in svcs if s.metadata.labels['app'] == 'coordinator'), None)
+      ports = coordinator_svc.spec.ports if coordinator_svc else []
+      impala['server_port'] = next((p.port for p in ports if p.name == 'http'), 28000)
+      impala['api_port'] = next((p.port for p in ports if p.name == 'web'), 25000)
+      update_impala_configs(namespace, impala, 'coordinator.%s.svc.cluster.local' % namespace)
+
+def update_impala_configs(namespace, impala, host):
+  hive_configs = core_v1.read_namespaced_config_map('impala-coordinator-hive-conf', namespace)
+  hive_site_data = confparse.ConfParse(hive_configs.data['hive-site.xml'])
+  hive_metastore_uris = hive_site_data.get('hive.metastore.uris')
+
+  impala_flag_file = core_v1.read_namespaced_config_map('impala-coordinator-flagfile', namespace)
+  flag_file_data = impala_flag_file.data['flagfile']
+  ldap_regex = r'--ldap_group_filter=(.*)'
+  match = re.search(ldap_regex, flag_file_data)
+  ldap_groups = match.group(1) if match and match.group(1) else None
+
+  settings = [
+    {"name": "server_host", "value": host},
+    {"name": "server_port", "value": impala['server_port']},
+    {"name": "api_port", "value": impala['api_port']},
+    {"name": "transport_mode", "value": 'http'},
+    {"name": "http_url", "value": 'http://%s:%s/cliservice' % (host, impala['server_port'])},
+    {"name": "api_url", "value": 'http://%s:%s' % (host, impala['api_port'])},
+    {"name": "impersonation_enabled", "value": False},
+    {"name": "use_sasl", "value": False},
+    {"name": "hive_metastore_uris", "value": hive_metastore_uris},
+  ]
+
+  impala.update({
+    'dialect': 'impala',
+    'interface': 'hiveserver2',
+    'ldap_groups': ldap_groups.split(",") if ldap_groups else None,
+    'settings': json.dumps(settings)
+  })

+ 1 - 1
desktop/core/src/desktop/models.py

@@ -1800,7 +1800,7 @@ class ClusterConfig(object):
       ],
       'default_sql_interpreter': default_sql_interpreter,
       'cluster_type': self.cluster_type,
-      'has_computes': self.cluster_type in ('altus', 'snowball'), # or any grouped engine connectors
+      'has_computes': self.cluster_type in ('cdw', 'altus', 'snowball'), # or any grouped engine connectors
       'hue_config': {
         'enable_sharing': ENABLE_SHARING.get(),
         'collect_usage': COLLECT_USAGE.get()

+ 5 - 2
desktop/libs/indexer/src/indexer/indexers/sql.py

@@ -230,7 +230,8 @@ class SQLIndexer(object):
         'overwrite': False,
         'partition_columns': [(partition['name'], partition['partitionValue']) for partition in partition_columns],
       }
-      query_server_config = dbms.get_query_server_config(name=source_type)
+      compute = destination['compute'] if 'compute' in destination else None
+      query_server_config = dbms.get_query_server_config(name=source_type, connector=compute)
       db = dbms.get(self.user, query_server=query_server_config)
       sql += "\n\n%s;" % db.load_data(database, table_name, form_data, None, generate_ddl_only=True)
 
@@ -299,7 +300,9 @@ class SQLIndexer(object):
         database=database,
         on_success_url=on_success_url,
         last_executed=start_time,
-        is_task=True
+        is_task=True,
+        namespace=destination['namespace'] if 'namespace' in destination else None,
+        compute=destination['compute'] if 'compute' in destination else None
     )
 
   def nomalize_booleans(self, row, columns):

+ 9 - 4
desktop/libs/notebook/src/notebook/api.py

@@ -162,7 +162,7 @@ def _execute_notebook(request, notebook, snippet):
         notebook['sessions'] = pre_execute_sessions
 
       # Retrieve and remove the result from the handle
-      if response['handle'].get('sync'):
+      if response['handle'] and response['handle'].get('sync'):
         result = response['handle'].pop('result')
     finally:
       if historify:
@@ -180,7 +180,7 @@ def _execute_notebook(request, notebook, snippet):
             }
             notebook_executable['operationId'] = history.uuid
 
-        if 'handle' in response: # No failure
+        if response.get('handle'): # No failure
           if 'result' not in _snippet: # Editor v2
             _snippet['result'] = {}
           _snippet['result']['handle'] = response['handle']
@@ -750,7 +750,11 @@ def autocomplete(request, server=None, database=None, table=None, column=None, n
 
   # Passed by check_document_access_permission but unused by APIs
   notebook = json.loads(request.POST.get('notebook', '{}'))
-  snippet = json.loads(request.POST.get('snippet', '{}'))
+  cluster = json.loads(request.POST.get('cluster', '{}'))
+  if cluster and cluster.get('type') in ('hive-compute', 'impala-compute'):
+    snippet = cluster
+  else:
+    snippet = json.loads(request.POST.get('snippet', '{}'))
   action = request.POST.get('operation', 'schema')
 
   try:
@@ -1033,7 +1037,8 @@ def describe(request, database, table=None, column=None):
   response = {'status': -1, 'message': ''}
   notebook = json.loads(request.POST.get('notebook', '{}'))
   source_type = request.POST.get('source_type', '')
-  connector = json.loads(request.POST.get('connector', '{}'))
+  cluster = json.loads(request.POST.get('cluster', '{}'))
+  connector = cluster if cluster else json.loads(request.POST.get('connector', '{}'))
 
   snippet = {'type': source_type, 'connector': connector}
   patch_snippet_for_connector(snippet)

+ 12 - 8
desktop/libs/notebook/src/notebook/connectors/base.py

@@ -25,6 +25,7 @@ import uuid
 
 from django.utils.encoding import smart_str
 
+from beeswax.models import Compute
 from desktop.auth.backend import is_admin
 from desktop.conf import TASK_SERVER, has_connectors
 from desktop.lib import export_csvxls
@@ -406,6 +407,9 @@ def patch_snippet_for_connector(snippet):
   Connector backward compatibility switcher.
   # TODO Connector unification
   """
+  if snippet['type'] == 'hive-compute' or snippet['type'] == 'impala-compute':
+    # No patching is needed
+    return
   if snippet.get('connector') and snippet['connector'].get('type'):
     if snippet['connector']['dialect'] != 'hplsql':   # this is a workaround for hplsql describe not working
       snippet['type'] = snippet['connector']['type']  # To rename to 'id'
@@ -430,7 +434,14 @@ def get_api(request, snippet):
   if has_connectors() and snippet.get('type') == 'hello' and is_admin(request.user):
     interpreter = snippet.get('interpreter')
   else:
-    interpreter = get_interpreter(connector_type=connector_name, user=request.user)
+    if snippet.get('compute'):
+      interpreter = snippet['compute']
+    elif snippet.get('connector'):
+      interpreter = snippet['connector']
+    elif snippet.get('type') in ('hive-compute', 'impala-compute'):
+      interpreter = Compute.objects.get(id=snippet['id']).to_dict()
+    else:
+      interpreter = get_interpreter(connector_type=connector_name, user=request.user)
 
   interface = interpreter['interface']
 
@@ -439,13 +450,6 @@ def get_api(request, snippet):
     interface = snippet.get('interface') 
     interpreter['options'] = snippet.get('options')
 
-  if get_cluster_config(request.user)['has_computes']:
-    compute = json.loads(request.POST.get('cluster', '""'))  # Via Catalog autocomplete API or Notebook create sessions.
-    if compute == '""' or compute == 'undefined':
-      compute = None
-    if not compute and snippet.get('compute'):  # Via notebook.ko.js
-      interpreter['compute'] = snippet['compute']
-
   LOG.debug('Selected interpreter %s interface=%s compute=%s' % (
     interpreter['type'],
     interface,

+ 2 - 2
desktop/libs/notebook/src/notebook/connectors/base_tests.py

@@ -44,7 +44,7 @@ class TestNotebook(object):
   def test_get_api(self):
     request = Mock()
     snippet = {
-      'connector': {'optimizer': 'api'},
+      'connector': {'optimizer': 'api', 'interface': 'hiveserver2', 'type': 'hive-compute', 'dialect': 'hive'},
       'type': 'hive'  # Backward compatibility
     }
 
@@ -81,7 +81,7 @@ class TestNotebook(object):
     with patch('notebook.api.Document2.objects.get_by_uuid') as get_by_uuid:
       with patch('notebook.api.get_api') as get_api:
         with patch('notebook.api.Notebook') as NotebookMock:
-          get_api.return_value=Mock(
+          get_api.return_value = Mock(
             check_status=Mock(return_value={'status': 0})
           )
           resp = query.check_status(request=request, operation_id=operation_id)

+ 6 - 3
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py

@@ -215,7 +215,7 @@ class HS2Api(Api):
 
     response = {
       'type': lang,
-      'id': session.id
+      'id': session.id if session else None
     }
 
     if not properties:
@@ -229,7 +229,7 @@ class HS2Api(Api):
         properties = self.get_properties(lang)
 
     response['properties'] = properties
-    response['configuration'] = json.loads(session.properties)
+    response['configuration'] = json.loads(session.properties) if session else None
     response['reuse_session'] = reuse_session
     response['session_id'] = ''
 
@@ -319,7 +319,10 @@ class HS2Api(Api):
     session = self._get_session(notebook, snippet['type'])
 
     query = self._prepare_hql_query(snippet, statement['statement'], session)
-    _session = self._get_session_by_id(notebook, snippet['type'])
+    compute = snippet.get('compute')
+    session_type = compute['name'] if compute else snippet['type']
+    _session = self._get_session_by_id(notebook, session_type)
+
 
     try:
       if statement.get('statement_id') == 0: # TODO: move this to client