
HUE-8699 [core] Adding skeleton of Hue on Kubernetes

Romain Rigaux, 6 years ago
Parent commit: 2dcf14ef7c
46 changed files with 5687 additions and 0 deletions
  1. tools/kubernetes/README.md (+290 -0)
  2. tools/kubernetes/helm/config.yaml (+6 -0)
  3. tools/kubernetes/helm/frontend/.helmignore (+1 -0)
  4. tools/kubernetes/helm/frontend/Chart.yaml (+5 -0)
  5. tools/kubernetes/helm/frontend/README.md (+95 -0)
  6. tools/kubernetes/helm/frontend/templates/NOTES.txt (+30 -0)
  7. tools/kubernetes/helm/frontend/templates/hue-postgres.yaml (+42 -0)
  8. tools/kubernetes/helm/frontend/templates/hue.yaml (+72 -0)
  9. tools/kubernetes/helm/frontend/templates/provisioner.yaml (+72 -0)
  10. tools/kubernetes/helm/frontend/templates/traefik.yaml (+105 -0)
  11. tools/kubernetes/helm/frontend/values.yaml (+13 -0)
  12. tools/kubernetes/helm/impala-engine/.helmignore (+1 -0)
  13. tools/kubernetes/helm/impala-engine/Chart.yaml (+5 -0)
  14. tools/kubernetes/helm/impala-engine/README.md (+98 -0)
  15. tools/kubernetes/helm/impala-engine/templates/NOTES.txt (+27 -0)
  16. tools/kubernetes/helm/impala-engine/templates/configmap-impala.yaml (+52 -0)
  17. tools/kubernetes/helm/impala-engine/templates/deployment-statestore.yaml (+34 -0)
  18. tools/kubernetes/helm/impala-engine/templates/deployment-worker.yaml (+65 -0)
  19. tools/kubernetes/helm/impala-engine/templates/service-catalog.yaml (+21 -0)
  20. tools/kubernetes/helm/impala-engine/templates/service-coordinator.yaml (+24 -0)
  21. tools/kubernetes/helm/impala-engine/templates/service-statestore.yaml (+16 -0)
  22. tools/kubernetes/helm/impala-engine/templates/service-worker.yaml (+22 -0)
  23. tools/kubernetes/helm/impala-engine/templates/statefulset-coordinator.yaml (+56 -0)
  24. tools/kubernetes/helm/impala-engine/templates/statefulset-statestore.yaml (+53 -0)
  25. tools/kubernetes/helm/impala-engine/values.yaml (+17 -0)
  26. tools/kubernetes/helm/mock-storage/.helmignore (+1 -0)
  27. tools/kubernetes/helm/mock-storage/Chart.yaml (+5 -0)
  28. tools/kubernetes/helm/mock-storage/README.md (+29 -0)
  29. tools/kubernetes/helm/mock-storage/templates/hdfs.yaml (+83 -0)
  30. tools/kubernetes/helm/mock-storage/templates/hive.yaml (+94 -0)
  31. tools/kubernetes/helm/mock-storage/values.yaml (+10 -0)
  32. tools/kubernetes/services/hue-frontend/Dockerfile (+41 -0)
  33. tools/kubernetes/services/hue-frontend/conf/hue.ini (+1922 -0)
  34. tools/kubernetes/services/hue-frontend/conf/log.conf (+44 -0)
  35. tools/kubernetes/services/hue-frontend/startup.sh (+5 -0)
  36. tools/kubernetes/services/mock-provisioner/.gitignore (+1 -0)
  37. tools/kubernetes/services/mock-provisioner/Dockerfile (+36 -0)
  38. tools/kubernetes/services/mock-provisioner/README.md (+15 -0)
  39. tools/kubernetes/services/mock-provisioner/command.js (+13 -0)
  40. tools/kubernetes/services/mock-provisioner/config (+7 -0)
  41. tools/kubernetes/services/mock-provisioner/config.templ (+7 -0)
  42. tools/kubernetes/services/mock-provisioner/model.js (+250 -0)
  43. tools/kubernetes/services/mock-provisioner/package-lock.json (+1372 -0)
  44. tools/kubernetes/services/mock-provisioner/package.json (+26 -0)
  45. tools/kubernetes/services/mock-provisioner/provisioner.yaml (+419 -0)
  46. tools/kubernetes/services/mock-provisioner/server.js (+85 -0)

+ 290 - 0
tools/kubernetes/README.md

@@ -0,0 +1,290 @@
+# Cloud-Native Data Warehouse
+
+Cloud-native data warehouse app powered by Impala, Hue and Kubernetes.
+
+
+Goals
+
+* Rapidly install the product in the public and private cloud
+* Self-service per-tenant data warehouse provisioning
+* Browse and query data across multiple data warehouses from a unified UI
+* Self-heal on process or node failures
+* Gracefully scale down resources without query failures
+* Autoscale data warehouses and cluster nodes based on usage
+* Intelligently tune performance without human intervention
+
+Done
+
+* Single line install in any Kubernetes cluster
+* Create, scale up/down compute warehouse (with v1 graceful shutdown of executors)
+* Autoscale warehouse based on CPU (HPA)
+* Basic warehouse metrics like queries in flight, total number of queries... (note: to be changed to come directly from the impalads)
+* Easy troubleshooting of queries: via humanized query profile, query improvement recommendations...
+
+Todo
+
+* [ ] Readiness/Liveness probes (a sketch follows this list)
+* [ ] Auto pause/Auto resume warehouses
+* [ ] Productionize provisioning service in Thunderhead
+* [ ] Add priority class and preemption support
+* [ ] Session cookie affinity for Hue when load-balanced
+* [ ] Validate external storage (HDFS, HMS)
+* [ ] Run containers as non root
+* [ ] Add proper metric endpoint to impalads and scrape every 5s
+* [ ] Persist DB
+* [ ] Log collection
+* [ ] Security: Istio, etc. TBD
+* [ ] Lot of stability testing when warehouses come up & down, shrink & grow...
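+
+For the readiness-probe item, here is a minimal sketch of what wiring one onto a worker deployment could look like. It assumes a `finance` warehouse exists and that the impalad debug web UI on port 25000 answers plain HTTP GETs (both are assumptions, not part of this change):
+
+```
+# Sketch only: the probe path/port are assumptions about the impalad debug web UI
+kubectl patch deployment impala-worker-finance -p '{"spec":{"template":{"spec":{"containers":[{"name":"impala-worker-finance","readinessProbe":{"httpGet":{"path":"/","port":25000},"initialDelaySeconds":10,"periodSeconds":5}}]}}}}'
+```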
+
+
+## Quick Start
+
+Assuming you have a Kubernetes cluster configured with [Helm][3] installed and images pushed (if not, check the [K8s Cluster](#K8s_Cluster) section below):
+
+1. Remote storage
+
+Boot a `finance` compute pointing to a remote storage (here in cluster `data-lake-1.gethue.com`):
+
+```
+helm install impala-engine --set-string metastore.uris=thrift://data-lake-1.gethue.com:9083,hdfs.namenode.host=data-lake-1.gethue.com,hdfs.namenode.port=8020,name=finance -n finance --repo=http://dataware-1.gethue.com:8879
+```
+
+And follow the instructions printed on the screen to connect and execute a SQL query. Just before doing this though, double-check that all the containers are running:
+
+```
+kubectl get pods
+NAME                                          READY   STATUS              RESTARTS   AGE
+impala-catalog-finance-0                     1/1     Running   0          8s
+impala-coordinator-finance-0                 1/1     Running   0          8s
+impala-statestore-finance-84b9844686-rng9n   1/1     Running   0          8s
+impala-worker-finance-7d6df9ff54-wpt5h       1/1     Running   0          8s
+```
+
+Afterwards, to delete the `finance` compute:
+
+```
+helm delete finance --purge
+```
+
+2. Local storage
+
+For quick use with local storage, boot the local storage and a warehouse:
+
+```
+helm repo add cloudera http://gethue.com:8879
+
+helm install cloudera/mock-storage
+helm install cloudera/impala-engine --set-string name=finance -n finance
+```
+
+And follow the instructions printed on the screen.
+
+Note:
+
+To serve the packages:
+
+```
+mkdir repo
+
+helm package impala-engine -d repo
+helm package frontend -d repo
+helm package mock-storage -d repo
+
+helm serve --repo-path repo --address gethue.com:8879
+```
+
+3. Web UI
+
+Instead of using the CLI, use the Web UI to create the `finance` warehouse in 3 clicks:
+
+```
+helm install cloudera/frontend
+helm install cloudera/mock-storage
+```
+
+And follow the instructions printed on the screen.
+
+## Installation
+
+Get the Helm charts packages:
+
+```
+cd tools/kubernetes
+```
+
+We have no pods:
+
+```
+kubectl get pods
+No resources found.
+```
+
+Boot the warehouse app:
+
+```
+helm install mock-storage
+helm install frontend
+```
+
+Optional: make sure to set [config.yaml](helm/config.yaml) to match your configuration with S3 keys, e.g.:
+
+```
+helm upgrade -f config.yaml frontend
+```
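+
+To double-check which values the release ended up with (Helm 2 syntax; assumes the release is actually named `frontend`):
+
+```
+helm get values frontend
+```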
+
+
+Now you should see something like:
+
+```
+kubectl get pods
+NAME                                          READY     STATUS    RESTARTS   AGE
+hdfs-datanode-285-0                           1/1       Running   0          4h
+hdfs-namenode-285-0                           1/1       Running   0          4h
+hive-78dbbdcf96-qrhtf                         2/2       Running   0          4h
+hue-f9bph                                     1/1       Running   0          4h
+hue-postgres-n65kz                            1/1       Running   0          4h
+provisioner-4xgb4                             1/1       Running   0          4h
+traefik-ingress-controller-657d9596f9-kzg7f   1/1       Running   0          4h
+```
+
+Optional: if not present, install Prometheus in order to get the warehouse metrics in the Frontend:
+
+```
+helm install stable/prometheus --name prometheus --namespace prometheus
+```
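+
+To confirm Prometheus came up before relying on the metrics:
+
+```
+kubectl get pods --namespace prometheus
+```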
+
+Now, let's go querying!
+
+1. From CLI
+
+Use the Impala engine chart to boot a `finance` compute warehouse:
+
+```
+helm install impala-engine --set-string name=finance -n finance
+```
+
+Now you should see something like this: one `finance` warehouse with 1 worker pod:
+
+```
+kubectl get pods
+NAME                                          READY     STATUS    RESTARTS   AGE
+hdfs-datanode-285-0                           1/1       Running   0          4h
+hdfs-namenode-285-0                           1/1       Running   0          4h
+hive-78dbbdcf96-qrhtf                         2/2       Running   0          4h
+hue-f9bph                                     1/1       Running   0          4h
+hue-postgres-n65kz                            1/1       Running   0          4h
+impala-catalog-finance-0                            1/1       Running   0          4h
+impala-coordinator-finance-0                        1/1       Running   0          4h
+impala-statestore-finance-84bcb88684-mlvr7          1/1       Running   0          4h
+impala-worker-finance-7d4fd7445f-6fjlt              1/1       Running   0          4h
+provisioner-4xgb4                             1/1       Running   0          4h
+traefik-ingress-controller-657d9596f9-kzg7f   1/1       Running   0          4h
+```
+
+Go into a worker pod:
+
+```
+export WORKER_POD=$(kubectl get pods -l name=impala-worker-finance -o jsonpath="{.items[0].metadata.name}")
+kubectl exec -it $WORKER_POD bash
+```
+
+```
+impala-shell -i impala-coordinator-finance
+```
+
+And now you can type some SQL:
+
+```
+SHOW TABLES;
+CREATE TABLE logs (message string);
+```
+
+If S3 was configured, you can create external tables on top of it.
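+
+A hypothetical table over an illustrative bucket (requires the S3 keys from [config.yaml](helm/config.yaml); the bucket and table names are made up):
+
+```
+impala-shell -i impala-coordinator-finance -q "
+  CREATE EXTERNAL TABLE web_logs (message STRING)
+  LOCATION 's3a://my-bucket/web_logs/';"
+```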
+
+Read more about the Compute Warehouse services at:
+
+* [Impala engine service](helm/impala-engine)
+* Altus CLI command (TBD)
+
+2. Manage and query the Data Warehouse via the Web UI or API:
+
+* [Web App](helm/frontend#Web)
+* [REST API](helm/frontend#API)
+
+
+## Architecture
+
+To understand the architecture, look at the [helm](helm) folder.  Installing the chart installs Hue, HMS,
+PostgreSQL and an Impala provisioning service.  The Impala provisioning service is a thin Altus-style
+API that proxies the Kubernetes API.  For details see the OpenAPI provisioner [service spec](services/mock-provisioner/provisioner.yaml).  Impala
+components (`impalad`, `statestored`, and `catalogd`) run as Kubernetes [Deployments][4].
+
+Kubernetes [lifecycle hooks][5] ensure graceful termination by leveraging [IMPALA-1760][2].
+
+Autoscaling of the underlying Kubernetes cluster relies on built-in cluster autoscaling
+capabilities.  When `impalad` pods cannot be scheduled due to inadequate CPU or memory, new nodes
+are added automatically by the cluster autoscaler.
+
+The autoscaling design for Impala is TBD.  It could be accomplished using [Horizontal Pod Autoscaling][7]
+and [custom metrics][8] or a custom controller.  A custom controller may
+be more appropriate if the long-term vision is to automatically tune the Impala cluster.
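+
+Until that design lands, plain CPU-based autoscaling (the mechanism behind the `kubectl autoscale` one-liner in the impala-engine README) can be sketched declaratively. This assumes metrics-server is enabled and a `finance` warehouse exists:
+
+```
+kubectl apply -f - <<EOF
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: impala-worker-finance
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: impala-worker-finance
+  minReplicas: 1
+  maxReplicas: 3
+  targetCPUUtilizationPercentage: 80
+EOF
+```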
+
+
+## K8s Cluster
+
+### Ubuntu
+
+* OS: Ubuntu 16.04 or 18.04
+* Node count and size: 4 primary instances of m3.xlarge or 1 m4.2xlarge
+
+https://microk8s.io/#quick-start
+
+```
+sudo snap install microk8s --classic
+
+snap alias microk8s.kubectl kubectl
+
+microk8s.enable metrics-server dns
+```
+
+```
+sudo snap install helm --classic
+
+helm init
+```
+
+If in dev, to let the provisioner run properly:
+
+```
+kubectl create clusterrolebinding serviceaccounts-cluster-admin --clusterrole=cluster-admin --group=system:serviceaccounts
+```
+
+### GKE
+
+Installing Helm onto a GKE cluster requires creating a service account with the correct
+permissions:
+
+```
+kubectl create serviceaccount --namespace kube-system tiller
+kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+helm init --service-account tiller --upgrade
+```
+
+On GKE, this chart uses a LoadBalancer to route to Traefik rather than using the GKE
+HTTP LoadBalancer. This avoids creating global static IPs.
+
+## Images
+
+All the images can currently be built via the [services](services) folder.
+
+
+[1]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
+[2]: https://jira.apache.org/jira/browse/IMPALA-1760
+[3]: https://helm.sh/
+[4]: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
+[5]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
+[6]: https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-ipvs
+[7]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+[8]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics

+ 6 - 0
tools/kubernetes/helm/config.yaml

@@ -0,0 +1,6 @@
+registry: "docker-registry.infra.cloudera.com/cloudera/datawarehouse"
+tag: "v2"
+aws:
+  accessKeyId: ""
+  secretAccessKey: ""
+  region: "us-east-1"

+ 1 - 0
tools/kubernetes/helm/frontend/.helmignore

@@ -0,0 +1 @@
+.DS_Store

+ 5 - 0
tools/kubernetes/helm/frontend/Chart.yaml

@@ -0,0 +1,5 @@
+name: frontend
+description: Cloudera Data Warehouse UI
+version: 1.0.0
+appVersion: "1.0"
+apiVersion: v1

+ 95 - 0
tools/kubernetes/helm/frontend/README.md

@@ -0,0 +1,95 @@
+# Cloudera Data Warehouse Frontend Chart
+
+This is an MVP Helm chart that boots the frontend of the Data Warehouse. It is the main dependency, as
+the warehouses can then be created directly via the Web UI.
+
+
+## Install
+
+To boot the Web UI:
+
+```
+helm install frontend --set-string helmRepo=http://dataware-1.vpc.cloudera.com:8879 -n frontend
+```
+
+Or copy [values.yaml](values.yaml), edit it, and run:
+
+```
+helm install frontend -f values.yaml
+```
+
+By default it comes with the UI, a temporary DB, and a REST API:
+
+```
+kubectl get pods
+NAME                                          READY   STATUS    RESTARTS   AGE
+hue-4n2ck                                     1/1       Running   0          3h
+hue-postgres-5jg77                            1/1       Running   0          12d
+provisioner-cp2df                             1/1       Running   0          12d
+traefik-ingress-controller-6fbd76695d-nkxnz   1/1       Running   0          12d
+```
+
+## Web App
+
+The URL of the frontend is printed when the chart is installed. If you missed it, you can currently get the URL via:
+
+```
+export WEB_HOST=$(kubectl get node -o jsonpath="{.items[0].metadata.name}")
+export WEB_PORT=$(kubectl get service hue -o jsonpath="{.spec.ports[*].nodePort}")
+
+echo http://$WEB_HOST:$WEB_PORT
+```
+
+On the Data Warehouse page, create warehouses that will spawn Impala compute containers.
+
+Query tables via the Editor. If using S3, pick up some [CREATE TABLES here](http://github.mtv.cloudera.com/romain/SQL-test-cases/tree/master/altus-dw).
+If using HDFS, use the Data Import http://$WEB_HOST:$WEB_PORT/hue/importer.
+
+## API
+
+As with the Web App, the API URL is listed at installation. To get it later, either port-forward the API or use the NodePort, e.g.:
+
+```
+export API_HOST=$(kubectl get node -o jsonpath="{.items[0].metadata.name}")
+export API_PORT=$(kubectl get service provisioner -o jsonpath="{.spec.ports[*].nodePort}")
+
+echo http://$API_HOST:$API_PORT
+```
+
+or port forward to localhost:
+
+```
+kubectl port-forward svc/provisioner 4747:4747 &
+```
+
+Make sure the API is running:
+
+```sh
+curl localhost:4747
+{"app":"cloudera-dw-provisioner","version":"0.1"}
+```
+
+List warehouses:
+
+```sh
+curl -X POST localhost:4747/dw/listClusters
+{"clusters":[{"cdhVersion":"CDH6.3","workerCpuCores":2,"workerMemoryInGib":4,"workerReplicas":1,"workerAutoResize":false,"workercurrentCPUUtilizationPercentage":0,"clusterName":"a6","name":"a6","crn":"a6","creationDate":"2018-12-21T03:50:04.525Z","status":"ONLINE","workerReplicasOnline":0,"coordinatorEndpoint":{"privateHost":"impala-coordinatora6","publicHost":"impala-coordinatora6","port":21050}}]}
+```
+
+Create a warehouse:
+
+Note: currently cluster names should be **alphabetic** only, with an optional **number** suffix, e.g. finance, trailpay5...
+
+
+```sh
+curl -d '{"clusterName":"finance2", "workerCpuCores":1, "workerMemoryInGib":1, "workerReplicas":1}' -H "Content-Type: application/json" -X POST localhost:4747/dw/createCluster
+```
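+
+To tear the warehouse down again, there should be a matching delete operation; the operation name below is an assumption in the Altus style, so confirm it against the spec linked below:
+
+```sh
+# Assumed operation name; check provisioner.yaml for the exact one
+curl -d '{"clusterName":"finance2"}' -H "Content-Type: application/json" -X POST localhost:4747/dw/deleteCluster
+```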
+
+See the complete list of operations at [provisioner.yaml](../../services/mock-provisioner/provisioner.yaml).
+
+## Follow-ups
+
+* Move provisioner to Thunderhead API
+* Productionize DB persistence

+ 30 - 0
tools/kubernetes/helm/frontend/templates/NOTES.txt

@@ -0,0 +1,30 @@
+Congratulations, you've launched the Cloudera Data Warehouse!
+
+To check the status of your installation run:
+
+  helm list {{ .Release.Name }}
+
+
+You should be able to execute queries via:
+
+  kubectl port-forward svc/hue 8888:8888 &
+
+  http://localhost:8888
+
+
+Or directly:
+
+  export WEB_HOST=$(kubectl get node -o jsonpath="{.items[0].metadata.name}")
+  export WEB_PORT=$(kubectl get service hue -o jsonpath="{.spec.ports[*].nodePort}" --namespace {{ .Release.Namespace }})
+
+  echo http://$WEB_HOST:$WEB_PORT
+
+
+The REST API is available at:
+
+  export API_HOST=$(kubectl get node -o jsonpath="{.items[0].metadata.name}")
+  export API_PORT=$(kubectl get service provisioner -o jsonpath="{.spec.ports[*].nodePort}" --namespace {{ .Release.Namespace }})
+
+  echo http://$API_HOST:$API_PORT
+
+Happy Querying!

+ 42 - 0
tools/kubernetes/helm/frontend/templates/hue-postgres.yaml

@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: hue-postgres
+spec:
+  ports:
+  - name: pgql
+    port: 5432
+  selector:
+    app: hue-postgres
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: hue-postgres
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hue-postgres
+    spec:
+      containers:
+        - name: hue-postgres
+          image: postgres:9.5
+          env:
+            - name: POSTGRES_USER
+              value: hue
+            - name: POSTGRES_PASSWORD
+              value: hue
+            - name: POSTGRES_DB
+              value: hue
+            - name: PGDATA
+              value: /var/lib/postgresql/data/pgdata
+          ports:
+            - containerPort: 5432
+          volumeMounts:
+            - mountPath: /var/lib/postgresql/data
+              name: pg-data
+      volumes:
+        - name: pg-data
+          emptyDir: {}

+ 72 - 0
tools/kubernetes/helm/frontend/templates/hue.yaml

@@ -0,0 +1,72 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hue-config
+data:
+  hue-ini: |
+    [metadata]
+    [[k8s]]
+    api_url={{ .Values.hue.provisionerApi }}
+    [[prometheus]]
+    api_url=http://{{ .Values.hue.prometheusHost }}/api
+
+    [aws]
+    [[aws_accounts]]
+    [[[default]]]
+    access_key_id={{ .Values.hue.awsAccessKeyId }}
+    secret_access_key={{ .Values.hue.awsSecretAccessKey }}
+    region={{ .Values.hue.awsRegion }}
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: hue
+spec:
+  replicas: {{ .Values.hue.replicas }}
+  template:
+    metadata:
+      labels:
+        app: hue
+    spec:
+      containers:
+        - name: hue
+          image: {{ .Values.registry }}/hue-frontend:{{ .Values.tag }}
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8888
+          volumeMounts:
+          - name: config-volume
+            mountPath: /usr/share/hue/desktop/conf/z-hue.ini
+            subPath: hue-ini
+      volumes:
+        - name: config-volume
+          configMap:
+            name: hue-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: hue
+spec:
+  selector:
+    app: hue
+  ports:
+  - name: hue
+    port: 8888
+  type: NodePort
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: hue
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+  - host: altus.{{ .Values.domain }}
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: hue
+          servicePort: hue

+ 72 - 0
tools/kubernetes/helm/frontend/templates/provisioner.yaml

@@ -0,0 +1,72 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: provisioner-config
+data:
+  config: |
+    var config = {};
+    
+    config.helmRepo = "{{ .Values.helmRepo }}";
+    config.registry = "{{ .Values.registry }}";
+    config.registryImpalaTag = "{{ .Values.registryImpalaTag }}";
+
+    module.exports = config;
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: provisioner
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: provisioner
+      # Until Impala has native metric support
+      annotations:
+        prometheus.io/scrape: 'true'
+        prometheus.io/port: '4747'
+    spec:
+      containers:
+        - name: provisioner
+          image: {{ .Values.registry }}/mock-provisioner:{{ .Values.tag }}
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 4747
+          volumeMounts:
+            - name: config-volume
+              mountPath: /app/config
+              subPath: config
+      volumes:
+        - name: config-volume
+          configMap:
+            name: provisioner-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: provisioner
+spec:
+  selector:
+    app: provisioner
+  ports:
+  - name: provisioner
+    port: 80
+    targetPort: 4747
+  type: NodePort
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: provisioner
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+  - host: provisioner.{{ .Values.domain }}
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: provisioner
+          servicePort: provisioner

+ 105 - 0
tools/kubernetes/helm/frontend/templates/traefik.yaml

@@ -0,0 +1,105 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: traefik-ingress-controller
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: traefik-ingress-controller
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - services
+      - endpoints
+      - secrets
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - extensions
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: traefik-ingress-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: traefik-ingress-controller
+subjects:
+- kind: ServiceAccount
+  name: traefik-ingress-controller
+  namespace: {{ .Release.Namespace }}
+---
+kind: Deployment
+apiVersion: extensions/v1beta1
+metadata:
+  name: traefik-ingress-controller
+  labels:
+    app: traefik-ingress-lb
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: traefik-ingress-lb
+  template:
+    metadata:
+      labels:
+        app: traefik-ingress-lb
+        name: traefik-ingress-lb
+    spec:
+      serviceAccountName: traefik-ingress-controller
+      terminationGracePeriodSeconds: 60
+      containers:
+      - image: traefik:v1.6.6
+        name: traefik-ingress-lb
+        ports:
+        - name: http
+          containerPort: 80
+        - name: admin
+          containerPort: 8080
+        args:
+        - --api
+        - --kubernetes
+        - --logLevel=INFO
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: traefik-ingress-service
+spec:
+  selector:
+    app: traefik-ingress-lb
+  ports:
+    - name: web
+      port: 80
+    - name: admin
+      port: 8080
+  type: LoadBalancer
+  #loadBalancerIp: {{ .Values.loadBalancerIp }} 
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: traefik-admin-ui
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+  - host: traefik.{{ .Values.domain }}
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: traefik-ingress-service
+          servicePort: admin

+ 13 - 0
tools/kubernetes/helm/frontend/values.yaml

@@ -0,0 +1,13 @@
+registry: "docker-registry.infra.cloudera.com/cloudera/datawarehouse"
+helmRepo: "http://dataware-1.vpc.cloudera.com:8879"
+tag: "v2"
+registryImpalaTag: "v2"
+loadBalancerIp: "127.0.0.1"
+domain: "127.0.0.1.nip.io"
+hue:
+  replicas: 1
+  awsAccessKeyId: ""
+  awsSecretAccessKey: ""
+  awsRegion: "us-east-1"
+  prometheusHost: "prometheus-server.prometheus"
+  provisionerApi: "http://provisioner"

+ 1 - 0
tools/kubernetes/helm/impala-engine/.helmignore

@@ -0,0 +1 @@
+.DS_Store

+ 5 - 0
tools/kubernetes/helm/impala-engine/Chart.yaml

@@ -0,0 +1,5 @@
+name: impala-engine
+description: Cloudera Data Warehouse Impala Engine
+version: 1.0.0
+appVersion: "3.1.0-cdh6.x"
+apiVersion: v1

+ 98 - 0
tools/kubernetes/helm/impala-engine/README.md

@@ -0,0 +1,98 @@
+# Cloudera Impala Chart
+
+This is an MVP Helm chart that ports an Impala compute cluster to Kubernetes.
+It provides basic Impala warehouse creation and autoscaling.
+
+All interaction can be performed via native `kubectl` commands, or via the Frontend UI, REST API, or command line.
+
+
+## Install
+
+To boot a 'finance' compute warehouse:
+
+```
+helm install impala-engine --set-string name=finance -n finance
+```
+
+Or copy impala-engine/values.yaml, edit it, and run:
+
+```
+helm install impala-engine -f values.yaml
+```
+
+By default it comes with only 1 worker:
+
+```
+kubectl get pods
+NAME                                          READY   STATUS    RESTARTS   AGE
+impala-catalog-finance-0                      1/1     Running   0          5s
+impala-coordinator-finance-0                  1/1     Running   0          5s
+impala-statestore-finance-94c8c5f85-9hs9s     1/1     Running   0          5s
+impala-worker-finance-55df4576c8-67s8g        1/1     Running   0          5s
+```
+
+Note:
+
+Not tested yet with external storage, so the mock storage (local HDFS/HMS) that can be installed via the
+[mock-storage](../mock-storage) chart is recommended.
+
+S3, HDFS, HMS minimal properties are configurable via [values.yaml](values.yaml).
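+
+For example, a sketch of pointing the engine at S3 via those values (the key values are placeholders):
+
+```
+helm install impala-engine --set-string name=finance,aws.accessKeyId=MY_KEY_ID,aws.secretAccessKey=MY_SECRET -n finance
+```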
+
+
+## Query
+
+Currently, querying can be done directly by:
+
+* Opening up the coordinator and pointing the shell at it
+
+```
+kubectl port-forward svc/impala-coordinator-finance 21000:21000 &
+impala-shell -i localhost
+```
+
+  (for connecting from a remote machine, you need to upgrade kubectl to 1.13 to get the --address 0.0.0.0 binding option; see the sketch after this list. Note: NodePort might prevent more than one warehouse)
+* Logging into a worker pod and using the impala-shell there
+
+```
+kubectl exec -it impala-worker-finance-55df4576c8-67s8g bash
+impala-shell -i impala-coordinator-finance-0
+```
+
+* Installing the Frontend chart and using the Web UI app
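+
+For the remote-machine case flagged in the first bullet, a sketch with kubectl 1.13+ (binds the forward on all interfaces instead of localhost only; assumes a `finance` warehouse):
+
+```
+kubectl port-forward --address 0.0.0.0 svc/impala-coordinator-finance 21000:21000 &
+```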
+
+## Autoscaling
+
+Can be done via native kubectl:
+
+```
+kubectl autoscale deployment impala-worker-finance --min=1 --max=3 --cpu-percent=80
+kubectl delete hpa impala-worker-finance
+```
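+
+To watch the autoscaler react as load changes:
+
+```
+kubectl get hpa impala-worker-finance -w
+```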
+
+## Follow-ups
+
+There are a lot of possibilities to iterate on, e.g.:
+
+### All services
+
+* Do we want Services for all pods & ports? (probably only coordinators)
+* Hook-in to Tim's images (integrate basic graceful shutdown scripts...).
+* Iterate on the naming convention; make the HDFS dependency removable
+* Could parameterize more options (e.g. resource requests, image pull policy...)
+* Running as non-root
+* Readiness Probes
+
+### Impala Server
+
+* Could split resource Request & Limit
+* Add HPA YAML config/parameters for autoscaling by default
+
+### Impala Catalog
+
+* Convert to Deployment
+* Need refactoring to get hive-site.xml information from properties
+
+### Impala Statestore
+
+* Convert to Deployment
+* Should be the same as Impala Server

+ 27 - 0
tools/kubernetes/helm/impala-engine/templates/NOTES.txt

@@ -0,0 +1,27 @@
+Congratulations, you've launched one Cloudera Data Warehouse Engine!
+
+To check the status of your installation run:
+
+  helm list {{ .Release.Name }}
+
+Also to double check that all the pods are 'READY':
+
+  kubectl get pods -l 'name in (impala-catalog-{{ .Values.name }},impala-statestore-{{ .Values.name }},impala-coordinator-{{ .Values.name }},impala-worker-{{ .Values.name }})'
+
+You should be able to execute queries via:
+
+  kubectl port-forward svc/impala-coordinator-{{ .Values.name }} 21000:21000 &
+  impala-shell -i localhost
+
+Or directly go into a worker pod:
+
+  export WORKER_POD=$(kubectl get pods -l name=impala-worker-{{ .Values.name }} -o jsonpath="{.items[0].metadata.name}")
+  kubectl exec -it $WORKER_POD bash
+
+Then launch the shell:
+  
+  impala-shell -i impala-coordinator-{{ .Values.name }}
+
+And type some queries:
+
+  SHOW TABLES;

+ 52 - 0
tools/kubernetes/helm/impala-engine/templates/configmap-impala.yaml

@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: impala-config
+data:
+  core-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+    <configuration>
+      <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://{{ .Values.hdfs.namenode.host }}:{{ .Values.hdfs.namenode.port }}</value>
+      </property>
+    </configuration>
+
+  hdfs-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+    <configuration>
+      {{ if .Values.aws.accessKeyId }}
+      <property>
+        <name>fs.s3a.access.key</name>
+        <value>{{ .Values.aws.accessKeyId }}</value>
+      </property>
+      {{ end }}
+      {{ if .Values.aws.accessKeyId }}
+      <property>
+        <name>fs.s3a.secret.key</name>
+        <value>{{ .Values.aws.secretAccessKey }}</value>
+      </property>
+      {{ end }}
+      <property>
+        <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+        <value>true</value>
+        <description>Needed for Impala</description>
+      </property>
+      <property>
+        <name>dfs.client.file-block-storage-locations.timeout.millis</name>
+        <value>10000</value>
+      </property>
+    </configuration>
+
+  hive-site.xml: |
+    <configuration>
+      <property>
+        <name>hive.metastore.uris</name>
+        <value>{{ .Values.metastore.uris }}</value>
+        <description>IP address (or fully-qualified domain name) and port of the metastore host</description>
+      </property>
+    </configuration>

+ 34 - 0
tools/kubernetes/helm/impala-engine/templates/deployment-statestore.yaml

@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: impala-statestore-{{ .Values.name }}
+  labels:
+    name: impala-statestore-{{ .Values.name }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: impala-statestore-{{ .Values.name }}
+  template:
+    metadata:
+      labels:
+        name: impala-statestore-{{ .Values.name }}
+    spec:
+      containers:
+        - name: impala-statestore-{{ .Values.name }}
+          image: {{ .Values.registry }}/impala-statestore:{{ .Values.tag }}
+          imagePullPolicy: Always
+          resources:
+            requests:
+              memory: "512Mi"
+              cpu: "1000m"
+            limits:
+              memory: "512Mi"
+              cpu: "1000m"
+          ports:
+            - containerPort: 24000
+              name: im-ss
+              protocol: TCP
+            - containerPort: 25010
+              name: im-ss-web
+              protocol: TCP

+ 65 - 0
tools/kubernetes/helm/impala-engine/templates/deployment-worker.yaml

@@ -0,0 +1,65 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: impala-worker-{{ .Values.name }}
+spec:
+  replicas: {{ .Values.worker.replicas }}
+  selector:
+    matchLabels:
+      name: impala-worker-{{ .Values.name }}
+  template:
+    metadata:
+      labels:
+        name: impala-worker-{{ .Values.name }}
+    spec:
+      hostname: impala-worker-{{ .Values.name }}
+      subdomain: impala-worker-{{ .Values.name }}
+      containers:
+        - name: impala-worker-{{ .Values.name }}
+          image: {{ .Values.registry }}/impala-server:{{ .Values.tag }}
+          command: ['bash', '-c', '/opt/hadoop/run-worker.sh']
+          imagePullPolicy: Always
+          resources:
+            requests:
+              memory: "{{ .Values.worker.memoryRequest }}"
+              cpu: "{{ .Values.worker.cpuRequest }}"
+            limits:
+              memory: "{{ .Values.worker.memoryRequest }}"
+              cpu: "{{ .Values.worker.cpuRequest }}"
+          ports:
+            - containerPort: 21000
+              name: im-sr
+              protocol: TCP
+            - containerPort: 22000
+              name: im-sr2
+              protocol: TCP
+            - containerPort: 23000
+              name: im-sr3
+              protocol: TCP
+            - containerPort: 25000
+              name: im-sr-web
+              protocol: TCP
+          env:
+            - name: IMPALA_STATESTORE
+              value: impala-statestore-{{ .Values.name }}
+            - name: IMPALA_CATALOG
+              value: impala-catalog-{{ .Values.name }}
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          lifecycle:
+           preStop:
+             exec:
+               command: ["/opt/hadoop/run-graceful-decommission.sh", "impala-coordinator-{{ .Values.name }}", "$POD_NAME"]
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/hdfs-site.xml
+              subPath: hdfs-site.xml
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/core-site.xml
+              subPath: core-site.xml
+      volumes:
+        - name: config-volume
+          configMap:
+            name: impala-config

+ 21 - 0
tools/kubernetes/helm/impala-engine/templates/service-catalog.yaml

@@ -0,0 +1,21 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: impala-catalog-{{ .Values.name }}
+spec:
+  clusterIP: None
+  ports:
+    - port: 26000
+      targetPort: 26000
+      name: im-ct
+      protocol: TCP
+    - port: 24000
+      targetPort: 24000
+      name: im-ss
+      protocol: TCP
+    - port: 25020
+      targetPort: 25020
+      name: im-ct-web
+      protocol: TCP
+  selector:
+    name: impala-catalog-{{ .Values.name }}

+ 24 - 0
tools/kubernetes/helm/impala-engine/templates/service-coordinator.yaml

@@ -0,0 +1,24 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: impala-coordinator-{{ .Values.name }}
+spec:
+  clusterIP: None
+  ports:
+    - port: 21000
+      name: im-sr
+      protocol: TCP
+    - port: 21050
+      name: thrift
+      protocol: TCP
+    - port: 22000
+      name: state-heartbeat
+      protocol: TCP
+    - port: 23000
+      name: state-heartbeat-topic
+      protocol: TCP
+    - port: 25000
+      name: im-sr-web
+      protocol: TCP
+  selector:
+    name: impala-coordinator-{{ .Values.name }}

+ 16 - 0
tools/kubernetes/helm/impala-engine/templates/service-statestore.yaml

@@ -0,0 +1,16 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: impala-statestore-{{ .Values.name }}
+spec:
+  ports:
+    - port: 24000
+      targetPort: 24000
+      name: im-ss
+      protocol: TCP
+    - port: 25010
+      targetPort: 25010
+      name: im-ss-web
+      protocol: TCP
+  selector:
+    name: impala-statestore-{{ .Values.name }}

+ 22 - 0
tools/kubernetes/helm/impala-engine/templates/service-worker.yaml

@@ -0,0 +1,22 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: impala-worker-{{ .Values.name }}
+spec:
+  clusterIP: None
+  ports:
+    - port: 21000
+      targetPort: 21000
+      name: im-sr
+      protocol: TCP
+    - port: 21050
+      name: thrift
+      protocol: TCP
+    - port: 22000
+      name: state-heartbeat
+      protocol: TCP
+    - port: 23000
+      name: state-heartbeat-topic
+      protocol: TCP
+  selector:
+    name: impala-worker-{{ .Values.name }}

+ 56 - 0
tools/kubernetes/helm/impala-engine/templates/statefulset-coordinator.yaml

@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: impala-coordinator-{{ .Values.name }}
+spec:
+  serviceName: impala-coordinator-{{ .Values.name }}
+  replicas: 1
+  selector:
+    matchLabels:
+      name: impala-coordinator-{{ .Values.name }}
+  template:
+    metadata:
+      labels:
+        name: impala-coordinator-{{ .Values.name }}
+    spec:
+      containers:
+        - name: impala-coordinator-{{ .Values.name }}
+          image: {{ .Values.registry }}/impala-server:{{ .Values.tag }}
+          imagePullPolicy: Always
+          command: ['bash', '-c', '/opt/hadoop/run-coordinator.sh']
+          resources:
+            requests:
+              memory: "4096Mi"
+              cpu: "2000m"
+            limits:
+              memory: "4096Mi"
+              cpu: "2000m"
+          ports:
+            - containerPort: 21000
+              name: im-sr
+              protocol: TCP
+            - containerPort: 22000
+              name: im-sr2x
+              protocol: TCP
+            - containerPort: 23000
+              name: im-sr3
+              protocol: TCP
+            - containerPort: 25000
+              name: im-sr-web
+              protocol: TCP
+          env:
+            - name: IMPALA_STATESTORE
+              value: impala-statestore-{{ .Values.name }}
+            - name: IMPALA_CATALOG
+              value: impala-catalog-{{ .Values.name }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/hdfs-site.xml
+              subPath: hdfs-site.xml
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/core-site.xml
+              subPath: core-site.xml
+      volumes:
+        - name: config-volume
+          configMap:
+            name: impala-config

+ 53 - 0
tools/kubernetes/helm/impala-engine/templates/statefulset-statestore.yaml

@@ -0,0 +1,53 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: impala-catalog-{{ .Values.name }}
+spec:
+  serviceName: impala-catalog-{{ .Values.name }}
+  replicas: 1
+  selector:
+    matchLabels:
+      name: impala-catalog-{{ .Values.name }}
+  template:
+    metadata:
+      labels:
+        name: impala-catalog-{{ .Values.name }}
+    spec:
+      containers:
+        - name: impala-catalog-{{ .Values.name }}
+          image: {{ .Values.registry }}/impala-catalog:{{ .Values.tag }}
+          imagePullPolicy: Always
+          resources:
+            requests:
+              memory: "1024Mi"
+              cpu: "1500m"
+            limits:
+              memory: "1024Mi"
+              cpu: "1500m"
+          ports:
+            - containerPort: 26000
+              name: im-ct
+              protocol: TCP
+            - containerPort: 24000
+              name: im-ss
+              protocol: TCP
+            - containerPort: 25020
+              name: im-ct-web
+              protocol: TCP
+          env:
+          - name: IMPALA_STATESTORE
+            value: impala-statestore-{{ .Values.name }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/hdfs-site.xml
+              subPath: hdfs-site.xml
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/core-site.xml
+              subPath: core-site.xml
+            - name: config-volume
+              mountPath: /etc/alternatives/impala-conf/hive-site.xml
+              subPath: hive-site.xml
+      volumes:
+        - name: config-volume
+          configMap:
+            name: impala-config

+ 17 - 0
tools/kubernetes/helm/impala-engine/values.yaml

@@ -0,0 +1,17 @@
+registry: "docker-registry.infra.cloudera.com/cloudera/datawarehouse"
+tag: "v2"
+name: "datawarehouse"
+worker:
+  cpuRequest: "1000m"
+  memoryRequest: "2048Mi"
+  replicas: 1
+metastore:
+  uris: thrift://hive:9083
+hdfs:
+  namenode:
+    host: "hdfs-namenode"
+    port: 8020
+aws:
+  accessKeyId: ""
+  secretAccessKey: ""
+  region: "us-east-1"

+ 1 - 0
tools/kubernetes/helm/mock-storage/.helmignore

@@ -0,0 +1 @@
+.DS_Store

+ 5 - 0
tools/kubernetes/helm/mock-storage/Chart.yaml

@@ -0,0 +1,5 @@
+name: mock-storage
+description: Cloudera Data Warehouse storage
+version: 1.0.0
+appVersion: "1.0"
+apiVersion: v1

+ 29 - 0
tools/kubernetes/helm/mock-storage/README.md

@@ -0,0 +1,29 @@
+# Cloudera Data Warehouse Mock Storage Chart
+
+This chart schedules a mini HDFS and Hive to run inside the K8s cluster. It enables quick demoing of the Data Warehouse app.
+
+**Long term**, the goal is to also support remote storage, pointed to via the [values.yaml](values.yaml) configuration.
+
+## Install
+
+To boot the mock storage:
+
+```
+helm install mock-storage
+```
+
+Or copy [values.yaml](values.yaml), edit it, and run:
+
+```
+helm install mock-storage -f values.yaml
+```
+
+You should now see:
+
+```
+kubectl get pods
+NAME                                          READY   STATUS    RESTARTS   AGE
+hdfs-datanode-1-0                             1/1     Running   0          13d
+hdfs-namenode-1-0                             1/1     Running   0          13d
+hive-697f49cdbd-ghr6w                         2/2     Running   0          13d
+```

+ 83 - 0
tools/kubernetes/helm/mock-storage/templates/hdfs.yaml

@@ -0,0 +1,83 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: hdfs-namenode
+spec:
+  ports:
+    - port: 9820
+      targetPort: 9820
+      protocol: TCP
+      name: nm-rpc
+    - port: 9870
+      targetPort: 9870
+      protocol: TCP
+      name: nm-web-ui
+  selector:
+    name: hdfs-namenode-{{ .Values.hdfs.nametag }}
+---
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+  name: hdfs-namenode-{{ .Values.hdfs.nametag }}
+spec:
+  serviceName: hdfs-namenode-{{ .Values.hdfs.nametag }}
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        name: hdfs-namenode-{{ .Values.hdfs.nametag }}
+    spec:
+#      initContainers:
+#        - name: hdfs-namenode-format-{{ .Values.hdfs.nametag }}
+#          image: kubernetes:5000/cloudera/hdfs-namenode:{{ .Values.hdfs.tag }}
+#          imagePullPolicy: Always
+#          command: ['bash', '-c', 'if [ ! -f /dfs/nn/current/VERSION ]; then java -Dnn.hostname=`hostname` org.apache.hadoop.hdfs.server.namenode.NameNode -format; fi']
+#          volumeMounts:
+#            - name: hdfs-namenode
+#              mountPath: /dfs/nn
+      containers:
+        - name: hdfs-namenode-{{ .Values.hdfs.nametag }}
+          image: {{ .Values.registry }}/hdfs-namenode:{{ .Values.hdfs.tag }}
+          imagePullPolicy: Always
+          ports:
+              - containerPort: 9820
+                protocol: TCP
+                name: nm-rpc
+              - containerPort: 9870
+                protocol: TCP
+                name: nm-web-ui
+#          volumeMounts:
+#            - name: hdfs-namenode
+#              mountPath: /dfs/nn
+#      volumes:
+#        - name: hdfs-namenode
+#          hostPath:
+#            path: /tmp/dfs/nn-{{ .Values.hdfs.nametag }}
+---
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+  name: hdfs-datanode-{{ .Values.hdfs.nametag }}
+spec:
+  serviceName: hdfs-datanode-{{ .Values.hdfs.nametag }}
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        name: hdfs-datanode-{{ .Values.hdfs.nametag }}
+    spec:
+      containers:
+        - name: hdfs-datanode-{{ .Values.hdfs.nametag }}
+          image: {{ .Values.registry }}/hdfs-datanode:{{ .Values.hdfs.tag }}
+          imagePullPolicy: Always
+          ports:
+          - containerPort: 9864
+            protocol: TCP
+            name: dn-web-ui
+          volumeMounts:
+            - name: hdfs-datanode
+              mountPath: /dfs/dn
+      volumes:
+        - name: hdfs-datanode
+          hostPath:
+            path: /tmp/dfs/dn-{{ .Values.hdfs.nametag }}

+ 94 - 0
tools/kubernetes/helm/mock-storage/templates/hive.yaml

@@ -0,0 +1,94 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: hive-metastore-db-secrets
+type: Opaque
+data:
+  hive-metastore-db-root-password: {{ .Values.hive.metastore_db_root_password | b64enc }}
+  hive-metastore-db-user: {{ .Values.hive.metastore_db_user | b64enc }}
+  hive-metastore-db-password: {{ .Values.hive.metastore_db_password | b64enc }}
+  hive-metastore-db-name: {{ .Values.hive.metastore_db_name | b64enc }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: hive
+spec:
+  selector:
+    hadoop: hive
+  ports:
+  - protocol: TCP
+    port: 3306
+    targetPort: 3306
+    name: mysql
+  - protocol: TCP
+    port: 9083
+    targetPort: 9083
+    name: metastore
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  labels:
+    hadoop: hive
+  name: hive
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      hadoop: hive
+  template:
+    metadata:
+      labels:
+        hadoop: hive
+    spec:
+      restartPolicy: Always
+      containers:
+      - image: mysql:5.7
+        imagePullPolicy: Always
+        name: mysql
+        resources: {}
+        env:
+        - name: MYSQL_ROOT_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-root-password
+        - name: MYSQL_USER
+          value: metastore
+        - name: MYSQL_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-password
+        - name: MYSQL_DATABASE
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-name
+      - image: {{ .Values.registry }}/hive-server:{{ .Values.hive.tag }}
+        imagePullPolicy: Always
+        name: hive-server
+        resources: {}
+        command: ['bash', '-c', '/opt/hive/init.sh; hive --service metastore --hiveconf hive.root.logger=INFO,console']
+        env:
+        - name: MYSQL_USER
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-user
+        - name: MYSQL_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-password
+        - name: MYSQL_DATABASE
+          valueFrom:
+            secretKeyRef:
+              name: hive-metastore-db-secrets
+              key: hive-metastore-db-name
+      volumes:
+        - name: hive-metastore-mysql
+          hostPath:
+            path: /tmp/hive-metastore-mysql2

+ 10 - 0
tools/kubernetes/helm/mock-storage/values.yaml

@@ -0,0 +1,10 @@
+registry: "docker-registry.infra.cloudera.com/cloudera/datawarehouse"
+hdfs:
+  tag: "v2"
+  nametag: "1"
+hive:
+  tag: "v2"
+  metastore_db_name: "metastore"
+  metastore_db_user: "root"
+  metastore_db_password: "hive"
+  metastore_db_root_password: "hive"

+ 41 - 0
tools/kubernetes/services/hue-frontend/Dockerfile

@@ -0,0 +1,41 @@
+FROM ubuntu:16.04
+
+RUN apt-get update -y && apt-get install -y \
+  build-essential \
+  libkrb5-dev \
+  libmysqlclient-dev \
+  libssl-dev \
+  libsasl2-dev \
+  libsasl2-modules-gssapi-mit \
+  libsqlite3-dev \
+  libtidy-0.99-0 \
+  libxml2-dev \
+  libxslt-dev \
+  libffi-dev \
+  libldap2-dev \
+  libpq-dev \
+  python-dev \
+  python-setuptools \
+  libgmp3-dev \
+  libz-dev \
+  software-properties-common \
+  git \
+  sudo \
+  maven \
+   && rm -rf /var/lib/apt/lists/*
+
+RUN add-apt-repository -y ppa:webupd8team/java
+RUN apt-get update -y
+RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
+RUN apt-get install -y oracle-java8-installer
+
+ADD hue /hue
+RUN cd /hue && PREFIX=/usr/share make install
+COPY conf /usr/share/hue/desktop/conf
+EXPOSE 8888
+RUN useradd -ms /bin/bash hue && chown -R hue /usr/share/hue
+WORKDIR /usr/share/hue
+RUN ./build/env/bin/pip install psycopg2-binary
+# dev tools
+COPY ./startup.sh /usr/share/hue
+CMD ["./startup.sh"]

+ 1922 - 0
tools/kubernetes/services/hue-frontend/conf/hue.ini

@@ -0,0 +1,1922 @@
+# Hue configuration file
+# ===================================
+#
+# For complete documentation about the contents of this file, run
+#   $ <hue_root>/build/env/bin/hue config_help
+#
+# All .ini files under the current directory are treated equally.  Their
+# contents are merged to form the Hue configuration, which can
+# be viewed on Hue at
+#   http://<hue_host>:<port>/dump_config
+
+
+###########################################################################
+# General configuration for core Desktop features (authentication, etc)
+###########################################################################
+
+[desktop]
+  # Custom flags for Cloudera DW
+  is_multicluster_only=true
+  is_k8s_only=true
+
+  # Set this to a random string, the longer the better.
+  # This is used for secure hashing in the session store.
+  secret_key=kasdlfjknasdfl3hbaksk3bwkasdfkasdfba23asdf
+
+  # Execute this script to produce the Django secret key. This will be used when
+  # 'secret_key' is not set.
+  ## secret_key_script=
+
+  # Webserver listens on this address and port
+  http_host=0.0.0.0
+  http_port=8888
+
+  # Choose whether to enable the new Hue 4 interface.
+  # is_hue_4=true
+
+  # Choose whether to still allow users to enable the old Hue 3 interface.
+  ## disable_hue_3=true
+
+  # Choose whether the Hue pages are embedded or not. This will improve the rendering of Hue when added inside a
+  # container element.
+  ## is_embedded=false
+
+  # A comma-separated list of available Hue load balancers
+  ## hue_load_balancer=
+
+  # Time zone name
+  time_zone=America/Los_Angeles
+
+  # Enable or disable debug mode.
+  django_debug_mode=false
+
+  # Enable development mode, where notably static files are not cached.
+  ## dev=false
+
+  # Enable embedded development mode, where the page will be rendered inside a container div element.
+  ## dev_embedded=false
+
+  # Enable or disable database debug mode.
+  ## database_logging=false
+
+  # Whether to send debug messages from JavaScript to the server logs.
+  ## send_dbug_messages=false
+
+  # Enable or disable backtrace for server error
+  http_500_debug_mode=false
+
+  # Enable or disable memory profiling.
+  ## memory_profiler=false
+
+  # Enable or disable instrumentation. If django_debug_mode is True, this is automatically enabled
+  ## instrumentation=false
+
+  # Server email for internal error messages
+  ## django_server_email='hue@localhost.localdomain'
+
+  # Email backend
+  ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
+
+  # Set to true to use CherryPy as the webserver, set to false
+  # to use Gunicorn as the webserver. Defaults to CherryPy if
+  # key is not specified.
+  ## use_cherrypy_server=true
+
+  # Gunicorn worker class: gevent or eventlet, gthread or sync.
+  ## gunicorn_work_class=eventlet
+
+  # The number of Gunicorn worker processes. If not specified, it uses: (number of CPU * 2) + 1.
+  ## gunicorn_number_of_workers=None
+
+  # Webserver runs as this user
+  ## server_user=hue
+  ## server_group=hue
+
+  # This should be the Hue admin and proxy user
+  ## default_user=hue
+
+  # This should be the hadoop cluster admin
+  ## default_hdfs_superuser=hdfs
+
+  # If set to false, runcpserver will not actually start the web server.
+  # Used if Apache is being used as a WSGI container.
+  ## enable_server=yes
+
+  # Number of threads used by the CherryPy web server
+  ## cherrypy_server_threads=50
+
+  # This property specifies the maximum size of the receive buffer in bytes in thrift sasl communication,
+  # default value is 2097152 (2 MB), which equals to (2 * 1024 * 1024)
+  ## sasl_max_buffer=2097152
+
+  # Hue will try to get the actual host of the Service, even if it resides behind a load balancer.
+  # This will enable an automatic configuration of the service without requiring custom configuration of the service load balancer.
+  # This is currently available for the Impala service only. It is highly recommended to point only to a series of coordinator-only nodes.
+  # default=false
+
+  # Filename of SSL Certificate
+  ## ssl_certificate=
+
+  # Filename of SSL RSA Private Key
+  ## ssl_private_key=
+
+  # Filename of SSL Certificate Chain
+  ## ssl_certificate_chain=
+
+  # SSL certificate password
+  ## ssl_password=
+
+  # Execute this script to produce the SSL password. This will be used when 'ssl_password' is not set.
+  ## ssl_password_script=
+
+  # X-Content-Type-Options: nosniff This is a HTTP response header feature that helps prevent attacks based on MIME-type confusion.
+  ## secure_content_type_nosniff=true
+
+  # X-Xss-Protection: \"1; mode=block\" This is a HTTP response header feature to force XSS protection.
+  ## secure_browser_xss_filter=true
+
+  # Content-Security-Policy: This is a HTTP response header that restricts which sources the browser may load content from.
+  ## secure_content_security_policy="script-src 'self' 'unsafe-inline' 'unsafe-eval' *.google-analytics.com *.doubleclick.net data:;img-src 'self' *.google-analytics.com *.doubleclick.net http://*.tile.osm.org *.tile.osm.org *.gstatic.com data:;style-src 'self' 'unsafe-inline' fonts.googleapis.com;connect-src 'self';frame-src *;child-src 'self' data: *.vimeo.com;object-src 'none'"
+
+  # Strict-Transport-Security HTTP Strict Transport Security(HSTS) is a policy which is communicated by the server to the user agent via HTTP response header field name "Strict-Transport-Security". HSTS policy specifies a period of time during which the user agent(browser) should only access the server in a secure fashion(https).
+  ## secure_ssl_redirect=False
+  ## secure_redirect_host=0.0.0.0
+  ## secure_redirect_exempt=[]
+  ## secure_hsts_seconds=31536000
+  ## secure_hsts_include_subdomains=true
+
+  # List of allowed and disallowed ciphers in cipher list format.
+  # See http://www.openssl.org/docs/apps/ciphers.html for more information on
+  # cipher list format. This list is from
+  # https://wiki.mozilla.org/Security/Server_Side_TLS v3.7 intermediate
+  # recommendation, which should be compatible with Firefox 1, Chrome 1, IE 7,
+  # Opera 5 and Safari 1.
+  ## ssl_cipher_list=ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS:!DH:!ADH
+
+  # Path to default Certificate Authority certificates.
+  ## ssl_cacerts=/etc/hue/cacerts.pem
+
+  # Choose whether Hue should validate certificates received from the server.
+  ## ssl_validate=true
+
+  # Default LDAP/PAM/.. username and password of the hue user used for authentications with other services.
+  # Inactive if password is empty.
+  # e.g. LDAP pass-through authentication for HiveServer2 or Impala. Apps can override them individually.
+  ## auth_username=hue
+  ## auth_password=
+
+  # Default encoding for site data
+  ## default_site_encoding=utf-8
+
+  # Help improve Hue with anonymous usage analytics.
+  # Use Google Analytics to see how many times an application or specific section of an application is used, nothing more.
+  ## collect_usage=true
+
+  # Tile layer server URL for the Leaflet map charts
+  # Read more on http://leafletjs.com/reference.html#tilelayer
+  # Make sure you add the tile domain to the img-src section of the 'secure_content_security_policy' configuration parameter as well.
+  ## leaflet_tile_layer=http://{s}.tile.osm.org/{z}/{x}/{y}.png
+
+  # The copyright message for the specified Leaflet maps Tile Layer
+  ## leaflet_tile_layer_attribution='&copy; <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'
+
+  # All the map options accordingly to http://leafletjs.com/reference-0.7.7.html#map-options
+  # To change CRS, just use the name, ie. "EPSG4326"
+  ## leaflet_map_options='{}'
+
+  # All the tile layer options, according to http://leafletjs.com/reference-0.7.7.html#tilelayer
+  ## leaflet_tile_layer_options='{}'
+
+  # X-Frame-Options HTTP header value. Use 'DENY' to deny framing completely
+  ## http_x_frame_options=SAMEORIGIN
+
+  # Enable X-Forwarded-Host header if the load balancer requires it.
+  ## use_x_forwarded_host=true
+
+  # Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER.
+  ## secure_proxy_ssl_header=false
+
+  # Comma-separated list of Django middleware classes to use.
+  # See https://docs.djangoproject.com/en/1.4/ref/middleware/ for more details on middleware in Django.
+  ## middleware=desktop.auth.backend.LdapSynchronizationBackend
+
+  # Comma-separated list of regular expressions, which match the redirect URL.
+  # For example, to restrict to your local domain and FQDN, the following value can be used:
+  # ^\/.*$,^http:\/\/www.mydomain.com\/.*$
+  ## redirect_whitelist=^(\/[a-zA-Z0-9]+.*|\/)$
+
+  # Comma separated list of apps to not load at server startup.
+  # e.g.: pig,zookeeper
+  app_blacklist=security,rdbms,jobsub,pig,hbase,sqoop,zookeeper,spark,oozie,search
+
+  # Id of the cluster where Hue is located.
+  ## cluster_id='default'
+
+  # Choose whether to show the new SQL editor.
+  # use_new_editor=true
+
+  # Global setting to allow or disable end user downloads in all Hue.
+  # e.g. Query result in Editors and Dashboards, file in File Browser...
+  # enable_download=true
+
+  # Choose whether to enable SQL syntax check or not
+  # enable_sql_syntax_check=true
+
+  # Choose whether to show the improved assist panel and the right context panel
+  # use_new_side_panels=false
+
+  # Choose whether to use new charting library across the whole Hue.
+  # use_new_charts=false
+
+  # Editor autocomplete timeout (ms) when fetching columns, fields, tables etc.
+  # To disable this type of autocompletion set the value to 0.
+  ## editor_autocomplete_timeout=30000
+
+  # Enable saved default configurations for Hive, Impala, Spark, and Oozie.
+  ## use_default_configuration=false
+
+  # The directory where to store the auditing logs. Auditing is disabled if the value is empty.
+  # e.g. /var/log/hue/audit.log
+  ## audit_event_log_dir=
+
+  # Size in KB/MB/GB for audit log to rollover.
+  ## audit_log_max_file_size=100MB
+
+  # Timeout in seconds for REST calls.
+  ## rest_conn_timeout=120
+
+  # A json file containing a list of log redaction rules for cleaning sensitive data
+  # from log files. It is defined as:
+  #
+  # {
+  #   "version": 1,
+  #   "rules": [
+  #     {
+  #       "description": "This is the first rule",
+  #       "trigger": "triggerstring 1",
+  #       "search": "regex 1",
+  #       "replace": "replace 1"
+  #     },
+  #     {
+  #       "description": "This is the second rule",
+  #       "trigger": "triggerstring 2",
+  #       "search": "regex 2",
+  #       "replace": "replace 2"
+  #     }
+  #   ]
+  # }
+  #
+  # Redaction works by searching a string for the [TRIGGER] string. If found,
+  # the [REGEX] is used to replace sensitive information with the
+  # [REDACTION_MASK].  If specified with 'log_redaction_string', the
+  # 'log_redaction_string' rules will be executed after the
+  # 'log_redaction_file' rules.
+  #
+  # For example, here is a file that would redact passwords and social security numbers:
+
+  # {
+  #   "version": 1,
+  #   "rules": [
+  #     {
+  #       "description": "Redact passwords",
+  #       "trigger": "password",
+  #       "search": "password=\".*\"",
+  #       "replace": "password=\"???\""
+  #     },
+  #     {
+  #       "description": "Redact social security numbers",
+  #       "trigger": "",
+  #       "search": "\d{3}-\d{2}-\d{4}",
+  #       "replace": "XXX-XX-XXXX"
+  #     }
+  #   ]
+  # }
+  ## log_redaction_file=
+
+  # Comma separated list of strings representing the host/domain names that the Hue server can serve.
+  # e.g.: localhost,domain1,*
+  allowed_hosts="*"
+
+  # Allow use of the Django debug tool with the Chrome browser for debugging issues; django_debug_mode must also be true.
+  ## enable_django_debug_tool=false
+
+  # Comma-separated list of usernames that are allowed to use the Django debug tool. If empty, all users are allowed.
+  ## django_debug_tool_users=
+
+  # Number of characters in REST API response calls to dump to the logs when debug is enabled. Set to -1 for the entire response.
+  ## rest_response_size=2000
+
+  # Administrators
+  # ----------------
+  [[django_admins]]
+    ## [[[admin1]]]
+    ## name=john
+    ## email=john@doe.com
+
+  # UI customizations
+  # -------------------
+  [[custom]]
+
+    # Top banner HTML code
+    # e.g. <H4>Test Lab A2 Hue Services</H4>
+    ## banner_top_html='<div style="padding: 4px; text-align: center; background-color: #003F6C; color: #DBE8F1">This is Hue 4 Beta! - Please feel free to email any feedback / questions to <a href="mailto:team@gethue.com" target="_blank" style="color: #FFF; font-weight: bold">team@gethue.com</a> or <a href="https://twitter.com/gethue" target="_blank" style="color: #FFF; font-weight: bold">@gethue</a>.</div>'
+
+    # Login splash HTML code
+    # e.g. WARNING: You are required to have authorization before you proceed
+    ## login_splash_html=<h4>GetHue.com</h4><br/><br/>WARNING: You have accessed a computer managed by GetHue. You are required to have authorization from GetHue before you proceed.
+
+    # Cache timeout in milliseconds for the assist, autocomplete, etc.
+    # defaults to 10 days, set to 0 to disable caching
+    ## cacheable_ttl=864000000
+
+    # SVG code to replace the default Hue logo in the top bar and sign in screen
+    # e.g. <image xlink:href="/static/desktop/art/hue-logo-mini-white.png" x="0" y="0" height="40" width="160" />
+    ## logo_svg=
+
+  # Configuration options for user authentication into the web application
+  # ------------------------------------------------------------------------
+  [[auth]]
+
+    # Authentication backend. Common settings are:
+    # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
+    # - desktop.auth.backend.AllowAllBackend (allows everyone)
+    # - desktop.auth.backend.AllowFirstUserDjangoBackend
+    #     (Default. Relies on Django and user manager, after the first login)
+    # - desktop.auth.backend.LdapBackend
+    # - desktop.auth.backend.PamBackend
+    # - desktop.auth.backend.SpnegoDjangoBackend
+    # - desktop.auth.backend.RemoteUserDjangoBackend
+    # - libsaml.backend.SAML2Backend
+    # - libopenid.backend.OpenIDBackend
+    # - liboauth.backend.OAuthBackend
+    #     (New OAuth; supports Twitter, Facebook, Google+ and LinkedIn)
+    # - desktop.auth.backend.OIDCBackend
+    # Multiple Authentication backends are supported by specifying a comma-separated list in order of priority.
+    # However, in order to enable OAuthBackend, it must be the ONLY backend configured.
+    ## backend=desktop.auth.backend.AllowFirstUserDjangoBackend
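+
+    # e.g. a hypothetical multi-backend setup trying LDAP first, then falling back to
+    # the default Django backend (comma-separated, in order of priority):
+    ## backend=desktop.auth.backend.LdapBackend,desktop.auth.backend.AllowFirstUserDjangoBackend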
+
+    # Class which defines extra accessor methods for User objects.
+    ## user_aug=desktop.auth.backend.DefaultUserAugmentor
+
+    # The service to use when querying PAM.
+    ## pam_service=login
+
+    # When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
+    # the normalized name of the header that contains the remote user.
+    # The HTTP header in the request is converted to a key by converting
+    # all characters to uppercase, replacing any hyphens with underscores
+    # and adding an HTTP_ prefix to the name. So, for example, if the header
+    # is called Remote-User that would be configured as HTTP_REMOTE_USER
+    #
+    # Defaults to HTTP_REMOTE_USER
+    ## remote_user_header=HTTP_REMOTE_USER
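+
+    # e.g. if a hypothetical proxy passed the remote user in an X-Forwarded-User header,
+    # the normalization rule above would give:
+    ## remote_user_header=HTTP_X_FORWARDED_USER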
+
+    # Ignore the case of usernames when searching for existing users.
+    # Supported in RemoteUserDjangoBackend and SpnegoDjangoBackend.
+    ## ignore_username_case=true
+
+    # Forcibly cast usernames to lowercase; takes precedence over force_username_uppercase.
+    # Supported in RemoteUserDjangoBackend and SpnegoDjangoBackend.
+    ## force_username_lowercase=true
+
+    # Forcibly cast usernames to uppercase, cannot be combined with force_username_lowercase
+    ## force_username_uppercase=false
+
+    # Users will expire after they have not logged in for 'n' seconds.
+    # A negative number means that users will never expire.
+    ## expires_after=-1
+
+    # Apply 'expires_after' to superusers.
+    ## expire_superusers=true
+
+    # Users will automatically be logged out after 'n' seconds of inactivity.
+    # A negative number means that idle sessions will not be timed out.
+    idle_session_timeout=-1
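+
+    # e.g. a hypothetical one-hour idle timeout:
+    ## idle_session_timeout=3600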
+
+    # Force users to change password on first login with desktop.auth.backend.AllowFirstUserDjangoBackend
+    ## change_default_password=false
+
+    # Number of login attempts allowed before a record is created for failed logins
+    ## login_failure_limit=3
+
+    # After the number of allowed login attempts is exceeded, do we lock out this IP and optionally the user agent?
+    ## login_lock_out_at_failure=false
+
+    # If set, defines period of inactivity in hours after which failed logins will be forgotten.
+    # A value of 0 or None will disable this check. Default: None
+    ## login_cooloff_time=None
+
+    # If True, lock out based on an IP address AND a user agent.
+    # This means requests from different user agents but from the same IP are treated differently.
+    ## login_lock_out_use_user_agent=false
+
+    # If True, lock out based on IP and user
+    ## login_lock_out_by_combination_user_and_ip=false
+
+    # If True, it will look for the IP address from the header defined at reverse_proxy_header.
+    ## behind_reverse_proxy=false
+
+    # If behind_reverse_proxy is True, it will look for the IP address from this header. Default: HTTP_X_FORWARDED_FOR
+    ## reverse_proxy_header=HTTP_X_FORWARDED_FOR
+
+  # Configuration options for connecting to LDAP and Active Directory
+  # -------------------------------------------------------------------
+  [[ldap]]
+
+    # The search base for finding users and groups
+    ## base_dn="DC=mycompany,DC=com"
+
+    # URL of the LDAP server
+    ## ldap_url=ldap://auth.mycompany.com
+
+    # The NT domain used for LDAP authentication
+    ## nt_domain=mycompany.com
+
+    # A PEM-format file containing certificates for the CA's that
+    # Hue will trust for authentication over TLS.
+    # The certificate for the CA that signed the
+    # LDAP server certificate must be included among these certificates.
+    # See more here http://www.openldap.org/doc/admin24/tls.html.
+    ## ldap_cert=
+    ## use_start_tls=true
+
+    # Distinguished name of the user to bind as -- not necessary if the LDAP server
+    # supports anonymous searches
+    ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+
+    # Password of the bind user -- not necessary if the LDAP server supports
+    # anonymous searches
+    ## bind_password=
+
+    # Execute this script to produce the bind user password. This will be used
+    # when 'bind_password' is not set.
+    ## bind_password_script=
+
+    # Pattern for searching for usernames -- Use <username> for the parameter
+    # For use when using LdapBackend for Hue authentication
+    ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
+
+    # Create users in Hue when they try to login with their LDAP credentials
+    # For use when using LdapBackend for Hue authentication
+    ## create_users_on_login = true
+
+    # Synchronize a user's groups when they log in.
+    ## sync_groups_on_login=true
+
+    # A comma-separated list of LDAP groups whose users are allowed to log in.
+    ## login_groups=
+
+    # Ignore the case of usernames when searching for existing users in Hue.
+    ## ignore_username_case=true
+
+    # Force usernames to lowercase when creating new users from LDAP.
+    # Takes precedence over force_username_uppercase
+    ## force_username_lowercase=true
+
+    # Force usernames to uppercase, cannot be combined with force_username_lowercase
+    ## force_username_uppercase=false
+
+    # Use search bind authentication.
+    ## search_bind_authentication=true
+
+    # Choose which kind of subgrouping to use: nested or suboordinate (deprecated).
+    ## subgroups=suboordinate
+
+    # Define the number of levels to search for nested members.
+    ## nested_members_search_depth=10
+
+    # Whether or not to follow referrals
+    ## follow_referrals=false
+
+    # Enable python-ldap debugging.
+    ## debug=false
+
+    # Sets the debug level within the underlying LDAP C lib.
+    ## debug_level=255
+
+    # Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,
+    # 2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.
+    ## trace_level=0
+
+    [[[users]]]
+
+      # Base filter for searching for users
+      ## user_filter="objectclass=*"
+
+      # The username attribute in the LDAP schema
+      ## user_name_attr=sAMAccountName
+
+    [[[groups]]]
+
+      # Base filter for searching for groups
+      ## group_filter="objectclass=*"
+
+      # The group name attribute in the LDAP schema
+      ## group_name_attr=cn
+
+      # The attribute of the group object which identifies the members of the group
+      ## group_member_attr=members
+
+    [[[ldap_servers]]]
+
+      ## [[[[mycompany]]]]
+
+        # The search base for finding users and groups
+        ## base_dn="DC=mycompany,DC=com"
+
+        # URL of the LDAP server
+        ## ldap_url=ldap://auth.mycompany.com
+
+        # The NT domain used for LDAP authentication
+        ## nt_domain=mycompany.com
+
+        # A PEM-format file containing certificates for the CA's that
+        # Hue will trust for authentication over TLS.
+        # The certificate for the CA that signed the
+        # LDAP server certificate must be included among these certificates.
+        # See more here http://www.openldap.org/doc/admin24/tls.html.
+        ## ldap_cert=
+        ## use_start_tls=true
+
+        # Distinguished name of the user to bind as -- not necessary if the LDAP server
+        # supports anonymous searches
+        ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
+
+        # Password of the bind user -- not necessary if the LDAP server supports
+        # anonymous searches
+        ## bind_password=
+
+        # Execute this script to produce the bind user password. This will be used
+        # when 'bind_password' is not set.
+        ## bind_password_script=
+
+        # Pattern for searching for usernames -- Use <username> for the parameter
+        # For use when using LdapBackend for Hue authentication
+        ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
+
+        # Use search bind authentication.
+        ## search_bind_authentication=true
+
+        # Whether or not to follow referrals
+        ## follow_referrals=false
+
+        # Enable python-ldap debugging.
+        ## debug=false
+
+        # Sets the debug level within the underlying LDAP C lib.
+        ## debug_level=255
+
+        # Possible values for trace_level are 0 for no logging, 1 for only logging the method calls with arguments,
+        # 2 for logging the method calls with arguments and the complete results and 9 for also logging the traceback of method calls.
+        ## trace_level=0
+
+        ## [[[[[users]]]]]
+
+          # Base filter for searching for users
+          ## user_filter="objectclass=Person"
+
+          # The username attribute in the LDAP schema
+          ## user_name_attr=sAMAccountName
+
+        ## [[[[[groups]]]]]
+
+          # Base filter for searching for groups
+          ## group_filter="objectclass=groupOfNames"
+
+          # The group name attribute in the LDAP schema
+          ## group_name_attr=cn
+
+  # Configuration options for specifying the Source Version Control.
+  # ----------------------------------------------------------------
+  [[vcs]]
+
+  ## [[[git-read-only]]]
+      ## Base URL to Remote Server
+      # remote_url=https://github.com/cloudera/hue/tree/master
+
+      ## Base URL to Version Control API
+      # api_url=https://api.github.com
+  ## [[[github]]]
+
+      ## Base URL to Remote Server
+      # remote_url=https://github.com/cloudera/hue/tree/master
+
+      ## Base URL to Version Control API
+      # api_url=https://api.github.com
+
+      # These will be necessary when you want to write back to the repository.
+      ## Client ID for Authorized Application
+      # client_id=
+
+      ## Client Secret for Authorized Application
+      # client_secret=
+  ## [[[svn]]]
+      ## Base URL to Remote Server
+      # remote_url=https://github.com/cloudera/hue/tree/master
+
+      ## Base URL to Version Control API
+      # api_url=https://api.github.com
+
+      # These will be necessary when you want to write back to the repository.
+      ## Client ID for Authorized Application
+      # client_id=
+
+      ## Client Secret for Authorized Application
+      # client_secret=
+
+  # Configuration options for specifying the Desktop Database. For more info,
+  # see http://docs.djangoproject.com/en/1.4/ref/settings/#database-engine
+  # ------------------------------------------------------------------------
+  [[database]]
+    # Database engine is typically one of:
+    # postgresql_psycopg2, mysql, sqlite3 or oracle.
+    #
+    # Note that for sqlite3, 'name' below is a path to the database file. For other backends, it is the database name.
+    # Note for Oracle, options={"threaded":true} must be set in order to avoid crashes.
+    # Note for Oracle, you can use the Oracle Service Name by setting "host=" and "port=" and then "name=<host>:<port>/<service_name>".
+    # Note for MariaDB use the 'mysql' engine.
+    engine=postgresql_psycopg2
+    host=hue-postgres
+    port=5432
+    user=hue
+    password=hue
+    name=hue
+    # conn_max_age option to make database connection persistent value in seconds
+    # https://docs.djangoproject.com/en/1.9/ref/databases/#persistent-connections
+    ## conn_max_age=0
+    # Execute this script to produce the database password. This will be used when 'password' is not set.
+    ## password_script=/path/script
+    ## name=desktop/desktop.db
+    ## options={}
+    # Database schema, to be used only when public schema is revoked in postgres
+    ## schema=public
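+
+    # e.g. following the Oracle notes above, a hypothetical service-name setup:
+    ## engine=oracle
+    ## name=oraclehost.example.com:1521/orcl.example.com
+    ## options={"threaded":true}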
+
+  # Configuration options for specifying the Desktop session.
+  # For more info, see https://docs.djangoproject.com/en/1.4/topics/http/sessions/
+  # ------------------------------------------------------------------------
+  [[session]]
+    # The name of the cookie to use for sessions.
+    # This can have any value that is not used by the other cookie names in your application.
+    ## cookie_name=sessionid
+
+    # The cookie containing the users' session ID will expire after this amount of time in seconds.
+    # Default is 2 weeks.
+    ## ttl=1209600
+
+    # The cookies containing the users' session ID and the CSRF token will be marked secure.
+    # Should only be enabled with HTTPS.
+    ## secure=false
+
+    # The cookies containing the users' session ID and the CSRF token will use the HTTP-only flag.
+    ## http_only=true
+
+    # Use session-length cookies. Logs the user out when the browser window is closed.
+    ## expire_at_browser_close=false
+
+    # If set, limits the number of concurrent user sessions. 1 represents 1 session per user. Default: 0 (unlimited sessions per user)
+    ## concurrent_user_session_limit=0
+
+  # Configuration options for connecting to an external SMTP server
+  # ------------------------------------------------------------------------
+  [[smtp]]
+
+    # The SMTP server information for email notification delivery
+    host=localhost
+    port=25
+    user=
+    password=
+
+    # Whether to use a TLS (secure) connection when talking to the SMTP server
+    tls=no
+
+    # Default email address to use for various automated notifications from Hue
+    ## default_from_email=hue@localhost
+
+
+  # Configuration options for Kerberos integration for secured Hadoop clusters
+  # ------------------------------------------------------------------------
+  [[kerberos]]
+
+    # Path to Hue's Kerberos keytab file
+    ## hue_keytab=
+    # Kerberos principal name for Hue
+    ## hue_principal=hue/hostname.foo.com
+    # Frequency in seconds with which Hue will renew its keytab
+    ## keytab_reinit_frequency=3600
+    # Path to keep Kerberos credentials cached
+    ## ccache_path=/var/run/hue/hue_krb5_ccache
+    # Path to kinit
+    ## kinit_path=/path/to/kinit
+
+    # Mutual authentication from the server, attaches HTTP GSSAPI/Kerberos Authentication to the given Request object
+    ## mutual_authentication="OPTIONAL" or "REQUIRED" or "DISABLED"
+
+  # Configuration options for using OAuthBackend (Core) login
+  # ------------------------------------------------------------------------
+  [[oauth]]
+    # The Consumer key of the application
+    ## consumer_key=XXXXXXXXXXXXXXXXXXXXX
+
+    # The Consumer secret of the application
+    ## consumer_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+
+    # The Request token URL
+    ## request_token_url=https://api.twitter.com/oauth/request_token
+
+    # The Access token URL
+    ## access_token_url=https://api.twitter.com/oauth/access_token
+
+    # The Authorize URL
+    ## authenticate_url=https://api.twitter.com/oauth/authorize
+
+  # Configuration options for using OIDCBackend (Core) login for SSO
+  # ------------------------------------------------------------------------
+  [[oidc]]
+    # The client ID set for Hue as relying party in the OpenID provider
+    ## oidc_rp_client_id=XXXXXXXXXXXXXXXXXXXXX
+
+    # The client secret set for Hue as relying party in the OpenID provider
+    ## oidc_rp_client_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+
+    # The OpenID provider authorization endpoint
+    ## oidc_op_authorization_endpoint=https://keycloak.example.com/auth/realms/Cloudera/protocol/openid-connect/auth
+
+    # The OpenID provider token endpoint
+    ## oidc_op_token_endpoint=https://keycloak.example.com/auth/realms/cloudera/protocol/openid-connect/token
+
+    # The OpenID provider user info endpoint
+    ## oidc_op_user_endpoint=https://keycloak.example.com/auth/realms/cloudera/protocol/openid-connect/userinfo
+
+    # The OpenID provider signing key in PEM or DER format
+    ## oidc_rp_idp_sign_key=/path/to/key_file
+
+    # The OpenID provider JWKS (signing keys) endpoint
+    ## oidc_op_jwks_endpoint=https://keycloak.example.com/auth/realms/Cloudera/protocol/openid-connect/certs
+
+    # Whether Hue, as an OpenID Connect client, verifies the SSL certificate
+    ## oidc_verify_ssl=true
+
+    # The Hue URL path (as relying party) to redirect to after login
+    ## login_redirect_url=https://localhost:8888/oidc/callback/
+
+    # The OpenID provider URL path to redirect to after logout
+    ## logout_redirect_url=https://keycloak.example.com/auth/realms/cloudera/protocol/openid-connect/logout
+
+    # The Hue URL path (as relying party) to redirect to after a failed login
+    ## login_redirect_url_failure=https://localhost:8888/hue/oidc_failed/
+
+    # Create a new user from OpenID Connect on login if it doesn't exist
+    ## create_users_on_login=true
+
+    # Members of this group will be created and updated as superusers. To use this feature, set up in Keycloak:
+    # 1. add the name of the group here
+    # 2. in Keycloak, go to your_realm --> your_clients --> Mappers, add a mapper
+    #      Mapper Type: Group Membership (this is predefined mapper type)
+    #      Token Claim Name: group_membership (required exact string)
+    ## superuser_group=hue_superusers
+
+  # Configuration options for Metrics
+  # ------------------------------------------------------------------------
+  [[metrics]]
+
+    # Enable the metrics URL "/desktop/metrics"
+    ## enable_web_metrics=True
+
+    # If specified, Hue will write metrics to this file.
+    ## location=/var/log/hue/metrics.json
+
+    # How frequently to collect metrics, in milliseconds
+    ## collection_interval=30000
+
+
+  [[clusters]]
+    [[[engines]]]
+      type=altus-engines
+      hostname=...
+      auth_key_id=...
+      auth_key_secret=...
+
+    [[[AltusV2]]]
+      type=altusv2
+      hostname=...
+      auth_key_id=...
+      auth_key_secret=...
+
+###########################################################################
+# Settings to configure the snippets available in the Notebook
+###########################################################################
+
+[notebook]
+
+  ## Show the notebook menu or not
+  # show_notebooks=true
+
+  ## Flag to enable the selection of queries from files, saved queries into the editor or as snippet.
+  # enable_external_statements=true
+
+  ## Flag to enable the bulk submission of queries as a background task through Oozie.
+  # enable_batch_execute=true
+
+  ## Flag to turn on the SQL indexer.
+  # enable_sql_indexer=false
+
+  ## Flag to turn on the Presentation mode of the editor.
+  # enable_presentation=true
+
+  ## Flag to enable the SQL query builder of the table assist.
+  enable_query_builder=false
+
+  ## Flag to enable the creation of a coordinator for the current SQL query.
+  # enable_query_scheduling=false
+
+  ## Main flag to override the automatic starting of the DBProxy server.
+  # enable_dbproxy_server=true
+
+  ## Classpath to be appended to the default DBProxy server classpath.
+  # dbproxy_extra_classpath=
+
+  ## Comma separated list of interpreters that should be shown on the wheel. This list takes precedence over the
+  ## order in which the interpreter entries appear. Only the first 5 interpreters will appear on the wheel.
+  # interpreters_shown_on_wheel=
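+
+  ## e.g. a hypothetical wheel showing Impala first, then Hive:
+  # interpreters_shown_on_wheel=impala,hive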
+
+  enable_query_analysis=true
+
+  # One entry for each type of snippet.
+  [[interpreters]]
+    # Define the name and how to connect and execute the language.
+
+    # [[[hive]]]
+    #   # The name of the snippet.
+    #   name=Hive
+    #   # The backend connection to use to communicate with the server.
+    #   interface=hiveserver2
+
+    [[[impala]]]
+      name=Impala
+      interface=hiveserver2
+
+    # [[[sparksql]]]
+    #   name=SparkSql
+    #   interface=hiveserver2
+
+    # [[[spark]]]
+    #  name=Scala
+    #  interface=livy
+
+    # [[[pyspark]]]
+    #  name=PySpark
+    #  interface=livy
+
+    # [[[r]]]
+    #  name=R
+    #  interface=livy
+
+    # [[[jar]]]
+    # name=Spark Submit Jar
+    # interface=livy-batch
+
+    # [[[py]]]
+    # name=Spark Submit Python
+    # interface=livy-batch
+
+    # [[[text]]]
+    # name=Text
+    # interface=text
+
+    # [[[markdown]]]
+    #  name=Markdown
+    #  interface=text
+
+    # [[[mysql]]]
+    #  name = MySQL
+    #  interface=rdbms
+
+    # [[[sqlite]]]
+    #  name = SQLite
+    #  interface=rdbms
+
+    # [[[postgresql]]]
+    #  name = PostgreSQL
+    #  interface=rdbms
+
+    # [[[oracle]]]
+    #  name = Oracle
+    #  interface=rdbms
+
+    # [[[solr]]]
+    #  name = Solr SQL
+    #  interface=solr
+    #  ## Name of the collection handler
+    #  # options='{"collection": "default"}'
+
+    # [[[pig]]]
+    #  name=Pig
+    #  interface=oozie
+
+    # [[[java]]]
+    #  name=Java
+    #  interface=oozie
+
+    # [[[spark2]]]
+    #  name=Spark
+    #  interface=oozie
+
+    # [[[mapreduce]]]
+    #  name=MapReduce
+    #  interface=oozie
+
+    # [[[sqoop1]]]
+    #  name=Sqoop1
+    #  interface=oozie
+
+    # [[[distcp]]]
+    #  name=Distcp
+    #  interface=oozie
+
+    # [[[shell]]]
+    #  name=Shell
+    #  interface=oozie
+
+    # [[[mysql]]]
+    #   name=MySql JDBC
+    #   interface=jdbc
+    #   ## Specific options for connecting to the server.
+    #   ## The JDBC connectors, e.g. mysql.jar, need to be in the CLASSPATH environment variable.
+    #   ## If 'user' and 'password' are omitted, they will be prompted in the UI.
+    #   ## Option 'impersonation_property' used to configure outbound impersonation, e.g. "impersonation_property": "hive.server2.proxy.user".
+    #   options='{"url": "jdbc:mysql://localhost:3306/hue", "driver": "com.mysql.jdbc.Driver", "user": "root", "password": "root", "impersonation_property": ""}'
+
+
+###########################################################################
+# Settings to configure your Analytics Dashboards
+###########################################################################
+
+[dashboard]
+
+  # Activate the Dashboard link in the menu.
+  ## is_enabled=true
+
+  # Activate the SQL Dashboard (beta).
+  ## has_sql_enabled=false
+
+  # Activate the Query Builder (beta).
+  ## has_query_builder_enabled=false
+
+  # Activate the static report layout (beta).
+  ## has_report_enabled=false
+
+  # Activate the new grid layout system.
+  ## use_gridster=true
+
+  # Activate the widget filter and comparison (beta).
+  ## has_widget_filter=false
+
+  # Activate the tree widget (to drill down fields as dimensions, alpha).
+  ## has_tree_widget=false
+
+  [[engines]]
+
+    #  [[[solr]]]
+    #  Requires Solr 6+
+    ##  analytics=true
+    ##  nesting=false
+
+    #  [[[sql]]]
+    ##  analytics=true
+    ##  nesting=false
+
+
+###########################################################################
+# Settings to configure your Hadoop cluster.
+###########################################################################
+
+[hadoop]
+
+  # Configuration for HDFS NameNode
+  # ------------------------------------------------------------------------
+  [[hdfs_clusters]]
+    # HA support by using HttpFs
+
+    [[[default]]]
+      # Enter the filesystem uri
+      fs_defaultfs=hdfs://localhost:8020
+
+      # NameNode logical name.
+      ## logical_name=
+
+      # Use WebHdfs/HttpFs as the communication mechanism.
+      # Domain should be the NameNode or HttpFs host.
+      # Default port is 14000 for HttpFs.
+      ## webhdfs_url=http://localhost:50070/webhdfs/v1
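+
+      # e.g. a hypothetical HttpFs endpoint (HttpFs defaults to port 14000):
+      ## webhdfs_url=http://httpfs-host.example.com:14000/webhdfs/v1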
+
+      # Change this if your HDFS cluster is Kerberos-secured
+      ## security_enabled=false
+
+      # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
+      # have to be verified against certificate authority
+      ## ssl_cert_ca_verify=True
+
+      # Directory of the Hadoop configuration
+      ## hadoop_conf_dir=$HADOOP_CONF_DIR when set or '/etc/hadoop/conf'
+
+  # Configuration for YARN (MR2)
+  # ------------------------------------------------------------------------
+  [[yarn_clusters]]
+
+    [[[default]]]
+      # Enter the host on which you are running the ResourceManager
+      ## resourcemanager_host=localhost
+
+      # The port where the ResourceManager IPC listens on
+      ## resourcemanager_port=8032
+
+      # Whether to submit jobs to this cluster
+      submit_to=False
+
+      # Resource Manager logical name (required for HA)
+      ## logical_name=
+
+      # Change this if your YARN cluster is Kerberos-secured
+      ## security_enabled=false
+
+      # URL of the ResourceManager API
+      ## resourcemanager_api_url=http://localhost:8088
+
+      # URL of the ProxyServer API
+      ## proxy_api_url=http://localhost:8088
+
+      # URL of the HistoryServer API
+      ## history_server_api_url=http://localhost:19888
+
+      # URL of the Spark History Server
+      ## spark_history_server_url=http://localhost:18088
+
+      # In secure mode (HTTPS), if SSL certificates from YARN Rest APIs
+      # have to be verified against certificate authority
+      ## ssl_cert_ca_verify=True
+
+    # HA support by specifying multiple clusters.
+    # Redefine different properties there.
+    # e.g.
+
+    # [[[ha]]]
+      # Resource Manager logical name (required for HA)
+      ## logical_name=my-rm-name
+
+      # Un-comment to enable
+      ## submit_to=True
+
+      # URL of the ResourceManager API
+      ## resourcemanager_api_url=http://localhost:8088
+
+      # ...
+
+
+###########################################################################
+# Settings to configure Beeswax with Hive
+###########################################################################
+
+[beeswax]
+
+  # Host where HiveServer2 is running.
+  # If Kerberos security is enabled, use fully-qualified domain name (FQDN).
+  ## hive_server_host=localhost
+
+  # Port where HiveServer2 Thrift server runs on.
+  ## hive_server_port=10000
+
+  # Hive configuration directory, where hive-site.xml is located
+  ## hive_conf_dir=/etc/hive/conf
+
+  # Timeout in seconds for thrift calls to Hive service
+  ## server_conn_timeout=120
+
+  # Choose whether to use the old GetLog() thrift call from before Hive 0.14 to retrieve the logs.
+  # If false, use the FetchResults() thrift call available from Hive 1.0 onward instead.
+  ## use_get_log_api=false
+
+  # Limit the number of partitions that can be listed.
+  ## list_partitions_limit=10000
+
+  # The maximum number of partitions that will be included in the SELECT * LIMIT sample query for partitioned tables.
+  ## query_partitions_limit=10
+
+  # A limit to the number of rows that can be downloaded from a query before it is truncated.
+  # A value of -1 means there will be no limit.
+  ## download_row_limit=100000
+
+  # A limit to the number of bytes that can be downloaded from a query before it is truncated.
+  # A value of -1 means there will be no limit.
+  ## download_bytes_limit=-1
+
+  # Hue will try to close the Hive query when the user leaves the editor page.
+  # This will free all the query resources in HiveServer2, but also make its results inaccessible.
+  ## close_queries=false
+
+  # Hue will use at most this many HiveServer2 sessions per user at a time.
+  # For Tez, increase the number if you need more than one query at a time, e.g. 2 or 3 (Tez has a maximum of 1 query per session).
+  ## max_number_of_sessions=1
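+
+  # e.g. a hypothetical Tez setup allowing two concurrent queries per user:
+  ## max_number_of_sessions=2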
+
+  # Thrift version to use when communicating with HiveServer2.
+  # New column format is from version 7.
+  ## thrift_version=7
+
+  # A comma-separated list of white-listed Hive configuration properties that users are authorized to set.
+  ## config_whitelist=hive.map.aggr,hive.exec.compress.output,hive.exec.parallel,hive.execution.engine,mapreduce.job.queuename
+
+  # Override the default desktop username and password of the hue user used for authentication with other services.
+  # e.g. Used for LDAP/PAM pass-through authentication.
+  ## auth_username=hue
+  ## auth_password=
+
+  [[ssl]]
+    # Path to Certificate Authority certificates.
+    ## cacerts=/etc/hue/cacerts.pem
+
+    # Choose whether Hue should validate certificates received from the server.
+    ## validate=true
+
+
+###########################################################################
+# Settings to configure Metastore
+###########################################################################
+
+[metastore]
+  # Flag to turn on the new version of the create table wizard.
+  ## enable_new_create_table=true
+
+  # Flag to force all metadata calls (e.g. list tables, table or column details...) to happen via HiveServer2 if available instead of Impala.
+  ## force_hs2_metadata=false
+
+
+###########################################################################
+# Settings to configure Impala
+###########################################################################
+
+[impala]
+  # Host of the Impala Server (one of the Impalad)
+  ## server_host=localhost
+
+  # Port of the Impala Server
+  ## server_port=21050
+
+  # Kerberos principal
+  ## impala_principal=impala/hostname.foo.com
+
+  # Turn on/off impersonation mechanism when talking to Impala
+  ## impersonation_enabled=False
+
+  # Number of initial rows of a result set to ask Impala to cache in order
+  # to support re-fetching them for download.
+  # Set to 0 to disable the option (and for backward compatibility).
+  ## querycache_rows=50000
+
+  # Timeout in seconds for thrift calls
+  ## server_conn_timeout=120
+
+  # Hue will try to close the Impala query when the user leaves the editor page.
+  # This will free all the query resources in Impala, but also make its results inaccessible.
+  ## close_queries=true
+
+  # If > 0, the query will be timed out (i.e. cancelled) if Impala does not do any work
+  # (compute or send back results) for that query within QUERY_TIMEOUT_S seconds.
+  ## query_timeout_s=300
+
+  # If > 0, the session will be timed out (i.e. cancelled) if Impala does not do any work
+  # (compute or send back results) for that session within SESSION_TIMEOUT_S seconds (default 15 min).
+  ## session_timeout_s=900
+
+  # Override the desktop default username and password of the hue user used for authentication with other services.
+  # e.g. Used for LDAP/PAM pass-through authentication.
+  ## auth_username=hue
+  ## auth_password=
+
+  # Username and password for Impala Daemon Web interface for getting Impala queries in JobBrowser
+  # Set when webserver_htpassword_user and webserver_htpassword_password are set for Impala
+  ## daemon_api_username=
+  ## daemon_api_password=
+  # Execute this script to produce the password to avoid entering in clear text
+  ## daemon_api_password_script=
+
+  # A comma-separated list of white-listed Impala configuration properties that users are authorized to set.
+  # config_whitelist=debug_action,explain_level,mem_limit,optimize_partition_key_scans,query_timeout_s,request_pool
+
+  # Path to the impala configuration dir which has impalad_flags file
+  ## impala_conf_dir=${HUE_CONF_DIR}/impala-conf
+
+  [[ssl]]
+    # SSL communication enabled for this server.
+    ## enabled=false
+
+    # Path to Certificate Authority certificates.
+    ## cacerts=/etc/hue/cacerts.pem
+
+    # Choose whether Hue should validate certificates received from the server.
+    validate=false
+
+
+###########################################################################
+# Settings to configure the Spark application.
+###########################################################################
+
+[spark]
+  # The Livy Server URL.
+  ## livy_server_url=http://localhost:8998
+
+  # Configure Livy to start in local 'process' mode, or with 'yarn' workers.
+  ## livy_server_session_kind=yarn
+
+  # Whether Livy requires client to perform Kerberos authentication.
+  ## security_enabled=false
+
+  # Host of the Sql Server
+  ## sql_server_host=localhost
+
+  # Port of the Sql Server
+  ## sql_server_port=10000
+
+  # Choose whether Hue should validate certificates received from the server.
+  ## ssl_cert_ca_verify=true
+
+
+###########################################################################
+# Settings to configure the Oozie app
+###########################################################################
+
+[oozie]
+  # Location on local FS where the examples are stored.
+  ## local_data_dir=..../examples
+
+  # Location on local FS where the data for the examples is stored.
+  ## sample_data_dir=...thirdparty/sample_data
+
+  # Location on HDFS where the oozie examples and workflows are stored.
+  # Parameters are $TIME and $USER, e.g. /user/$USER/hue/workspaces/workflow-$TIME
+  ## remote_data_dir=/user/hue/oozie/workspaces
+
+  # Maximum number of Oozie workflows or coordinators to retrieve in one API call.
+  ## oozie_jobs_count=100
+
+  # Use Cron format for defining the frequency of a Coordinator instead of the old frequency number/unit.
+  ## enable_cron_scheduling=true
+
+  # Flag to enable the saved Editor queries to be dragged and dropped into a workflow.
+  ## enable_document_action=true
+
+  # Flag to enable Oozie backend filtering instead of doing it at the page level in Javascript. Requires Oozie 4.3+.
+  ## enable_oozie_backend_filtering=true
+
+  # Flag to enable the Impala action.
+  ## enable_impala_action=false
+
+  # Flag to enable the Altus action.
+  ## enable_altus_action=false
+
+
+###########################################################################
+# Settings to configure the Filebrowser app
+###########################################################################
+
+[filebrowser]
+  # Location on the local filesystem where uploaded archives are temporarily stored.
+  ## archive_upload_tempdir=/tmp
+
+  # Show Download Button for HDFS file browser.
+  ## show_download_button=true
+
+  # Show Upload Button for HDFS file browser.
+  ## show_upload_button=true
+
+  # Flag to enable the extraction of an uploaded archive in HDFS.
+  ## enable_extract_uploaded_archive=true
+
+
+###########################################################################
+# Settings to configure Pig
+###########################################################################
+
+[pig]
+  # Path to directory with piggybank.jar on local filesystem.
+  ## local_sample_dir=/usr/share/hue/apps/pig/examples
+
+  # Location piggybank.jar will be copied to in HDFS.
+  ## remote_data_dir=/user/hue/pig/examples
+
+
+###########################################################################
+# Settings to configure Sqoop2
+###########################################################################
+
+[sqoop]
+  # Whether the Sqoop2 app is enabled. The Sqoop2 project is deprecated; Sqoop1 is recommended.
+  ## is_enabled=false
+
+  # Sqoop server URL
+  ## server_url=http://localhost:12000/sqoop
+
+  # Path to configuration directory
+  ## sqoop_conf_dir=/etc/sqoop2/conf
+
+  # Choose whether Hue should validate certificates received from the server.
+  ## ssl_cert_ca_verify=true
+
+  # For autocompletion, fill out the librdbms section.
+
+###########################################################################
+# Settings to configure Proxy
+###########################################################################
+
+[proxy]
+  # Comma-separated list of regular expressions,
+  # which match 'host:port' of requested proxy target.
+  ## whitelist=(localhost|127\.0\.0\.1):(50030|50070|50060|50075)
+
+  # Comma-separated list of regular expressions,
+  # which match any prefix of 'host:port/path' of requested proxy target.
+  # This does not support matching GET parameters.
+  ## blacklist=
+
+
+###########################################################################
+# Settings to configure HBase Browser
+###########################################################################
+
+[hbase]
+  # Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.
+  # Use full hostname with security.
+  # If using Kerberos we assume GSSAPI SASL, not PLAIN.
+  ## hbase_clusters=(Cluster|localhost:9090)
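+
+  # e.g. a hypothetical pair of clusters, following the '(name|host:port)' format above:
+  ## hbase_clusters=(Cluster1|hbase-thrift1.example.com:9090),(Cluster2|hbase-thrift2.example.com:9090)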
+
+  # HBase configuration directory, where hbase-site.xml is located.
+  ## hbase_conf_dir=/etc/hbase/conf
+
+  # Hard limit of rows or columns per row fetched before truncating.
+  ## truncate_limit = 500
+
+  # 'framed' is used to chunk up responses, which is useful when used in conjunction with the nonblocking server in Thrift.
+  # 'buffered' used to be the default of the HBase Thrift Server.
+  ## thrift_transport=framed
+
+  # Choose whether Hue should validate certificates received from the server.
+  ## ssl_cert_ca_verify=true
+
+
+###########################################################################
+# Settings to configure Solr Search
+###########################################################################
+
+[search]
+
+  # URL of the Solr Server
+  ## solr_url=http://localhost:8983/solr/
+
+  # Requires FQDN in solr_url if enabled
+  ## security_enabled=false
+
+  ## Query sent when no term is entered
+  ## empty_query=*:*
+
+
+###########################################################################
+# Settings to configure Solr API lib
+###########################################################################
+
+[libsolr]
+
+  # Choose whether Hue should validate certificates received from the server.
+  ## ssl_cert_ca_verify=true
+
+  # Default path to Solr in ZooKeeper.
+  ## solr_zk_path=/solr
+
+
+###########################################################################
+# Settings to configure the Data Import Wizard
+###########################################################################
+
+[indexer]
+
+  # Filesystem directory containing Solr Morphline indexing libs.
+  ## config_indexer_libs_path=/tmp/smart_indexer_lib
+
+  # Filesystem directory containing JDBC libs.
+  ## config_jdbc_libs_path=/user/oozie/libext/jdbc_drivers
+
+  # Filesystem directory containing jar libs.
+  ## config_jars_libs_path=/user/oozie/libext/libs
+
+  # Flag to turn on the Solr Morphline indexer.
+  ## enable_scalable_indexer=true
+
+  # Flag to turn on Sqoop ingest.
+  ## enable_sqoop=true
+
+  # Flag to turn on Kafka topic ingest.
+  ## enable_kafka=false
+
+
+###########################################################################
+# Settings to configure Job Designer
+###########################################################################
+
+[jobsub]
+
+  # Location on local FS where examples and template are stored.
+  ## local_data_dir=..../data
+
+  # Location on local FS where sample data is stored
+  ## sample_data_dir=...thirdparty/sample_data
+
+
+###########################################################################
+# Settings to configure Job Browser.
+###########################################################################
+
+[jobbrowser]
+  # Share submitted jobs information with all users. If set to false,
+  # submitted jobs are visible only to the owner and administrators.
+  ## share_jobs=true
+
+  # Whether to disable the job kill button for all users in the jobbrowser
+  ## disable_killing_jobs=false
+
+  # Offset in bytes; a negative offset fetches the last N bytes of the given log file (default 1MB).
+  ## log_offset=-1000000
+
+  # Maximum number of jobs to fetch and display when pagination is not supported for the type.
+  ## max_job_fetch=500
+
+  # Show the version 2 of app which unifies all the past browsers into one.
+  ## enable_v2=true
+
+  # Show the query section for listing and showing more troubleshooting information.
+  ## enable_query_browser=true
+
+
+###########################################################################
+# Settings to configure Sentry / Security App.
+###########################################################################
+
+[security]
+
+  # Use Sentry API V1 for Hive.
+  ## hive_v1=true
+
+  # Use Sentry API V2 for Hive.
+  ## hive_v2=false
+
+  # Use Sentry API V2 for Solr.
+  ## solr_v2=true
+
+
+###########################################################################
+# Settings to configure the Zookeeper application.
+###########################################################################
+
+[zookeeper]
+
+  [[clusters]]
+
+    [[[default]]]
+      # Zookeeper ensemble. Comma separated list of Host/Port.
+      # e.g. localhost:2181,localhost:2182,localhost:2183
+      ## host_ports=localhost:2181
+
+      # The URL of the REST contrib service (required for znode browsing).
+      ## rest_url=http://localhost:9998
+
+      # Name of Kerberos principal when using security.
+      ## principal_name=zookeeper
+
+
+###########################################################################
+# Settings for the User Admin application
+###########################################################################
+
+[useradmin]
+  # Default home directory permissions
+  ## home_dir_permissions=0755
+
+  # The name of the default user group that users will be a member of
+  ## default_user_group=default
+
+  [[password_policy]]
+    # Set password policy to all users. The default policy requires password to be at least 8 characters long,
+    # and contain both uppercase and lowercase letters, numbers, and special characters.
+
+    ## is_enabled=false
+    ## pwd_regex="^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"
+    ## pwd_hint="The password must be at least 8 characters long, and must contain both uppercase and lowercase letters, at least one number, and at least one special character."
+    ## pwd_error_message="The password must be at least 8 characters long, and must contain both uppercase and lowercase letters, at least one number, and at least one special character."
+
+
+###########################################################################
+# Settings to configure liboozie
+###########################################################################
+
+[liboozie]
+  # The URL where the Oozie service runs on. This is required in order for
+  # users to submit jobs. Empty value disables the config check.
+  ## oozie_url=http://localhost:11000/oozie
+
+  # Requires FQDN in oozie_url if enabled
+  ## security_enabled=false
+
+  # Location on HDFS where the workflows/coordinator are deployed when submitted.
+  ## remote_deployement_dir=/user/hue/oozie/deployments
+
+
+###########################################################################
+# Settings for the AWS lib
+###########################################################################
+
+[aws]
+  [[aws_accounts]]
+    # Default AWS account
+    ## [[[default]]]
+      # AWS credentials
+      ## access_key_id=
+      ## secret_access_key=
+      ## security_token=
+
+      # Execute this script to produce the AWS access key ID.
+      ## access_key_id_script=/path/access_key_id.sh
+
+      # Execute this script to produce the AWS secret access key.
+      ## secret_access_key_script=/path/secret_access_key.sh
+
+      # Allow the use of either environment variables or
+      # the EC2 InstanceProfile to retrieve AWS credentials.
+      ## allow_environment_credentials=yes
+
+      # AWS region to use, if no region is specified, will attempt to connect to standard s3.amazonaws.com endpoint
+      ## region=us-east-1
+
+      # Endpoint overrides
+      ## host=
+
+      # Proxy address and port
+      ## proxy_address=
+      ## proxy_port=8080
+      ## proxy_user=
+      ## proxy_pass=
+
+      # Secure connections are the default, but this can be explicitly overridden:
+      ## is_secure=true
+
+      # The default calling format uses https://<bucket-name>.s3.amazonaws.com but
+      # this may not make sense if DNS is not configured in this way for custom endpoints.
+      # e.g. Use boto.s3.connection.OrdinaryCallingFormat for https://s3.amazonaws.com/<bucket-name>
+      ## calling_format=boto.s3.connection.OrdinaryCallingFormat
+
+###########################################################################
+# Settings for the Azure lib
+###########################################################################
+[azure]
+  [[azure_accounts]]
+    # Default Azure account
+    [[[default]]]
+      # Azure credentials
+      ## client_id=
+      # Execute this script to produce the ADLS client id.
+      ## client_id_script=/path/client_id.sh
+      ## client_secret=
+      # Execute this script to produce the ADLS client secret.
+      ## client_secret_script=/path/client_secret.sh
+      ## tenant_id=
+      # Execute this script to produce the ADLS tenant id.
+      ## tenant_id_script=/path/tenant_id.sh
+
+  [[adls_clusters]]
+    # Default ADLS cluster
+    [[[default]]]
+      ## fs_defaultfs=adl://<account_name>.azuredatalakestore.net
+      ## webhdfs_url=https://<account_name>.azuredatalakestore.net/webhdfs/v1
+
+###########################################################################
+# Settings for the Sentry lib
+###########################################################################
+
+[libsentry]
+  # Hostname or IP of server.
+  ## hostname=localhost
+
+  # Port the sentry service is running on.
+  ## port=8038
+
+  # Sentry configuration directory, where sentry-site.xml is located.
+  ## sentry_conf_dir=/etc/sentry/conf
+
+  # Number of seconds when the privilege list of a user is cached.
+  ## privilege_checker_caching=300
+
+
+###########################################################################
+# Settings to configure the ZooKeeper Lib
+###########################################################################
+
+[libzookeeper]
+  # ZooKeeper ensemble. Comma separated list of Host/Port.
+  # e.g. localhost:2181,localhost:2182,localhost:2183
+  ## ensemble=localhost:2181
+
+  # Name of Kerberos principal when using security.
+  ## principal_name=zookeeper
+
+
+###########################################################################
+# Settings for the RDBMS application
+###########################################################################
+
+[librdbms]
+  # The RDBMS app can have any number of databases configured in the databases
+  # section. A database is known by its section name
+  # (i.e. sqlite, mysql, psql, and oracle in the list below).
+
+  [[databases]]
+    # sqlite configuration.
+    ## [[[sqlite]]]
+      # Name to show in the UI.
+      ## nice_name=SQLite
+
+      # For SQLite, name defines the path to the database.
+      ## name=/tmp/sqlite.db
+
+      # Database backend to use.
+      ## engine=sqlite
+
+      # Database options to send to the server when connecting.
+      # https://docs.djangoproject.com/en/1.4/ref/databases/
+      ## options={}
+
+    # mysql, oracle, or postgresql configuration.
+    ## [[[mysql]]]
+      # Name to show in the UI.
+      ## nice_name="My SQL DB"
+
+      # For MySQL and PostgreSQL, name is the name of the database.
+      # For Oracle, name is the instance of the Oracle server. For Express Edition
+      # this is 'xe' by default.
+      ## name=mysqldb
+
+      # Database backend to use. This can be:
+      # 1. mysql
+      # 2. postgresql
+      # 3. oracle
+      ## engine=mysql
+
+      # IP or hostname of the database to connect to.
+      ## host=localhost
+
+      # Port the database server is listening to. Defaults are:
+      # 1. MySQL: 3306
+      # 2. PostgreSQL: 5432
+      # 3. Oracle Express Edition: 1521
+      ## port=3306
+
+      # Username to authenticate with when connecting to the database.
+      ## user=example
+
+      # Password matching the username to authenticate with when
+      # connecting to the database.
+      ## password=example
+
+      # Database options to send to the server when connecting.
+      # https://docs.djangoproject.com/en/1.4/ref/databases/
+      ## options={}
+
+
+###########################################################################
+# Settings to configure SAML
+###########################################################################
+
+[libsaml]
+  # Xmlsec1 binary path. This program should be executable by the user running Hue.
+  ## xmlsec_binary=/usr/local/bin/xmlsec1
+
+  # Entity ID for Hue acting as service provider.
+  # Can also accept a pattern where '<base_url>' will be replaced with server URL base.
+  ## entity_id="<base_url>/saml2/metadata/"
+
+  # Create users from SSO on login.
+  ## create_users_on_login=true
+
+  # Required attributes to ask for from IdP.
+  # This requires a comma separated list.
+  ## required_attributes=uid
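+
+  # e.g. a hypothetical IdP that also supplies an email attribute:
+  ## required_attributes=uid,mail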
+
+  # Optional attributes to ask for from IdP.
+  # This requires a comma separated list.
+  ## optional_attributes=
+
+  # IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates.
+  ## metadata_file=
+
+  # Private key to encrypt metadata with.
+  ## key_file=
+
+  # Signed certificate to send along with encrypted metadata.
+  ## cert_file=
+
+  # Path to a file containing the password of the private key.
+  ## key_file_password=/path/key
+
+  # Execute this script to produce the private key password. This will be used when 'key_file_password' is not set.
+  ## key_file_password_script=/path/pwd.sh
+
+  # A mapping from attributes in the response from the IdP to django user attributes.
+  ## user_attribute_mapping={'uid': ('username', )}
+
+  # Have Hue-initiated authn requests be signed and provide a certificate.
+  ## authn_requests_signed=false
+
+  # Have Hue-initiated logout requests be signed and provide a certificate.
+  ## logout_requests_signed=false
+
+  # Username can be sourced from 'attributes' or 'nameid'.
+  ## username_source=attributes
+
+  # Performs the logout or not.
+  ## logout_enabled=true
+
+
+###########################################################################
+# Settings to configure OpenID
+###########################################################################
+
+[libopenid]
+  # (Required) OpenId SSO endpoint url.
+  ## server_endpoint_url=https://www.google.com/accounts/o8/id
+
+  # OpenId 1.1 identity url prefix to be used instead of SSO endpoint url
+  # This is only supported if you are using an OpenId 1.1 endpoint
+  ## identity_url_prefix=https://app.onelogin.com/openid/your_company.com/
+
+  # Create users from OPENID on login.
+  ## create_users_on_login=true
+
+  # Use email for username
+  ## use_email_for_username=true
+
+
+###########################################################################
+# Settings to configure OAuth
+###########################################################################
+
+[liboauth]
+  # NOTE:
+  # To work, each active (i.e. uncommented) service must have an
+  # application created on the corresponding social network.
+  # Then the "consumer key" and "consumer secret" must be provided here.
+  #
+  # Applications can be created at:
+  # Twitter:  https://dev.twitter.com/apps
+  # Google+ : https://cloud.google.com/
+  # Facebook: https://developers.facebook.com/apps
+  # Linkedin: https://www.linkedin.com/secure/developer
+  #
+  # Additionally, the following must be set in the application settings:
+  # Twitter:  Callback URL (aka Redirect URL) must be set to http://YOUR_HUE_IP_OR_DOMAIN_NAME/oauth/social_login/oauth_authenticated
+  # Google+ : CONSENT SCREEN must have email address
+  # Facebook: Sandbox Mode must be DISABLED
+  # Linkedin: "In OAuth User Agreement", r_emailaddress is REQUIRED
+
+  # The Consumer key of the application
+  ## consumer_key_twitter=
+  ## consumer_key_google=
+  ## consumer_key_facebook=
+  ## consumer_key_linkedin=
+
+  # The Consumer secret of the application
+  ## consumer_secret_twitter=
+  ## consumer_secret_google=
+  ## consumer_secret_facebook=
+  ## consumer_secret_linkedin=
+
+  # The Request token URL
+  ## request_token_url_twitter=https://api.twitter.com/oauth/request_token
+  ## request_token_url_google=https://accounts.google.com/o/oauth2/auth
+  ## request_token_url_linkedin=https://www.linkedin.com/uas/oauth2/authorization
+  ## request_token_url_facebook=https://graph.facebook.com/oauth/authorize
+
+  # The Access token URL
+  ## access_token_url_twitter=https://api.twitter.com/oauth/access_token
+  ## access_token_url_google=https://accounts.google.com/o/oauth2/token
+  ## access_token_url_facebook=https://graph.facebook.com/oauth/access_token
+  ## access_token_url_linkedin=https://api.linkedin.com/uas/oauth2/accessToken
+
+  # The Authenticate URL
+  ## authenticate_url_twitter=https://api.twitter.com/oauth/authorize
+  ## authenticate_url_google=https://www.googleapis.com/oauth2/v1/userinfo?access_token=
+  ## authenticate_url_facebook=https://graph.facebook.com/me?access_token=
+  ## authenticate_url_linkedin=https://api.linkedin.com/v1/people/~:(email-address)?format=json&oauth2_access_token=
+
+  # Username map, in JSON hash format.
+  # Replaces parts of the username in order to simplify the usernames obtained.
+  # Example: {"@sub1.domain.com":"_S1", "@sub2.domain.com":"_S2"}
+  # converts 'email@sub1.domain.com' to 'email_S1'
+  ## username_map={}
+
+  # Whitelisted domains (only applies to Google OAuth). CSV format.
+  ## whitelisted_domains_google=
+
+
+###########################################################################
+# Settings to configure Kafka
+###########################################################################
+
+[kafka]
+
+  [[kafka]]
+    # Enable the Kafka integration.
+    ## is_enabled=false
+
+    # Base URL of Kafka REST API.
+    ## api_url=http://localhost:8082
+
+
+###########################################################################
+# Settings to configure Metadata
+###########################################################################
+
+[metadata]
+  # TODO: should come from Helm
+  [[k8s]]
+    api_url=http://provisioner.107.178.211.187.nip.io
+
+  [[optimizer]]
+    # Hostname to Optimizer API or compatible service.
+    ## hostname=navoptapi.us-west-1.optimizer.altus.cloudera.com
+
+    # The name of the key of the service.
+    ## auth_key_id=e0819f3a-1e6f-4904-be69-5b704bacd1245
+
+    # The private part of the key associated with the auth_key.
+    ## auth_key_secret='-----BEGIN....'
+
+    # Execute this script to produce the auth_key secret. This will be used when `auth_key_secret` is not set.
+    ## auth_key_secret_script=/path/to/script.sh
+
+    # The name of the workload that queries are uploaded to and optimizations are calculated from. Automatically guessed from auth_key and cluster_id if not specified.
+    ## tenant_id=
+
+    # Perform Sentry privilege filtering.
+    # Defaults to true automatically if the cluster is secure.
+    ## apply_sentry_permissions=False
+
+    # Cache timeout in milliseconds for the Optimizer metadata used in assist, autocomplete, etc.
+    # Defaults to 10 days, set to 0 to disable caching.
+    ## cacheable_ttl=864000000
+
+    # Automatically upload queries after their execution in order to improve recommendations.
+    ## auto_upload_queries=true
+
+    # Automatically upload queried tables DDL in order to improve recommendations.
+    ## auto_upload_ddl=true
+
+    # Automatically upload queried tables and columns stats in order to improve recommendations.
+    ## auto_upload_stats=false
+
+    # Allow admins to upload the last N executed queries in the quick start wizard. Use 0 to disable.
+    ## query_history_upload_limit=10000
+
+  [[navigator]]
+    # Navigator API URL (without version suffix).
+    ## api_url=http://localhost:7187/api
+
+    # Which authentication to use: CM or external via LDAP or SAML.
+    ## navmetadataserver_auth_type=CMDB
+
+    # Username of the CM user used for authentication.
+    ## navmetadataserver_cmdb_user=hue
+    # CM password of the user used for authentication.
+    ## navmetadataserver_cmdb_password=
+    # Execute this script to produce the CM password. This will be used when the plain password is not set.
+    ## navmetadataserver_cmdb_password_script=
+
+    # Username of the LDAP user used for authentication.
+    ## navmetadataserver_ldap_user=hue
+    # LDAP password of the user used for authentication.
+    ## navmetadataserver_ldap_password=
+    # Execute this script to produce the LDAP password. This will be used when the plain password is not set.
+    ## navmetadataserver_ldap_password_script=
+
+    # Username of the SAML user used for authentication.
+    ## navmetadataserver_saml_user=hue
+    # SAML password of the user used for authentication.
+    ## navmetadataserver_saml_password=
+    # Execute this script to produce the SAML password. This will be used when the plain password is not set.
+    ## navmetadataserver_saml_password_script=
+
+    # Perform Sentry privilege filtering.
+    # Defaults to true automatically if the cluster is secure.
+    ## apply_sentry_permissions=False
+
+    # Max number of items to fetch in one call in object search.
+    ## fetch_size_search=450
+
+    # Max number of items to fetch in one call in object search autocomplete.
+    ## fetch_size_search_interactive=450
+
+    # If metadata search is enabled, also show the search box in the left assist.
+    ## enable_file_search=false
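
The `[[k8s]]` block above points Hue's metadata layer at the mock provisioner; as the TODO notes, the `api_url` is hard-coded for now and should eventually be injected by Helm. As a quick reachability check, something like the following works from Node, reusing the `request` library the provisioner already depends on; the bare GET is an assumption, since the provisioner's routes live in server.js and are not shown here:

```js
// Hypothetical connectivity check against the [[k8s]] api_url from hue.ini.
// It only verifies that the service answers; no route layout is assumed.
const request = require('request');

request('http://provisioner.107.178.211.187.nip.io', (error, response) => {
  if (error) {
    console.error('provisioner unreachable: ' + error.message);
  } else {
    console.log('provisioner answered with HTTP ' + response.statusCode);
  }
});
```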

+ 44 - 0
tools/kubernetes/services/hue-frontend/conf/log.conf

@@ -0,0 +1,44 @@
+# Just log to stdout for Docker
+[logger_root]
+handlers=stdout
+
+[logger_access]
+handlers=stdout
+qualname=access
+
+[logger_django_auth_ldap]
+handlers=stdout
+qualname=django_auth_ldap
+
+[logger_kazoo_client]
+handlers=stdout
+qualname=kazoo.client
+
+[logger_djangosaml2]
+handlers=stdout
+qualname=djangosaml2
+
+[logger_django_db]
+handlers=stdout
+qualname=django.db.backends
+
+# Handlers
+[handler_stdout]
+level=INFO
+class=StreamHandler
+formatter=default
+args=(sys.stdout,)
+
+[formatter_default]
+class=desktop.log.formatter.Formatter
+format=[%(asctime)s] %(module)-12s %(levelname)-8s %(message)s
+datefmt=%d/%b/%Y %H:%M:%S %z
+
+[loggers]
+keys=root,access,django_auth_ldap,kazoo_client,djangosaml2,django_db
+
+[handlers]
+keys=stdout
+
+[formatters]
+keys=default

+ 5 - 0
tools/kubernetes/services/hue-frontend/startup.sh

@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# Initialize or upgrade the Hue database schema, then start all Hue
+# processes under supervisor.
+./build/env/bin/hue syncdb --noinput
+./build/env/bin/hue migrate
+./build/env/bin/supervisor

+ 1 - 0
tools/kubernetes/services/mock-provisioner/.gitignore

@@ -0,0 +1 @@
+/node_modules/

+ 36 - 0
tools/kubernetes/services/mock-provisioner/Dockerfile

@@ -0,0 +1,36 @@
+# Use an official Ubuntu Xenial as a parent image
+FROM ubuntu:16.04
+
+# Install Node.js 8 and npm 5
+RUN apt-get update
+RUN apt-get install -y build-essential curl gettext
+RUN curl -sL https://deb.nodesource.com/setup_8.x | bash
+RUN apt-get install -y nodejs
+
+# Install kubectl (latest stable release)
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+RUN chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl
+
+# Install Helm
+ENV HELM_FILENAME helm-v2.10.0-linux-amd64.tar.gz
+ENV HELM_URL https://storage.googleapis.com/kubernetes-helm/${HELM_FILENAME}
+
+RUN echo $HELM_URL
+
+RUN curl -o /tmp/${HELM_FILENAME} ${HELM_URL} \
+  && tar -zxvf /tmp/${HELM_FILENAME} -C /tmp \
+  && mv /tmp/linux-amd64/helm /bin/helm \
+  && rm -rf /tmp/linux-amd64 \
+  && rm -rf /tmp/${HELM_FILENAME}
+
+RUN helm init --client-only
+
+# Set the working directory to /app
+WORKDIR /app
+
+# Copy the current directory contents into the container
+ADD . /app
+
+# Install the Node.js dependencies declared in package.json
+RUN npm install
+
+CMD ["npm", "start"]

+ 15 - 0
tools/kubernetes/services/mock-provisioner/README.md

@@ -0,0 +1,15 @@
+# Provisioning Service
+
+A quick and dirty provisioning service to manage the lifecycle of Impala clusters. See
+[provisioner.yaml](./provisioner.yaml) for the Altus-style API specification. In a proper
+implementation this would be split into an Altus API gateway and a backend provisioning service.
+
+## Development
+
+```sh
+npm install
+node server.js
+```
+
+## Deployment
+
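
Until the HTTP layer is documented, the quickest way to drive the service during development is through model.js directly; a minimal sketch, bypassing server.js (the cluster name and size are illustrative, and `helm`/`kubectl` must be on the PATH with cluster access, as in the Docker image):

```js
// Drive the provisioner model directly, bypassing the HTTP layer (sketch).
const model = require('./model');

(async () => {
  const { cluster } = await model.createCluster({
    clusterName: 'demo',   // illustrative
    workerReplicas: 2,     // overrides the default of 1
  });
  console.log(cluster.crn, cluster.status); // STARTING

  console.log(await model.listClusters({}));

  // The in-memory cluster map is keyed by CRN.
  await model.deleteCluster({ clusterName: cluster.crn });
})();
```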

+ 13 - 0
tools/kubernetes/services/mock-provisioner/command.js

@@ -0,0 +1,13 @@
+
+const util = require('util');
+const exec = util.promisify(require('child_process').exec);
+
+var exports = module.exports = {};
+
+// Run a shell command, logging it first. Resolves with the command's stdout.
+// The promisified exec rejects when the command exits non-zero; a non-empty
+// stderr alone is only logged.
+exports.runCommand = async function(command) {
+    console.log("Running shell command: " + command);
+    const { stdout, stderr } = await exec(command, { shell: true });
+    if (stderr) {
+        console.error(`error running ${command}: ${stderr}`);
+    }
+    return stdout;
+};
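
Callers in model.js await this helper for every helm and kubectl invocation. One behavior worth keeping in mind: the promisified exec rejects when the command exits non-zero, while a non-empty stderr alone is only logged. An illustrative caller:

```js
// Illustrative caller for command.js (the kubectl command is an example).
const Command = require('./command');

(async () => {
  try {
    const out = await Command.runCommand('kubectl get pods --namespace default');
    console.log(out);
  } catch (error) {
    // exec rejects when the command exits with a non-zero code.
    console.error('command failed: ' + error.message);
  }
})();
```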

+ 7 - 0
tools/kubernetes/services/mock-provisioner/config

@@ -0,0 +1,7 @@
+var config = {};
+
+config.helmRepo = "http://dataware-1.vpc.cloudera.com:8879"
+config.registry = "docker-registry.infra.cloudera.com/cloudera/datawarehouse";
+config.registryImpalaTag = "v2";
+
+module.exports = config;

+ 7 - 0
tools/kubernetes/services/mock-provisioner/config.templ

@@ -0,0 +1,7 @@
+var config = {};
+
+config.helmRepo = "http://dataware-1.vpc.cloudera.com:8879"
+config.registry = "docker-registry.infra.cloudera.com/cloudera/datawarehouse";
+config.registryImpalaTag = "v2";
+
+module.exports = config;
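
config and config.templ are identical at this point; presumably the .templ copy is the one meant to be edited or rendered per environment (the Dockerfile installs gettext, whose envsubst could do such rendering, though no placeholders exist yet). A hedged alternative sketch that makes the same settings overridable through environment variables:

```js
// Environment-overridable variant of config.js. This is an assumption about
// intent; the checked-in files use fixed values.
var config = {};

config.helmRepo = process.env.HELM_REPO || "http://dataware-1.vpc.cloudera.com:8879";
config.registry = process.env.REGISTRY || "docker-registry.infra.cloudera.com/cloudera/datawarehouse";
config.registryImpalaTag = process.env.REGISTRY_IMPALA_TAG || "v2";

module.exports = config;
```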

+ 250 - 0
tools/kubernetes/services/mock-provisioner/model.js

@@ -0,0 +1,250 @@
+
+const uuid = require('uuid/v4');
+const Client = require('kubernetes-client').Client;
+const config = require('kubernetes-client').config;
+// Prefer the local kubeconfig; fall back to the in-cluster service account
+// configuration when running inside Kubernetes.
+var client;
+try {
+  client = new Client({ config: config.fromKubeconfig(), version: '1.10' });
+} catch(error) {
+  client = new Client({ config: config.getInCluster(), version: '1.10' });
+}
+
+const Command = require('./command.js');
+const Prometheus = require('prom-client');
+const request = require("request");
+var configuration = require('./config');
+
+
+const TENANT = "12a0079b-1591-4ca0-b721-a446bda74e67"
+
+// Store in memory for now.
+const clusters = {};
+
+// Basic CRUD operations, no validation, no pagination, etc.
+const model = {};
+
+// Query states tracked through the 'status' label, e.g.:
+//   impala_queries_open_running
+//   impala_queries_open_queued
+//   impala_queries_closed_failed
+//   ...
+const impalaQueriesMetrics = new Prometheus.Gauge({
+  name: 'impala_queries',
+  help: 'Query metrics',
+  labelNames: ['datawarehouse', 'status']
+});
+const impalaQueriesCounter = new Prometheus.Gauge({
+  name: 'impala_queries_count',
+  help: 'Query metrics',
+  labelNames: ['datawarehouse']
+});
+
+
+function createCluster(options) {
+  const cluster = Object.assign({}, {
+    cdhVersion: "CDH6.3",
+    hdfsNamenodeHost: "hdfs-namenode",
+    hdfsNamenodePort: 9820, // 8020
+    // metastore.uris=thrift://spark2-envelope515-1.gce.cloudera.com:9083,hdfs.namenode.host=spark2-envelope515-1.gce.cloudera.com,hdfs.namenode.port=8020
+    workerCpuCores: 2,
+    workerMemoryInGib: 4,
+    workerReplicas: 1,
+    workerAutoResize: false,
+    workercurrentCPUUtilizationPercentage: 0
+  }, options);
+
+  cluster.name = cluster.clusterName;
+  cluster.crn = `crn:altus:dataware:k8s:${TENANT}:cluster:${cluster.clusterName}/${uuid()}`;
+  cluster.creationDate = new Date().toISOString();
+  cluster.status = "STARTING";
+  cluster.workerReplicasOnline = 0;
+  endpointHost = "impala-coordinator-" + cluster.clusterName;
+  cluster.coordinatorEndpoint = {privateHost: endpointHost, publicHost: endpointHost, port: 21050};
+
+  return cluster;
+}
+
+
+model.createCluster = async function(options) {
+  console.log("Create cluster: " + JSON.stringify(options));
+  const cluster = createCluster(options);
+
+  clusters[cluster.crn] = cluster;
+
+  await Command.runCommand(`helm install impala-engine --set-string registry=${configuration.registry},tag=${configuration.registryImpalaTag},name=${cluster.clusterName},worker.replicas=${cluster.workerReplicas},hdfs.namenode.host=${cluster.hdfsNamenodeHost},hdfs.namenode.port=${cluster.hdfsNamenodePort} -n ${cluster.clusterName} --repo=${configuration.helmRepo}`);
+
+  return {"cluster": cluster};
+};
+
+async function updateClusterStatus(cluster) {
+  if (cluster.status == 'TERMINATED') {
+    return
+  }
+
+  var deployment;
+  try {
+    // TODO: look the worker deployment up by labels instead of by name
+    deployment = await client.apis.apps.v1.namespaces('default').deployments("impala-worker-" + cluster.clusterName).get();
+  }
+  catch(error) {
+    console.log(error);
+    // The deployment is gone (possibly deleted manually): treat the cluster as terminated.
+    cluster.status = 'TERMINATED';
+    cluster.workerReplicasOnline = 0;
+    return;
+  }
+
+  if (deployment == null) {
+    cluster.status = "STARTING";
+    cluster.workerReplicasOnline = 0;
+    return;
+  }
+
+  const status = deployment.body.status;
+  if (status.readyReplicas == null) {
+    // A deployment that was just launched has undefined readyReplicas.
+    return;
+  }
+
+  if ((cluster.status == "SCALING_UP" || cluster.status == "SCALING_DOWN") && status.replicas != cluster.workerReplicas) {
+    // An update is still in progress; don't touch the status until the deployment's replica count matches the requested one.
+    return;
+  }
+
+  if (status.replicas == status.readyReplicas) {
+    cluster.status = "ONLINE";
+  }
+
+  if (cluster.workerAutoResize) {
+    const hpa = await client.apis.autoscaling.v1.namespaces('default').horizontalpodautoscalers("impala-worker-" + cluster.clusterName).get();
+    const hpaStatus = hpa.body.status;
+    // TODO: polish the status when the HPA is not fully running yet
+    cluster.workerReplicasOnline = hpaStatus.currentReplicas;
+    cluster.workerReplicas = hpaStatus.currentCPUUtilizationPercentage >= 0 ? hpaStatus.desiredReplicas : hpaStatus.currentReplicas;
+    cluster.workercurrentCPUUtilizationPercentage = hpaStatus.currentCPUUtilizationPercentage >= 0 ? hpaStatus.currentCPUUtilizationPercentage : "N/A";
+  } else {
+    cluster.workerReplicasOnline = status.readyReplicas;
+  }
+}
+
+model.describeCluster = async function(options) {
+  console.log("Describe cluster: " + JSON.stringify(options));
+  // The in-memory map is keyed by CRN, so clusterName is expected to be the CRN here.
+  const cluster = clusters[options.clusterName];
+  if (cluster != undefined) {
+    await updateClusterStatus(cluster);
+    return {"cluster": cluster};
+  } // TODO: report an error for unknown clusters
+
+  return {};
+}
+
+
+model.listClusters = async function(options) {
+  var promises = [];
+  var clusterNames = [];
+
+  for (var crn in clusters) {
+    promises.push(updateClusterStatus(clusters[crn]));
+    clusterNames.push(clusters[crn].name);
+  }
+
+  // Also pick up DW clusters created outside of the provisioner.
+  var statefulsets = await client.apis.apps.v1.namespaces('default').statefulsets().get();
+
+  statefulsets.body.items.forEach(function(statefulset) {
+    var name = statefulset['metadata']['name'].substring("impala-coordinator-".length);
+    if (statefulset['metadata']['name'].startsWith("impala-coordinator-") && clusterNames.indexOf(name) == -1) {
+      var cluster = createCluster({clusterName: name});
+      cluster.status = 'ONLINE';
+      cluster.crn = cluster.name; // For now the CRN is just the cluster name
+      clusters[cluster.crn] = cluster;
+    }
+  });
+
+  await Promise.all(promises); // Wait for the status refreshes to land before returning
+
+  return Object.values(clusters);
+}
+
+model.updateCluster = async function(options) {
+  console.log("Update cluster: " + JSON.stringify(options));
+
+  var cluster = clusters[options.clusterName];
+  var command = "";
+
+  if (options.updateClusterAutoResizeChanged) {
+    cluster.workerAutoResize = options.updateClusterAutoResize;
+    if (options.updateClusterAutoResize) {
+      cluster.workerAutoResizeMin = options.updateClusterAutoResizeMin;
+      cluster.workerAutoResizeMax = options.updateClusterAutoResizeMax;
+      cluster.workerAutoResizeCpu = options.updateClusterAutoResizeCpu;
+      command = `kubectl autoscale deployment impala-worker-${cluster.clusterName} --min=${options.updateClusterAutoResizeMin} --max=${options.updateClusterAutoResizeMax} --cpu-percent=${options.updateClusterAutoResizeCpu}`;
+    } else {
+      command = `kubectl delete hpa impala-worker-${cluster.clusterName}`;
+    }
+    // TODO: check SCALING UP/DOWN status too
+  } else {
+    if (cluster.workerReplicas != options.workerReplicas) {
+      let originalReplicas = cluster.workerReplicas;
+      cluster.workerReplicas = options.workerReplicas;
+      if (cluster.workerReplicas > originalReplicas) {
+        cluster.status = "SCALING_UP";
+      } else if (cluster.workerReplicas < originalReplicas) {
+        cluster.status = "SCALING_DOWN";
+      }
+      await Command.runCommand(`helm upgrade ${cluster.clusterName} impala-engine --set-string registry=${configuration.registry},tag=${configuration.registryImpalaTag},name=${cluster.clusterName},worker.replicas=${cluster.workerReplicas},hdfs.namenode.host=${cluster.hdfsNamenodeHost},hdfs.namenode.port=${cluster.hdfsNamenodePort} --repo=${configuration.helmRepo}`);
+    }
+  }
+
+  if (command) {
+    await Command.runCommand(command);
+  }
+
+  return {"cluster": cluster};
+}
+
+model.deleteCluster = async function(options) {
+  console.log("Delete cluster: " + JSON.stringify(options));
+  const cluster = clusters[options.clusterName];
+  cluster.status = "TERMINATING";
+
+  await Command.runCommand(`helm delete --purge ${cluster.clusterName}`);
+  if (options.workerAutoResize) {
+    await Command.runCommand(`kubectl delete hpa impala-worker-${cluster.clusterName}`);
+  }
+
+  return;
+}
+
+
+model.getClusterMetrics = async function(options) {
+  console.log("Metric cluster: " + JSON.stringify(options));
+
+  var metrics = null;
+
+  // Note: the scrapes below are fire-and-forget; they update the Prometheus
+  // gauges when the responses arrive, so the value returned here may lag.
+  Object.keys(clusters).forEach(function(key) {
+    var cluster = clusters[key];
+    console.log("Scraping: " + key);
+    if (cluster.status != "TERMINATED") {
+      request({
+        url: `http://${cluster.coordinatorEndpoint.privateHost}:25000/metrics?json=true`,
+        json: true
+      }, function (error, response, body) {
+        console.log("Scraped: " + error + " " + response);
+        if (!error && response.statusCode === 200) {
+          metrics = body.metric_group.child_groups.filter(group => group.name == "impala-server")[0].metrics;
+
+          var metric = metrics.filter(metric => metric.name == "impala-server.num-queries-registered")[0];
+          impalaQueriesMetrics.labels(cluster.clusterName, 'num-queries-registered').set(metric.value);
+
+          metric = metrics.filter(metric => metric.name == "impala-server.num-queries")[0];
+          impalaQueriesCounter.labels(cluster.clusterName).set(metric.value);
+        }
+      });
+    }
+  });
+
+  return {"metrics": metrics};
+}
+
+module.exports = model;
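
model.js registers its impala_queries gauges in prom-client's default registry, so something must serve that registry for Prometheus to scrape. server.js is not shown in this diff, so the Express route below is a sketch of the expected wiring rather than the actual one (the port is illustrative):

```js
// Sketch: expose the default prom-client registry over HTTP.
const express = require('express');
const Prometheus = require('prom-client');

const app = express();

app.get('/metrics', (req, res) => {
  res.set('Content-Type', Prometheus.register.contentType);
  // register.metrics() is synchronous in prom-client 11.x (the version pinned
  // in package-lock.json below).
  res.send(Prometheus.register.metrics());
});

app.listen(8080, () => console.log('metrics listening on :8080')); // illustrative port
```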

+ 1372 - 0
tools/kubernetes/services/mock-provisioner/package-lock.json

@@ -0,0 +1,1372 @@
+{
+  "name": "cloudera-dw-provisioner",
+  "version": "0.1.0",
+  "lockfileVersion": 1,
+  "requires": true,
+  "dependencies": {
+    "@sindresorhus/is": {
+      "version": "0.7.0",
+      "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz",
+      "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow=="
+    },
+    "accepts": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
+      "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+      "requires": {
+        "mime-types": "~2.1.18",
+        "negotiator": "0.6.1"
+      }
+    },
+    "aggregate-error": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-1.0.0.tgz",
+      "integrity": "sha1-iINE2tAiCnLjr1CQYRf0h3GSX6w=",
+      "requires": {
+        "clean-stack": "^1.0.0",
+        "indent-string": "^3.0.0"
+      }
+    },
+    "ajv": {
+      "version": "4.11.8",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-4.11.8.tgz",
+      "integrity": "sha1-gv+wKynmYq5TvcIK8VlHcGc5xTY=",
+      "requires": {
+        "co": "^4.6.0",
+        "json-stable-stringify": "^1.0.1"
+      }
+    },
+    "argparse": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+      "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+      "requires": {
+        "sprintf-js": "~1.0.2"
+      }
+    },
+    "array-flatten": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+      "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
+    },
+    "asn1": {
+      "version": "0.2.4",
+      "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
+      "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+      "requires": {
+        "safer-buffer": "~2.1.0"
+      }
+    },
+    "assert-plus": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
+      "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU="
+    },
+    "asynckit": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+      "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k="
+    },
+    "aws-sign2": {
+      "version": "0.7.0",
+      "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
+      "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg="
+    },
+    "aws4": {
+      "version": "1.8.0",
+      "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz",
+      "integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ=="
+    },
+    "base64url": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.0.tgz",
+      "integrity": "sha512-LIVmqIrIWuiqTvn4RzcrwCOuHo2DD6tKmKBPXXlr4p4n4l6BZBkwFTIa3zu1XkX5MbZgro4a6BvPi+n2Mns5Gg=="
+    },
+    "basic-auth": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.0.tgz",
+      "integrity": "sha1-AV2z81PgLlY3d1X5YnQuiYHnu7o=",
+      "requires": {
+        "safe-buffer": "5.1.1"
+      }
+    },
+    "bcrypt-pbkdf": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+      "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
+      "optional": true,
+      "requires": {
+        "tweetnacl": "^0.14.3"
+      }
+    },
+    "bintrees": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.1.tgz",
+      "integrity": "sha1-DmVcm5wkNeqraL9AJyJtK1WjRSQ="
+    },
+    "body-parse": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/body-parse/-/body-parse-0.1.0.tgz",
+      "integrity": "sha512-k0PDF7vZZpspXlwoM8ywh9PIHZokooS0Rek4M8Vekoro7XuuaWVhjgTpdzIRrfKj5oLQahwjn621/4kG4d91xw=="
+    },
+    "body-parser": {
+      "version": "1.18.3",
+      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz",
+      "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=",
+      "requires": {
+        "bytes": "3.0.0",
+        "content-type": "~1.0.4",
+        "debug": "2.6.9",
+        "depd": "~1.1.2",
+        "http-errors": "~1.6.3",
+        "iconv-lite": "0.4.23",
+        "on-finished": "~2.3.0",
+        "qs": "6.5.2",
+        "raw-body": "2.3.3",
+        "type-is": "~1.6.16"
+      },
+      "dependencies": {
+        "qs": {
+          "version": "6.5.2",
+          "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+          "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
+        }
+      }
+    },
+    "bytes": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+      "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
+    },
+    "cacheable-request": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz",
+      "integrity": "sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0=",
+      "requires": {
+        "clone-response": "1.0.2",
+        "get-stream": "3.0.0",
+        "http-cache-semantics": "3.8.1",
+        "keyv": "3.0.0",
+        "lowercase-keys": "1.0.0",
+        "normalize-url": "2.0.1",
+        "responselike": "1.0.2"
+      },
+      "dependencies": {
+        "lowercase-keys": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz",
+          "integrity": "sha1-TjNms55/VFfjXxMkvfb4jQv8cwY="
+        }
+      }
+    },
+    "caseless": {
+      "version": "0.12.0",
+      "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
+      "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw="
+    },
+    "clean-stack": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-1.3.0.tgz",
+      "integrity": "sha1-noIVAa6XmYbEax1m0tQy2y/UrjE="
+    },
+    "clone-response": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz",
+      "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=",
+      "requires": {
+        "mimic-response": "^1.0.0"
+      }
+    },
+    "co": {
+      "version": "4.6.0",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ="
+    },
+    "combined-stream": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz",
+      "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=",
+      "requires": {
+        "delayed-stream": "~1.0.0"
+      }
+    },
+    "content-disposition": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+      "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
+    },
+    "content-type": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+      "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
+    },
+    "cookie": {
+      "version": "0.3.1",
+      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
+      "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
+    },
+    "cookie-signature": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+      "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
+    },
+    "core-util-is": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+      "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
+    },
+    "dashdash": {
+      "version": "1.14.1",
+      "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+      "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
+      "requires": {
+        "assert-plus": "^1.0.0"
+      }
+    },
+    "debug": {
+      "version": "2.6.9",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+      "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+      "requires": {
+        "ms": "2.0.0"
+      }
+    },
+    "decode-uri-component": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+      "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU="
+    },
+    "decompress-response": {
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+      "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=",
+      "requires": {
+        "mimic-response": "^1.0.0"
+      }
+    },
+    "delayed-stream": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+      "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk="
+    },
+    "depd": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+      "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
+    },
+    "destroy": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+      "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
+    },
+    "duplexer3": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
+      "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI="
+    },
+    "ecc-jsbn": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+      "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
+      "optional": true,
+      "requires": {
+        "jsbn": "~0.1.0",
+        "safer-buffer": "^2.1.0"
+      }
+    },
+    "ee-first": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+      "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
+    },
+    "encodeurl": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+      "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
+    },
+    "es6-promise": {
+      "version": "4.2.5",
+      "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.5.tgz",
+      "integrity": "sha512-n6wvpdE43VFtJq+lUDYDBFUwV8TZbuGXLV4D6wKafg13ldznKsyEvatubnmUe31zcvelSzOHF+XbaT+Bl9ObDg=="
+    },
+    "escape-html": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+      "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
+    },
+    "esprima": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+      "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A=="
+    },
+    "etag": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+      "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
+    },
+    "express": {
+      "version": "4.16.3",
+      "resolved": "http://registry.npmjs.org/express/-/express-4.16.3.tgz",
+      "integrity": "sha1-avilAjUNsyRuzEvs9rWjTSL37VM=",
+      "requires": {
+        "accepts": "~1.3.5",
+        "array-flatten": "1.1.1",
+        "body-parser": "1.18.2",
+        "content-disposition": "0.5.2",
+        "content-type": "~1.0.4",
+        "cookie": "0.3.1",
+        "cookie-signature": "1.0.6",
+        "debug": "2.6.9",
+        "depd": "~1.1.2",
+        "encodeurl": "~1.0.2",
+        "escape-html": "~1.0.3",
+        "etag": "~1.8.1",
+        "finalhandler": "1.1.1",
+        "fresh": "0.5.2",
+        "merge-descriptors": "1.0.1",
+        "methods": "~1.1.2",
+        "on-finished": "~2.3.0",
+        "parseurl": "~1.3.2",
+        "path-to-regexp": "0.1.7",
+        "proxy-addr": "~2.0.3",
+        "qs": "6.5.1",
+        "range-parser": "~1.2.0",
+        "safe-buffer": "5.1.1",
+        "send": "0.16.2",
+        "serve-static": "1.13.2",
+        "setprototypeof": "1.1.0",
+        "statuses": "~1.4.0",
+        "type-is": "~1.6.16",
+        "utils-merge": "1.0.1",
+        "vary": "~1.1.2"
+      },
+      "dependencies": {
+        "body-parser": {
+          "version": "1.18.2",
+          "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz",
+          "integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=",
+          "requires": {
+            "bytes": "3.0.0",
+            "content-type": "~1.0.4",
+            "debug": "2.6.9",
+            "depd": "~1.1.1",
+            "http-errors": "~1.6.2",
+            "iconv-lite": "0.4.19",
+            "on-finished": "~2.3.0",
+            "qs": "6.5.1",
+            "raw-body": "2.3.2",
+            "type-is": "~1.6.15"
+          }
+        },
+        "iconv-lite": {
+          "version": "0.4.19",
+          "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz",
+          "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ=="
+        },
+        "raw-body": {
+          "version": "2.3.2",
+          "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz",
+          "integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=",
+          "requires": {
+            "bytes": "3.0.0",
+            "http-errors": "1.6.2",
+            "iconv-lite": "0.4.19",
+            "unpipe": "1.0.0"
+          },
+          "dependencies": {
+            "depd": {
+              "version": "1.1.1",
+              "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz",
+              "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k="
+            },
+            "http-errors": {
+              "version": "1.6.2",
+              "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz",
+              "integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=",
+              "requires": {
+                "depd": "1.1.1",
+                "inherits": "2.0.3",
+                "setprototypeof": "1.0.3",
+                "statuses": ">= 1.3.1 < 2"
+              }
+            },
+            "setprototypeof": {
+              "version": "1.0.3",
+              "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz",
+              "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ="
+            }
+          }
+        }
+      }
+    },
+    "extend": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+      "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+    },
+    "extsprintf": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
+      "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU="
+    },
+    "fast-deep-equal": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz",
+      "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ="
+    },
+    "fast-json-stable-stringify": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz",
+      "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I="
+    },
+    "finalhandler": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
+      "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==",
+      "requires": {
+        "debug": "2.6.9",
+        "encodeurl": "~1.0.2",
+        "escape-html": "~1.0.3",
+        "on-finished": "~2.3.0",
+        "parseurl": "~1.3.2",
+        "statuses": "~1.4.0",
+        "unpipe": "~1.0.0"
+      }
+    },
+    "fluent-openapi": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/fluent-openapi/-/fluent-openapi-0.1.1.tgz",
+      "integrity": "sha512-JTnCiuaAm66gGuizv4g46MqN1QVldPxySn9DxZ+hCjMaEGFx6ciJhz0iMfS5iv1YKqZo5pBW6qFJ9Zu0jaqvaw==",
+      "requires": {
+        "lodash.merge": "^4.6.1"
+      }
+    },
+    "forever-agent": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
+      "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE="
+    },
+    "form-data": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz",
+      "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=",
+      "requires": {
+        "asynckit": "^0.4.0",
+        "combined-stream": "1.0.6",
+        "mime-types": "^2.1.12"
+      }
+    },
+    "forwarded": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
+      "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
+    },
+    "fresh": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+      "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
+    },
+    "from2": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
+      "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
+      "requires": {
+        "inherits": "^2.0.1",
+        "readable-stream": "^2.0.0"
+      }
+    },
+    "get-stream": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz",
+      "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ="
+    },
+    "getpass": {
+      "version": "0.1.7",
+      "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+      "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
+      "requires": {
+        "assert-plus": "^1.0.0"
+      }
+    },
+    "got": {
+      "version": "8.3.2",
+      "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz",
+      "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==",
+      "requires": {
+        "@sindresorhus/is": "^0.7.0",
+        "cacheable-request": "^2.1.1",
+        "decompress-response": "^3.3.0",
+        "duplexer3": "^0.1.4",
+        "get-stream": "^3.0.0",
+        "into-stream": "^3.1.0",
+        "is-retry-allowed": "^1.1.0",
+        "isurl": "^1.0.0-alpha5",
+        "lowercase-keys": "^1.0.0",
+        "mimic-response": "^1.0.0",
+        "p-cancelable": "^0.4.0",
+        "p-timeout": "^2.0.1",
+        "pify": "^3.0.0",
+        "safe-buffer": "^5.1.1",
+        "timed-out": "^4.0.1",
+        "url-parse-lax": "^3.0.0",
+        "url-to-options": "^1.0.1"
+      }
+    },
+    "har-schema": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
+      "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI="
+    },
+    "har-validator": {
+      "version": "5.1.0",
+      "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.0.tgz",
+      "integrity": "sha512-+qnmNjI4OfH2ipQ9VQOw23bBd/ibtfbVdK2fYbY4acTDqKTW/YDp9McimZdDbG8iV9fZizUqQMD5xvriB146TA==",
+      "requires": {
+        "ajv": "^5.3.0",
+        "har-schema": "^2.0.0"
+      },
+      "dependencies": {
+        "ajv": {
+          "version": "5.5.2",
+          "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz",
+          "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=",
+          "requires": {
+            "co": "^4.6.0",
+            "fast-deep-equal": "^1.0.0",
+            "fast-json-stable-stringify": "^2.0.0",
+            "json-schema-traverse": "^0.3.0"
+          }
+        }
+      }
+    },
+    "has-symbol-support-x": {
+      "version": "1.4.2",
+      "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz",
+      "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw=="
+    },
+    "has-to-string-tag-x": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz",
+      "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==",
+      "requires": {
+        "has-symbol-support-x": "^1.4.1"
+      }
+    },
+    "http-cache-semantics": {
+      "version": "3.8.1",
+      "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz",
+      "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w=="
+    },
+    "http-errors": {
+      "version": "1.6.3",
+      "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+      "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+      "requires": {
+        "depd": "~1.1.2",
+        "inherits": "2.0.3",
+        "setprototypeof": "1.1.0",
+        "statuses": ">= 1.4.0 < 2"
+      }
+    },
+    "http-signature": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
+      "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
+      "requires": {
+        "assert-plus": "^1.0.0",
+        "jsprim": "^1.2.2",
+        "sshpk": "^1.7.0"
+      }
+    },
+    "iconv-lite": {
+      "version": "0.4.23",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
+      "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
+      "requires": {
+        "safer-buffer": ">= 2.1.2 < 3"
+      }
+    },
+    "indent-string": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz",
+      "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok="
+    },
+    "inherits": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+    },
+    "into-stream": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz",
+      "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=",
+      "requires": {
+        "from2": "^2.1.1",
+        "p-is-promise": "^1.1.0"
+      }
+    },
+    "ipaddr.js": {
+      "version": "1.8.0",
+      "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz",
+      "integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4="
+    },
+    "is-object": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz",
+      "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA="
+    },
+    "is-plain-obj": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+      "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4="
+    },
+    "is-retry-allowed": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz",
+      "integrity": "sha1-EaBgVotnM5REAz0BJaYaINVk+zQ="
+    },
+    "is-typedarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+      "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo="
+    },
+    "isarray": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+      "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8="
+    },
+    "isstream": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
+      "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo="
+    },
+    "isurl": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz",
+      "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==",
+      "requires": {
+        "has-to-string-tag-x": "^1.2.0",
+        "is-object": "^1.0.1"
+      }
+    },
+    "js-yaml": {
+      "version": "3.12.0",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz",
+      "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==",
+      "requires": {
+        "argparse": "^1.0.7",
+        "esprima": "^4.0.0"
+      }
+    },
+    "jsbn": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+      "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=",
+      "optional": true
+    },
+    "json-buffer": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+      "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg="
+    },
+    "json-schema": {
+      "version": "0.2.3",
+      "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
+      "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM="
+    },
+    "json-schema-traverse": {
+      "version": "0.3.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz",
+      "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A="
+    },
+    "json-stable-stringify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz",
+      "integrity": "sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=",
+      "requires": {
+        "jsonify": "~0.0.0"
+      }
+    },
+    "json-stringify-safe": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+      "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus="
+    },
+    "jsonify": {
+      "version": "0.0.0",
+      "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz",
+      "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM="
+    },
+    "jsprim": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
+      "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
+      "requires": {
+        "assert-plus": "1.0.0",
+        "extsprintf": "1.3.0",
+        "json-schema": "0.2.3",
+        "verror": "1.10.0"
+      }
+    },
+    "keyv": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz",
+      "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==",
+      "requires": {
+        "json-buffer": "3.0.0"
+      }
+    },
+    "kubernetes-client": {
+      "version": "6.1.0",
+      "resolved": "https://registry.npmjs.org/kubernetes-client/-/kubernetes-client-6.1.0.tgz",
+      "integrity": "sha512-H4Phnw7zWV3nYzvMqc84DDexr1Da8sRokPWoDLxLuVJSTPnqVuXpAPAWPqNcZzZdm4kWddTfKTjpg8iOYHiDDQ==",
+      "requires": {
+        "fluent-openapi": "0.1.1",
+        "js-yaml": "^3.10.0",
+        "openid-client": "^2.0.0",
+        "request": "^2.83.0"
+      }
+    },
+    "lodash": {
+      "version": "4.17.11",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
+      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg=="
+    },
+    "lodash.assign": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/lodash.assign/-/lodash.assign-4.2.0.tgz",
+      "integrity": "sha1-DZnzzNem0mHRm9rrkkUAXShYCOc="
+    },
+    "lodash.clone": {
+      "version": "4.5.0",
+      "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz",
+      "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y="
+    },
+    "lodash.fill": {
+      "version": "3.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.fill/-/lodash.fill-3.4.0.tgz",
+      "integrity": "sha1-o8dK5kDQU63w3CB5+HIHiOi/74U="
+    },
+    "lodash.flatten": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz",
+      "integrity": "sha1-8xwiIlqWMtK7+OSt2+8kCqdlph8="
+    },
+    "lodash.intersection": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.intersection/-/lodash.intersection-4.4.0.tgz",
+      "integrity": "sha1-ChG6Yx0OlcI8fy9Mu5ppLtF45wU="
+    },
+    "lodash.merge": {
+      "version": "4.6.1",
+      "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.1.tgz",
+      "integrity": "sha512-AOYza4+Hf5z1/0Hztxpm2/xiPZgi/cjMqdnKTUWTBSKchJlxXXuUSxCCl8rJlf4g6yww/j6mA8nC8Hw/EZWxKQ=="
+    },
+    "lodash.omit": {
+      "version": "4.5.0",
+      "resolved": "https://registry.npmjs.org/lodash.omit/-/lodash.omit-4.5.0.tgz",
+      "integrity": "sha1-brGa5aHuHdnfC5aeZs4Lf6MLXmA="
+    },
+    "lodash.partialright": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/lodash.partialright/-/lodash.partialright-4.2.1.tgz",
+      "integrity": "sha1-ATDYDoM2MmTUAHTzKbij56ihzEs="
+    },
+    "lodash.pick": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz",
+      "integrity": "sha1-UvBWEP/53tQiYRRB7R/BI6AwAbM="
+    },
+    "lodash.uniq": {
+      "version": "4.5.0",
+      "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
+      "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M="
+    },
+    "long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
+    },
+    "lowercase-keys": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+      "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA=="
+    },
+    "lru-cache": {
+      "version": "4.1.3",
+      "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.3.tgz",
+      "integrity": "sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA==",
+      "requires": {
+        "pseudomap": "^1.0.2",
+        "yallist": "^2.1.2"
+      }
+    },
+    "media-typer": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+      "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
+    },
+    "merge-descriptors": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+      "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
+    },
+    "methods": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+      "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
+    },
+    "mime": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
+      "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
+    },
+    "mime-db": {
+      "version": "1.36.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.36.0.tgz",
+      "integrity": "sha512-L+xvyD9MkoYMXb1jAmzI/lWYAxAMCPvIBSWur0PZ5nOf5euahRLVqH//FKW9mWp2lkqUgYiXPgkzfMUFi4zVDw=="
+    },
+    "mime-types": {
+      "version": "2.1.20",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.20.tgz",
+      "integrity": "sha512-HrkrPaP9vGuWbLK1B1FfgAkbqNjIuy4eHlIYnFi7kamZyLLrGlo2mpcx0bBmNpKqBtYtAfGbodDddIgddSJC2A==",
+      "requires": {
+        "mime-db": "~1.36.0"
+      }
+    },
+    "mimic-response": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+      "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ=="
+    },
+    "morgan": {
+      "version": "1.9.1",
+      "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.9.1.tgz",
+      "integrity": "sha512-HQStPIV4y3afTiCYVxirakhlCfGkI161c76kKFca7Fk1JusM//Qeo1ej2XaMniiNeaZklMVrh3vTtIzpzwbpmA==",
+      "requires": {
+        "basic-auth": "~2.0.0",
+        "debug": "2.6.9",
+        "depd": "~1.1.2",
+        "on-finished": "~2.3.0",
+        "on-headers": "~1.0.1"
+      }
+    },
+    "ms": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+    },
+    "negotiator": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
+      "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
+    },
+    "node-forge": {
+      "version": "0.7.6",
+      "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.7.6.tgz",
+      "integrity": "sha512-sol30LUpz1jQFBjOKwbjxijiE3b6pjd74YwfD0fJOKPjF+fONKb2Yg8rYgS6+bK6VDl+/wfr4IYpC7jDzLUIfw=="
+    },
+    "node-jose": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/node-jose/-/node-jose-1.0.0.tgz",
+      "integrity": "sha512-RE3P8l60Rj9ELrpPmvw6sOQ1hSyYfmQdNUMCa4EN7nCE1ux5JVX+GfXv+mfUTEMhZwNMwxBtI0+X1CKKeukSVQ==",
+      "requires": {
+        "base64url": "^3.0.0",
+        "es6-promise": "^4.0.5",
+        "lodash.assign": "^4.0.8",
+        "lodash.clone": "^4.3.2",
+        "lodash.fill": "^3.2.2",
+        "lodash.flatten": "^4.2.0",
+        "lodash.intersection": "^4.1.2",
+        "lodash.merge": "^4.3.5",
+        "lodash.omit": "^4.2.1",
+        "lodash.partialright": "^4.1.3",
+        "lodash.pick": "^4.2.0",
+        "lodash.uniq": "^4.2.1",
+        "long": "^4.0.0",
+        "node-forge": "^0.7.1",
+        "uuid": "^3.0.1"
+      }
+    },
+    "normalize-url": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz",
+      "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==",
+      "requires": {
+        "prepend-http": "^2.0.0",
+        "query-string": "^5.0.1",
+        "sort-keys": "^2.0.0"
+      }
+    },
+    "oauth-sign": {
+      "version": "0.9.0",
+      "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
+      "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ=="
+    },
+    "object-assign": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+      "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
+    },
+    "oidc-token-hash": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-3.0.1.tgz",
+      "integrity": "sha512-oLnVSEcNZkw01sB5aFR+2iJmW4oyC1PIMJmd3FMBGDuPTy5ZtEuX5WNhKMRarJIMOq8NiOwIB6eJB9AhgYwBTg==",
+      "requires": {
+        "base64url": "^3.0.0"
+      }
+    },
+    "on-finished": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+      "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+      "requires": {
+        "ee-first": "1.1.1"
+      }
+    },
+    "on-headers": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz",
+      "integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c="
+    },
+    "openid-client": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-2.4.1.tgz",
+      "integrity": "sha512-x4YkoRVGLWG7VfLdm6abHV8LpcWU6BSQYmUaQcZX9sLrFHuhytaoiz/4Lf6VtrSA6/9qL4OaUMe/mgCwh2LwCw==",
+      "requires": {
+        "base64url": "^3.0.0",
+        "got": "^8.3.1",
+        "lodash": "^4.17.10",
+        "lru-cache": "^4.1.3",
+        "node-jose": "^1.0.0",
+        "oidc-token-hash": "^3.0.1",
+        "p-any": "^1.1.0",
+        "uuid": "^3.2.1"
+      }
+    },
+    "p-any": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/p-any/-/p-any-1.1.0.tgz",
+      "integrity": "sha512-Ef0tVa4CZ5pTAmKn+Cg3w8ABBXh+hHO1aV8281dKOoUHfX+3tjG2EaFcC+aZyagg9b4EYGsHEjz21DnEE8Og2g==",
+      "requires": {
+        "p-some": "^2.0.0"
+      }
+    },
+    "p-cancelable": {
+      "version": "0.4.1",
+      "resolved": "http://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz",
+      "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ=="
+    },
+    "p-finally": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+      "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4="
+    },
+    "p-is-promise": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz",
+      "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4="
+    },
+    "p-some": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/p-some/-/p-some-2.0.1.tgz",
+      "integrity": "sha1-Zdh8ixVO289SIdFnd4ttLhUPbwY=",
+      "requires": {
+        "aggregate-error": "^1.0.0"
+      }
+    },
+    "p-timeout": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz",
+      "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==",
+      "requires": {
+        "p-finally": "^1.0.0"
+      }
+    },
+    "parseurl": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
+      "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
+    },
+    "path-to-regexp": {
+      "version": "0.1.7",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+      "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
+    },
+    "performance-now": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
+      "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns="
+    },
+    "pify": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+      "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY="
+    },
+    "prepend-http": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+      "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc="
+    },
+    "prettier": {
+      "version": "1.14.3",
+      "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.14.3.tgz",
+      "integrity": "sha512-qZDVnCrnpsRJJq5nSsiHCE3BYMED2OtsI+cmzIzF1QIfqm5ALf8tEJcO27zV1gKNKRPdhjO0dNWnrzssDQ1tFg==",
+      "dev": true
+    },
+    "process-nextick-args": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+      "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw=="
+    },
+    "prom-client": {
+      "version": "11.2.0",
+      "resolved": "https://registry.npmjs.org/prom-client/-/prom-client-11.2.0.tgz",
+      "integrity": "sha512-4gUAq/GR5C8q5eWxOa7tA60AtmkMpbyBd/2btCayvd3h/7HzS0p/kESKRwggJgbFrfdhTCBpOwPAwKiI01Q0VQ==",
+      "requires": {
+        "tdigest": "^0.1.1"
+      }
+    },
+    "proxy-addr": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz",
+      "integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==",
+      "requires": {
+        "forwarded": "~0.1.2",
+        "ipaddr.js": "1.8.0"
+      }
+    },
+    "pseudomap": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
+      "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM="
+    },
+    "psl": {
+      "version": "1.1.29",
+      "resolved": "https://registry.npmjs.org/psl/-/psl-1.1.29.tgz",
+      "integrity": "sha512-AeUmQ0oLN02flVHXWh9sSJF7mcdFq0ppid/JkErufc3hGIV/AMa8Fo9VgDo/cT2jFdOWoFvHp90qqBH54W+gjQ=="
+    },
+    "punycode": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+      "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4="
+    },
+    "qs": {
+      "version": "6.5.1",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz",
+      "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A=="
+    },
+    "query-string": {
+      "version": "5.1.1",
+      "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz",
+      "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==",
+      "requires": {
+        "decode-uri-component": "^0.2.0",
+        "object-assign": "^4.1.0",
+        "strict-uri-encode": "^1.0.0"
+      }
+    },
+    "range-parser": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
+      "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
+    },
+    "raw-body": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
+      "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
+      "requires": {
+        "bytes": "3.0.0",
+        "http-errors": "1.6.3",
+        "iconv-lite": "0.4.23",
+        "unpipe": "1.0.0"
+      }
+    },
+    "readable-stream": {
+      "version": "2.3.6",
+      "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+      "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
+      "requires": {
+        "core-util-is": "~1.0.0",
+        "inherits": "~2.0.3",
+        "isarray": "~1.0.0",
+        "process-nextick-args": "~2.0.0",
+        "safe-buffer": "~5.1.1",
+        "string_decoder": "~1.1.1",
+        "util-deprecate": "~1.0.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+        }
+      }
+    },
+    "request": {
+      "version": "2.88.0",
+      "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz",
+      "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==",
+      "requires": {
+        "aws-sign2": "~0.7.0",
+        "aws4": "^1.8.0",
+        "caseless": "~0.12.0",
+        "combined-stream": "~1.0.6",
+        "extend": "~3.0.2",
+        "forever-agent": "~0.6.1",
+        "form-data": "~2.3.2",
+        "har-validator": "~5.1.0",
+        "http-signature": "~1.2.0",
+        "is-typedarray": "~1.0.0",
+        "isstream": "~0.1.2",
+        "json-stringify-safe": "~5.0.1",
+        "mime-types": "~2.1.19",
+        "oauth-sign": "~0.9.0",
+        "performance-now": "^2.1.0",
+        "qs": "~6.5.2",
+        "safe-buffer": "^5.1.2",
+        "tough-cookie": "~2.4.3",
+        "tunnel-agent": "^0.6.0",
+        "uuid": "^3.3.2"
+      },
+      "dependencies": {
+        "qs": {
+          "version": "6.5.2",
+          "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+          "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
+        },
+        "safe-buffer": {
+          "version": "5.1.2",
+          "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+          "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+        }
+      }
+    },
+    "responselike": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+      "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=",
+      "requires": {
+        "lowercase-keys": "^1.0.0"
+      }
+    },
+    "safe-buffer": {
+      "version": "5.1.1",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
+      "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg=="
+    },
+    "safer-buffer": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+    },
+    "send": {
+      "version": "0.16.2",
+      "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz",
+      "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==",
+      "requires": {
+        "debug": "2.6.9",
+        "depd": "~1.1.2",
+        "destroy": "~1.0.4",
+        "encodeurl": "~1.0.2",
+        "escape-html": "~1.0.3",
+        "etag": "~1.8.1",
+        "fresh": "0.5.2",
+        "http-errors": "~1.6.2",
+        "mime": "1.4.1",
+        "ms": "2.0.0",
+        "on-finished": "~2.3.0",
+        "range-parser": "~1.2.0",
+        "statuses": "~1.4.0"
+      }
+    },
+    "serve-static": {
+      "version": "1.13.2",
+      "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz",
+      "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==",
+      "requires": {
+        "encodeurl": "~1.0.2",
+        "escape-html": "~1.0.3",
+        "parseurl": "~1.3.2",
+        "send": "0.16.2"
+      }
+    },
+    "setprototypeof": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+      "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
+    },
+    "sort-keys": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz",
+      "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=",
+      "requires": {
+        "is-plain-obj": "^1.0.0"
+      }
+    },
+    "sprintf-js": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+      "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw="
+    },
+    "sshpk": {
+      "version": "1.14.2",
+      "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.14.2.tgz",
+      "integrity": "sha1-xvxhZIo9nE52T9P8306hBeSSupg=",
+      "requires": {
+        "asn1": "~0.2.3",
+        "assert-plus": "^1.0.0",
+        "bcrypt-pbkdf": "^1.0.0",
+        "dashdash": "^1.12.0",
+        "ecc-jsbn": "~0.1.1",
+        "getpass": "^0.1.1",
+        "jsbn": "~0.1.0",
+        "safer-buffer": "^2.0.2",
+        "tweetnacl": "~0.14.0"
+      }
+    },
+    "statuses": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz",
+      "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew=="
+    },
+    "strict-uri-encode": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz",
+      "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM="
+    },
+    "string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "requires": {
+        "safe-buffer": "~5.1.0"
+      }
+    },
+    "swagger-express-validator": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/swagger-express-validator/-/swagger-express-validator-0.1.0.tgz",
+      "integrity": "sha512-HSwg8qHhZxl1EKPMTHaFkbbpvcSaXZ3y21g+8bB+HMCVxVFD1+FA8MC1Ix41Du82AFACaiSXriNTk86o+RQhNg==",
+      "requires": {
+        "ajv": "^4.10.3",
+        "debug": "^2.6.0",
+        "lodash": "^4.17.3",
+        "path-to-regexp": "^1.7.0",
+        "validator": "^6.2.1"
+      },
+      "dependencies": {
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        }
+      }
+    },
+    "tdigest": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.1.tgz",
+      "integrity": "sha1-Ljyyw56kSeVdHmzZEReszKRYgCE=",
+      "requires": {
+        "bintrees": "1.0.1"
+      }
+    },
+    "timed-out": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz",
+      "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8="
+    },
+    "tough-cookie": {
+      "version": "2.4.3",
+      "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz",
+      "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==",
+      "requires": {
+        "psl": "^1.1.24",
+        "punycode": "^1.4.1"
+      }
+    },
+    "tunnel-agent": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
+      "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
+      "requires": {
+        "safe-buffer": "^5.0.1"
+      }
+    },
+    "tweetnacl": {
+      "version": "0.14.5",
+      "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+      "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
+      "optional": true
+    },
+    "type-is": {
+      "version": "1.6.16",
+      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
+      "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
+      "requires": {
+        "media-typer": "0.3.0",
+        "mime-types": "~2.1.18"
+      }
+    },
+    "unpipe": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+      "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
+    },
+    "url-parse-lax": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+      "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=",
+      "requires": {
+        "prepend-http": "^2.0.0"
+      }
+    },
+    "url-to-options": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz",
+      "integrity": "sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k="
+    },
+    "util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
+    },
+    "utils-merge": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+      "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
+    },
+    "uuid": {
+      "version": "3.3.2",
+      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
+      "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA=="
+    },
+    "validator": {
+      "version": "6.3.0",
+      "resolved": "https://registry.npmjs.org/validator/-/validator-6.3.0.tgz",
+      "integrity": "sha1-R84j7Y1Ord+p1LjvAHG2zxB418g="
+    },
+    "vary": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+      "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
+    },
+    "verror": {
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
+      "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
+      "requires": {
+        "assert-plus": "^1.0.0",
+        "core-util-is": "1.0.2",
+        "extsprintf": "^1.2.0"
+      }
+    },
+    "yallist": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
+      "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI="
+    }
+  }
+}

+ 26 - 0
tools/kubernetes/services/mock-provisioner/package.json

@@ -0,0 +1,26 @@
+{
+  "name": "cloudera-dw-provisioner",
+  "version": "0.1.0",
+  "description": "Cloudera Data Warehouse Provisioning Service",
+  "main": "server.js",
+  "scripts": {
+    "test": "echo \"Error: no test specified\" && exit 1",
+    "start": "node server.js"
+  },
+  "author": "Cloudera, Inc.",
+  "dependencies": {
+    "body-parse": "^0.1.0",
+    "body-parser": "^1.18.3",
+    "express": "^4.16.3",
+    "js-yaml": "^3.12.0",
+    "kubernetes-client": "^6.1.0",
+    "morgan": "^1.9.1",
+    "prom-client": "^11.2.0",
+    "request": "^2.88.0",
+    "swagger-express-validator": "^0.1.0",
+    "uuid": "^3.3.2"
+  },
+  "devDependencies": {
+    "prettier": "1.14.3"
+  }
+}

+ 419 - 0
tools/kubernetes/services/mock-provisioner/provisioner.yaml

@@ -0,0 +1,419 @@
+swagger: '2.0'
+x-interface-model: cluster
+x-endpoint-name: dw
+x-releases: DW,SDX
+info:
+  version: 0.1-SNAPSHOT
+  title: Cloudera Data Warehouse
+  license:
+    name: Apache 2.0
+  description: Cloudera Data Warehouse is a service that makes it easy to analyze large amounts of data efficiently.
+schemes:
+  - http
+consumes:
+  - application/json
+produces:
+  - application/json
+paths:
+  /dw/createCluster:
+    post:
+      summary: Creates a new cluster.
+      description: Creates a new cluster.
+      operationId: createCluster
+      x-entitlement: DW
+      parameters:
+        - name: input
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/CreateClusterRequest'
+      responses:
+        200:
+          description: Expected response to a valid request.
+          schema:
+            $ref: '#/definitions/CreateClusterResponse'
+        default:
+          description: The default response on an error.
+          schema:
+            $ref: '#/definitions/Error'
+  /dw/deleteCluster:
+    post:
+      summary: Deletes a cluster.
+      description: Deletes a cluster.
+      operationId: deleteCluster
+      parameters:
+        - name: input
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/DeleteClusterRequest'
+      responses:
+        200:
+          description: Expected response to a valid request.
+          schema:
+            $ref: '#/definitions/DeleteClusterResponse'
+        default:
+          description: The default response on an error.
+          schema:
+            $ref: '#/definitions/Error'
+  /dw/listClusters:
+    post:
+      summary: Lists clusters.
+      description: Lists clusters. If no cluster names are specified, the call lists all clusters.
+      operationId: listClusters
+      x-right: dw/listClusters
+      x-paginates: true
+      x-paging-default-max-items: 100
+      parameters:
+        - name: input
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/ListClustersRequest'
+      responses:
+        200:
+          description: Expected response to a valid request.
+          schema:
+            $ref: '#/definitions/ListClustersResponse'
+        default:
+          description: The default response on an error.
+          schema:
+            $ref: '#/definitions/Error'
+  /dw/describeCluster:
+    post:
+      summary: Describe a cluster.
+      description: Describe a cluster.
+      operationId: describeCluster
+      x-right: dw/describeCluster
+      parameters:
+        - name: input
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/DescribeClusterRequest'
+      responses:
+        200:
+          description: Expected response to a valid request.
+          schema:
+            $ref: '#/definitions/DescribeClusterResponse'
+        default:
+          description: The default response on an error.
+          schema:
+            $ref: '#/definitions/Error'
+  /dw/updateCluster:
+    post:
+      summary: Update a cluster.
+      description: Update a cluster.
+      operationId: updateCluster
+      x-right: dw/updateClusters
+      parameters:
+        - name: input
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/UpdateClusterRequest'
+      responses:
+        200:
+          description: Expected response to a valid request.
+          schema:
+            $ref: '#/definitions/UpdateClusterResponse'
+        default:
+          description: The default response on an error.
+          schema:
+            $ref: '#/definitions/Error'
+definitions:
+  Error:
+    type: object
+    description: An object returned on an error.
+    properties:
+      code:
+        type: string
+        description: The error code.
+      message:
+        type: string
+        description: The error message.
+  ClusterSummary:
+    type: object
+    description: Information about a cluster.
+    required:
+      - clusterName
+      - crn
+      - creationDate
+      - cdhVersion
+      - workerCpuCores
+      - workerMemoryInGib
+      - workerReplicas
+      - workerReplicasOnline
+      - workerAutoResize
+      - status
+    properties:
+      clusterName:
+        type: string
+        description: The name of the cluster.
+      crn:
+        type: string
+        description: The CRN of the cluster.
+      creationDate:
+        type: string
+        format: date-time
+        description: The date when the cluster was created.
+      status:
+        $ref: '#/definitions/ClusterStatus'
+        description: The status of the cluster.
+      cdhVersion:
+        type: string
+        description: The CDH version for the cluster.
+      workerCpuCores:
+        type: integer
+        format: int32
+        description: CPU cores per worker.
+      workerMemoryInGib:
+        type: integer
+        format: int32
+        description: Memory in GiB per worker.
+      workerReplicas:
+        type: integer
+        format: int32
+        description: Number of worker replicas.
+      workerReplicasOnline:
+        type: integer
+        format: int32
+        description: Number of worker replicas currently online.
+      workerAutoResize:
+        type: boolean
+        description: Whether the number of worker replicas auto-scales.
+  Cluster:
+    type: object
+    description: Information about a cluster.
+    required:
+      - clusterName
+      - crn
+      - creationDate
+      - cdhVersion
+      - workerCpuCores
+      - workerMemoryInGib
+      - workerReplicas
+      - workerReplicasOnline
+      - workerAutoResize
+      - status
+    properties:
+      clusterName:
+        type: string
+        description: The name of the cluster.
+      crn:
+        type: string
+        description: The CRN of the cluster.
+      creationDate:
+        type: string
+        format: date-time
+        description: The date when the cluster was created.
+      status:
+        $ref: '#/definitions/ClusterStatus'
+        description: The status of the cluster.
+      cdhVersion:
+        type: string
+        description: The CDH version for the cluster.
+      workerCpuCores:
+        type: integer
+        format: int32
+        description: CPU cores per worker.
+      workerMemoryInGib:
+        type: integer
+        format: int32
+        description: Memory in GiB per worker.
+      workerReplicas:
+        type: integer
+        format: int32
+        description: Number of worker replicas.
+      workerReplicasOnline:
+        type: integer
+        format: int32
+        description: Number of worker replicas currently online.
+      workerAutoResize:
+        type: boolean
+        description: Whether the number of worker replicas auto-scales.
+      coordinatorEndpoint:
+        $ref: '#/definitions/Endpoint'
+        description: The connection endpoint for the cluster coordinator.
+      adminEndpoint:
+        $ref: '#/definitions/Endpoint'
+        description: The connection endpoint for the cluster administration UI.
+  Endpoint:
+    type: object
+    description: A network endpoint that has both a public and a private hostname or IP address.
+    required:
+      - privateHost
+      - publicHost
+      - port
+    properties:
+      privateHost:
+        type: string
+        description: A private IP address or hostname. This is only accessible from within the pod network.
+      publicHost:
+        type: string
+        description: A public IP address or hostname. This is routable from external services.
+      port:
+        type: integer
+        format: int32
+        description: A port.
+  ClusterStatus:
+    type: string
+    description: The status of a cluster.
+    enum:
+      - STARTING
+      - ONLINE
+      - SCALING_UP
+      - SCALING_DOWN
+      - TERMINATING
+      - STOPPED
+      - FAILED
+  CreateClusterRequest:
+    type: object
+    description: Request object for a create cluster request.
+    required:
+      - clusterName
+    properties:
+      clusterName:
+        type: string
+        description: The name of the cluster. This name must be unique, must have a maximum of 128 characters, and must contain only alphanumeric characters and hyphens. Names are case-sensitive.
+      cdhVersion:
+        type: string
+        description: The CDH version.
+      workerCpuCores:
+        type: integer
+        format: int32
+        description: CPU cores per worker.
+      workerMemoryInGib:
+        type: integer
+        format: int32
+        description: Memory in GiB per worker.
+      workerReplicas:
+        type: integer
+        format: int32
+        description: Number of worker replicas.
+  CreateClusterResponse:
+    type: object
+    description: Response object for a create cluster request.
+    required:
+      - cluster
+    properties:
+      cluster:
+        $ref: '#/definitions/Cluster'
+        description: The cluster that was created.
+  UpdateClusterRequest:
+    type: object
+    description: Request object for an update cluster request.
+    required:
+      - clusterName
+    properties:
+      clusterName:
+        type: string
+        description: The name of the cluster. This name must be unique, must have a maximum of 128 characters, and must contain only alphanumeric characters and hyphens. Names are case-sensitive.
+      cdhVersion:
+        type: string
+        description: The CDH version.
+      workerCpuCores:
+        type: integer
+        format: int32
+        description: CPU cores per worker.
+      workerMemoryInGib:
+        type: integer
+        format: int32
+        description: Memory in GiB per worker.
+      workerReplicas:
+        type: integer
+        format: int32
+        description: Number of worker replicas.
+      updateClusterAutoResizeChanged:
+        type: boolean
+        description: Whether the worker autoscaling policy is being changed.
+      updateClusterAutoResize:
+        type: boolean
+        description: Whether autoscaling is turned on or off.
+      updateClusterAutoResizeMax:
+        type: integer
+        format: int32
+        description: Maximum number of replicas when autoscaling is on.
+      updateClusterAutoResizeMin:
+        type: integer
+        format: int32
+        description: Minimum number of replicas when autoscaling is on (optional).
+      updateClusterAutoResizeCpu:
+        type: integer
+        format: int32
+        description: Target CPU utilization as an integer percentage (e.g. 80) when autoscaling is on (optional).
+  UpdateClusterResponse:
+    type: object
+    description: Response object for an update cluster request.
+    required:
+      - cluster
+    properties:
+      cluster:
+        $ref: '#/definitions/Cluster'
+        description: The cluster that was updated.
+  DeleteClusterRequest:
+    type: object
+    description: Request object for a delete cluster request.
+    required:
+      - clusterName
+    properties:
+      clusterName:
+        type: string
+        description: The name or CRN of the cluster.
+  DeleteClusterResponse:
+    type: object
+    description: Response object for a delete cluster request.
+  ListClustersRequest:
+    type: object
+    description: Request object for a list clusters request.
+    properties:
+      clusterNames:
+        type: array
+        items:
+          type: string
+        description: The names or CRNs of the clusters.
+      pageSize:
+        type: integer
+        format: int32
+        minimum: 1
+        maximum: 100
+        x-paging-page-size: true
+        description: The size of each page.
+      startingToken:
+        type: string
+        x-paging-input-token: true
+        description: A token to specify where to start paginating. This is the nextToken from a previously truncated response.
+  ListClustersResponse:
+    type: object
+    description: Response object for a list clusters request.
+    required:
+      - clusters
+    properties:
+      clusters:
+        type: array
+        items:
+          $ref: '#/definitions/ClusterSummary'
+        x-paging-result: true
+        description: The clusters.
+      nextToken:
+        type: string
+        x-paging-output-token: true
+        description: The token to use when requesting the next set of results. If not present, there are no additional results.
+  DescribeClusterRequest:
+    type: object
+    description: Request object for a describe cluster request.
+    required:
+      - clusterName
+    properties:
+      clusterName:
+        type: string
+        description: The name or CRN of the cluster.
+  DescribeClusterResponse:
+    type: object
+    description: Response object for a describe cluster request.
+    required:
+      - cluster
+    properties:
+      cluster:
+        $ref: '#/definitions/Cluster'
+        description: The cluster.
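
For reference, a minimal sketch of a client call matching the spec above, assuming the mock service is running locally on port 4747 (the port server.js below binds to); the cluster name and sizing values are purely illustrative:

```js
// Hypothetical createCluster call against the mock provisioner.
// Assumes the service is listening on localhost:4747 (see server.js below).
const request = require("request");

request.post(
  {
    url: "http://localhost:4747/dw/createCluster",
    json: {
      clusterName: "demo-cluster", // the only required field per CreateClusterRequest
      cdhVersion: "6.1",           // illustrative value
      workerCpuCores: 2,
      workerMemoryInGib: 4,
      workerReplicas: 3
    }
  },
  (err, res, body) => {
    if (err) throw err;
    // Per CreateClusterResponse, body.cluster carries the Cluster object,
    // including its crn and status.
    console.log(body.cluster && body.cluster.status);
  }
);
```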

+ 85 - 0
tools/kubernetes/services/mock-provisioner/server.js

@@ -0,0 +1,85 @@
+// Quick and dirty provisioning service.
+
+const util = require("util");
+const express = require("express");
+const morgan = require('morgan');
+const bodyParser = require("body-parser");
+const validator = require("swagger-express-validator");
+const yaml = require("js-yaml");
+const fs = require("fs");
+const model = require("./model");
+const Prometheus = require('prom-client');
+
+
+// Load the API spec, or exit on error
+let schema = null;
+try {
+  schema = yaml.safeLoad(fs.readFileSync("provisioner.yaml", "utf8"));
+} catch (e) {
+  console.error(e);
+  process.exit(1);
+}
+
+const app = express();
+app.use(bodyParser.json());
+app.use(morgan('tiny'));
+
+const opts = {
+  schema,
+  validateRequest: true,
+  validateResponse: true,
+  requestValidationFn: (req, data, errors) => {
+    console.error(
+      `failed request validation: ${req.method} ${
+        req.originalUrl
+      }\n ${util.inspect(errors)}`
+    );
+  },
+  responseValidationFn: (req, data, errors) => {
+    console.error(
+      `failed response validation: ${req.method} ${
+        req.originalUrl
+      }\n ${util.inspect(errors)}`
+    );
+  }
+};
+app.use(validator(opts));
+
+
+app.post("/dw/createCluster", async (req, res) => {
+  res.json(await model.createCluster(req.body));
+});
+
+app.post("/dw/describeCluster", async (req, res) => {
+  res.json(await model.describeCluster(req.body));
+});
+
+app.post("/dw/listClusters", async (req, res) => {
+  res.json({
+    clusters: await model.listClusters(req.body)
+  });
+});
+
+app.post("/dw/updateCluster", async (req, res) => {
+  res.json(await model.updateCluster(req.body));
+});
+
+app.post("/dw/deleteCluster", async (req, res) => {
+  res.json(await model.deleteCluster(req.body));
+});
+
+app.get("/", async (req, res) => {
+  res.json({app: "cloudera-dw-provisioner", version: "0.1"});
+});
+
+// Proxying metrics until natively supported by Impala
+app.get('/metrics', async (req, res) => {
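+  // Assumption: model.getClusterMetrics refreshes prom-client's default registry before it is rendered below.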
+  await model.getClusterMetrics(req.body);
+
+  res.set('Content-Type', Prometheus.register.contentType);
+  res.end(Prometheus.register.metrics());
+})
+
+app.listen(4747, "0.0.0.0", () =>
+  console.log("provisioning service listening on 0.0.0.0:4747")
+);
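
As a quick check of the /metrics proxying above, a minimal sketch of scraping the endpoint from Node, assuming the service is listening on localhost:4747:

```js
// Fetch the Prometheus metrics exposed by the mock provisioner.
const http = require("http");

http.get("http://localhost:4747/metrics", (res) => {
  let body = "";
  res.setEncoding("utf8");
  res.on("data", (chunk) => (body += chunk));
  // The payload is in the plain-text Prometheus exposition format.
  res.on("end", () => console.log(body));
});
```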