# Lightweight Hue configuration file
# ==================================
[desktop]
# Set this to a random string, the longer the better.
secret_key=kasdlfjknasdfl3hbaksk3bwkasdfkasdfba23asdf
# Webserver listens on this address and port
http_host=0.0.0.0
http_port=8888
# Time zone name
time_zone=America/Los_Angeles
# Enable or disable debug mode.
django_debug_mode=false
# Enable or disable backtraces for server errors
http_500_debug_mode=false
app_blacklist=search,hbase,security
# Configuration options for specifying the Desktop Database. For more info,
# see http://docs.djangoproject.com/en/1.11/ref/settings/#database-engine
# ------------------------------------------------------------------------
[[database]]
# Database engine is typically one of:
# postgresql_psycopg2, mysql, sqlite3 or oracle.
#
# Note that for sqlite3, 'name' below is a path to the filename. For other backends, it is the database name.
# Note for Oracle, options={"threaded":true} must be set in order to avoid crashes.
# Note for Oracle, you can use the Oracle Service Name by setting "host=" and "port=" and then "name=<host>:<port>/<service_name>".
# Note for MariaDB use the 'mysql' engine.
# engine=postgresql_psycopg2
# host=hue-postgres
# port=5432
# user=hue
# password=hue
# name=hue
# engine=mysql
# host=database
# port=3306
# user=root
# password=secret
# name=hue
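# A hypothetical Oracle example following the notes above; host, port, user,
# password and service name are placeholders, not values from this deployment:
# engine=oracle
# host=oracle-db
# port=1521
# user=hue
# password=hue
# name=oracle-db:1521/orcl
# options={"threaded":true}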
###########################################################################
# Settings to configure the snippets available in the Notebook
###########################################################################
# [notebook]
# One entry for each type of snippet.
# [[interpreters]]
# Define the name and how to connect and execute the language.
# https://docs.gethue.com/administrator/configuration/editor/
# Example for Docker compose
# [[[mysql]]]
# name = MySQL
# interface=sqlalchemy
# ## https://docs.sqlalchemy.org/en/latest/dialects/mysql.html
# options='{"url": "mysql://root:secret@database:3306/hue"}'
# ## options='{"url": "mysql://${USER}:${PASSWORD}@localhost:3306/hue"}'
# [[[hive]]]
# name=Hive
# interface=hiveserver2
# [[[impala]]]
# name=Impala
# interface=hiveserver2
# [[[sparksql]]]
# name = Spark Sql
# interface=sqlalchemy
# options='{"url": "hive://user:password@localhost:10000/database"}'
# [[[sparksql]]]
# name=SparkSql
# interface=livy
# [[[spark]]]
# name=Scala
# interface=livy
# [[[pyspark]]]
# name=PySpark
# interface=livy
# [[[r]]]
# name=R
# interface=livy
# [[[jar]]]
# name=Spark Submit Jar
# interface=livy-batch
# [[[py]]]
# name=Spark Submit Python
# interface=livy-batch
# [[[text]]]
# name=Text
# interface=text
# [[[markdown]]]
# name=Markdown
# interface=text
# [[[sqlite]]]
# name = SQLite
# interface=rdbms
# [[[postgresql]]]
# name = PostgreSQL
# interface=rdbms
# [[[oracle]]]
# name = Oracle
# interface=rdbms
# [[[solr]]]
# name = Solr SQL
# interface=solr
# ## Name of the collection handler
# # options='{"collection": "default"}'
# [[[pig]]]
# name=Pig
# interface=oozie
# [[[java]]]
# name=Java
# interface=oozie
# [[[spark2]]]
# name=Spark
# interface=oozie
# [[[mapreduce]]]
# name=MapReduce
# interface=oozie
# [[[sqoop1]]]
# name=Sqoop1
# interface=oozie
# [[[distcp]]]
# name=Distcp
# interface=oozie
# [[[shell]]]
# name=Shell
# interface=oozie
# [[[presto]]]
# name=Presto SQL
# interface=presto
# ## Specific options for connecting to the Presto server.
# ## The JDBC driver presto-jdbc.jar needs to be in the CLASSPATH environment variable.
# ## If 'user' and 'password' are omitted, the user will be prompted for them in the UI.
# options='{"url": "jdbc:presto://localhost:8080/catalog/schema", "driver": "io.prestosql.jdbc.PrestoDriver", "user": "root", "password": "root"}'
# [[[clickhouse]]]
# name=ClickHouse
# interface=jdbc
# ## Specific options for connecting to the ClickHouse server.
# ## The JDBC driver clickhouse-jdbc.jar and its related jars need to be in the CLASSPATH environment variable.
# options='{"url": "jdbc:clickhouse://localhost:8123", "driver": "ru.yandex.clickhouse.ClickHouseDriver", "user": "readonly", "password": ""}'
[dashboard]
# Activate the SQL Dashboard (beta).
has_sql_enabled=true
[hadoop]
# Configuration for HDFS NameNode
# ------------------------------------------------------------------------
[[hdfs_clusters]]
# HA support by using HttpFs
# [[[default]]]
# Enter the filesystem uri
# fs_defaultfs=hdfs://localhost:8020
# Use WebHdfs/HttpFs as the communication mechanism.
# Domain should be the NameNode or HttpFs host.
# Default port is 14000 for HttpFs.
## webhdfs_url=http://localhost:50070/webhdfs/v1
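# A hypothetical HttpFs example following the note above; the hostname is a
# placeholder, not a value from this deployment:
## webhdfs_url=http://httpfs-host:14000/webhdfs/v1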
# Configuration for YARN (MR2)
# ------------------------------------------------------------------------
[[yarn_clusters]]
# [[[default]]]
# Enter the host on which you are running the ResourceManager
## resourcemanager_host=localhost
# The port on which the ResourceManager IPC listens
## resourcemanager_port=8032
# URL of the ResourceManager API
## resourcemanager_api_url=http://localhost:8088
# URL of the ProxyServer API
## proxy_api_url=http://localhost:8088
# URL of the HistoryServer API
## history_server_api_url=http://localhost:19888
# URL of the Spark History Server
## spark_history_server_url=http://localhost:18088
###########################################################################
# Settings to configure Beeswax with Hive
###########################################################################
[beeswax]
# Host where HiveServer2 is running.
# If Kerberos security is enabled, use fully-qualified domain name (FQDN).
## hive_server_host=localhost
# Port on which the HiveServer2 Thrift server runs.
## hive_server_port=10000
###########################################################################
# Settings to configure Impala
###########################################################################
[impala]
# Host of the Impala Server (one of the Impalad)
## server_host=localhost
# Port of the Impala Server
## server_port=21050
###########################################################################
# Settings to configure the Spark application.
###########################################################################
[spark]
# The Livy Server URL.
## livy_server_url=http://localhost:8998
# Configure Livy to start in local 'process' mode or with 'yarn' workers.
## livy_server_session_kind=yarn
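# A hypothetical override for running Livy sessions in local 'process' mode
# instead of on YARN, per the note above:
## livy_server_session_kind=process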
# Whether Livy requires the client to perform Kerberos authentication.
## security_enabled=false
# Host of the Sql Server
## sql_server_host=localhost
# Port of the Sql Server
## sql_server_port=10000
# Choose whether Hue should validate certificates received from the server.
## ssl_cert_ca_verify=true
###########################################################################
# Settings to configure HBase Browser
###########################################################################
[hbase]
# Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.
## hbase_clusters=(Cluster|localhost:9090)
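# A hypothetical multi-cluster example following the '(name|host:port)' format
# above; cluster names and hostnames are placeholders:
## hbase_clusters=(Cluster1|hbase-thrift-1:9090),(Cluster2|hbase-thrift-2:9090)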
###########################################################################
# Settings to configure Solr Search
###########################################################################
[search]
# URL of the Solr Server
## solr_url=http://localhost:8983/solr/
###########################################################################
# Settings to configure liboozie
###########################################################################
[liboozie]
# The URL where the Oozie service runs. This is required in order for
# users to submit jobs. An empty value disables the config check.
## oozie_url=http://localhost:11000/oozie
###########################################################################
# Settings for the AWS lib
###########################################################################
[aws]
[[aws_accounts]]
# Default AWS account
## [[[default]]]
# AWS credentials
## access_key_id=
## secret_access_key=
## security_token=
# Execute this script to produce the AWS access key ID.
## access_key_id_script=/path/access_key_id.sh
# Execute this script to produce the AWS secret access key.
## secret_access_key_script=/path/secret_access_key.sh
# Allow the use of either environment variables or the
# EC2 InstanceProfile to retrieve AWS credentials.
## allow_environment_credentials=yes
# AWS region to use. If no region is specified, Hue will attempt to connect to the standard s3.amazonaws.com endpoint.
## region=us-east-1
# Endpoint overrides
## host=
# Proxy address and port
## proxy_address=
## proxy_port=8080
## proxy_user=
## proxy_pass=
# Secure connections are the default, but this can be explicitly overridden:
## is_secure=true
###########################################################################
# Settings for the Azure lib
###########################################################################
[azure]
[[azure_accounts]]
# Default Azure account
[[[default]]]
# Azure credentials
## client_id=
# Execute this script to produce the ADLS client id.
## client_id_script=/path/client_id.sh
## client_secret=
# Execute this script to produce the ADLS client secret.
## client_secret_script=/path/client_secret.sh
## tenant_id=
# Execute this script to produce the ADLS tenant id.
## tenant_id_script=/path/tenant_id.sh
[[adls_clusters]]
# Default ADLS cluster
[[[default]]]
## fs_defaultfs=adl://<account_name>.azuredatalakestore.net
## webhdfs_url=https://<account_name>.azuredatalakestore.net/webhdfs/v1
###########################################################################
# Settings to configure the ZooKeeper Lib
###########################################################################
[libzookeeper]
# ZooKeeper ensemble. Comma separated list of Host/Port.
# e.g. localhost:2181,localhost:2182,localhost:2183
## ensemble=localhost:2181
###########################################################################
# Settings to configure Kafka
###########################################################################
[kafka]
[[kafka]]
# Enable the Kafka integration.
## is_enabled=false
# Base URL of Kafka REST API.
## api_url=http://localhost:8082
###########################################################################
# Settings to configure Metadata
###########################################################################
[metadata]
[[navigator]]
# Navigator API URL (without version suffix).
## api_url=http://localhost:7187/api
# Which authentication to use: CM or external via LDAP or SAML.
## navmetadataserver_auth_type=CMDB
# Username of the CM user used for authentication.
## navmetadataserver_cmdb_user=hue
# CM password of the user used for authentication.
## navmetadataserver_cmdb_password=
# Execute this script to produce the CM password. This will be used when the plain password is not set.
## navmetadataserver_cmdb_password_script=