- #####################################
- # DEVELOPMENT EDITION
- #####################################
- # Hue configuration file
- # ===================================
- #
- # For complete documentation about the contents of this file, run
- # $ <hue_root>/build/env/bin/hue config_help
- #
- # All .ini files under the current directory are treated equally. Their
- # contents are merged to form the Hue configuration, which can be
- # viewed on the Hue server at
- # http://<hue_host>:<port>/dump_config
- ###########################################################################
- # General configuration for core Desktop features (authentication, etc)
- ###########################################################################
- [desktop]
- send_dbug_messages=1
- # To show database transactions, set database_logging to 1
- database_logging=0
- # Set this to a random string, the longer the better.
- # This is used for secure hashing in the session store.
- secret_key=
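- # For example, a suitable value can be generated with a command such as
- # the following (illustrative; any sufficiently long random string works):
- # $ openssl rand -hex 32
- ## secret_key=<paste the generated random string here>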
- # Webserver listens on this address and port
- http_host=0.0.0.0
- http_port=8000
- # Time zone name
- time_zone=America/Los_Angeles
- # Enable or disable Django debug mode (enabled here for development)
- django_debug_mode=1
- # Enable or disable backtraces for server errors (enabled here for development)
- http_500_debug_mode=1
- # Server email for internal error messages
- ## django_server_email='hue@localhost.localdomain'
- # Email backend
- ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
- # Set to true to use CherryPy as the webserver, set to false
- # to use Spawning as the webserver. Defaults to Spawning if
- # key is not specified.
- ## use_cherrypy_server = false
- # Webserver runs as this user
- ## server_user=hue
- ## server_group=hue
- # If set to false, runcpserver will not actually start the web server.
- # Used if Apache is being used as a WSGI container.
- ## enable_server=yes
- # Number of threads used by the CherryPy web server
- ## cherrypy_server_threads=10
- # Filename of SSL Certificate
- ## ssl_certificate=
- # Filename of SSL RSA Private Key
- ## ssl_private_key=
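- # For example, a self-signed certificate for testing could be generated
- # with a command such as the following (illustrative; use a CA-signed
- # certificate in production):
- # $ openssl req -new -x509 -nodes -days 365 -keyout hue.key -out hue.crt
- ## ssl_certificate=/etc/hue/hue.crt
- ## ssl_private_key=/etc/hue/hue.key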
- # Default encoding for site data
- ## default_site_encoding=utf-8
- # Administrators
- # ----------------
- [[django_admins]]
- ## [[[admin1]]]
- ## name=john
- ## email=john@doe.com
- # UI customizations
- # -------------------
- [[custom]]
- # Top banner HTML code
- ## banner_top_html=
- # Configuration options for user authentication into the web application
- # ------------------------------------------------------------------------
- [[auth]]
- # Authentication backend. Common settings are:
- # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
- # - desktop.auth.backend.AllowAllBackend (allows everyone)
- # - desktop.auth.backend.AllowFirstUserDjangoBackend
- # (Default. Relies on Django and user manager, after the first login)
- # - desktop.auth.backend.LdapBackend
- # - desktop.auth.backend.PamBackend
- # - desktop.auth.backend.SpnegoDjangoBackend
- # - desktop.auth.backend.RemoteUserDjangoBackend
- # - desktop.auth.backend.OAuthBackend
- ## backend=desktop.auth.backend.AllowFirstUserDjangoBackend
- ## pam_service=login
- # When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
- # the normalized name of the header that contains the remote user.
- # The HTTP header in the request is converted to a key by converting
- # all characters to uppercase, replacing any hyphens with underscores
- # and adding an HTTP_ prefix to the name. For example, if the header
- # is called Remote-User, it would be configured as HTTP_REMOTE_USER.
- #
- # Defaults to HTTP_REMOTE_USER
- ## remote_user_header=HTTP_REMOTE_USER
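- # As another illustrative example, a proxy that sets an X-Forwarded-User
- # header would be configured here as HTTP_X_FORWARDED_USER.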
- # Configuration options for connecting to LDAP and Active Directory
- # -------------------------------------------------------------------
- [[ldap]]
- # The search base for finding users and groups
- ## base_dn="DC=mycompany,DC=com"
- # The NT domain to connect to (only for use with Active Directory)
- ## nt_domain=mycompany.com
- # URL of the LDAP server
- ## ldap_url=ldap://auth.mycompany.com
- # A PEM-format file containing certificates for the CAs that
- # Hue will trust for authentication over TLS.
- # The certificate for the CA that signed the
- # LDAP server certificate must be included among these certificates.
- # See more here http://www.openldap.org/doc/admin24/tls.html.
- ## ldap_cert=
- ## use_start_tls=true
- # Distinguished name of the user to bind as -- not necessary if the LDAP server
- # supports anonymous searches
- ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
- # Password of the bind user -- not necessary if the LDAP server supports
- # anonymous searches
- ## bind_password=
- # Pattern for searching for usernames -- Use <username> for the parameter
- # For use when using LdapBackend for Hue authentication
- ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
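- # For example, with the pattern above a user logging in as "jdoe" would
- # be looked up as "uid=jdoe,ou=People,dc=mycompany,dc=com" (illustrative).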
- # Create users in Hue when they try to log in with their LDAP credentials
- # For use when using LdapBackend for Hue authentication
- ## create_users_on_login = true
- [[[users]]]
- # Base filter for searching for users
- ## user_filter="objectclass=*"
- # The username attribute in the LDAP schema
- ## user_name_attr=sAMAccountName
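- # For example, against Active Directory a narrower filter such as the
- # following is common (illustrative):
- ## user_filter="objectclass=user"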
- [[[groups]]]
- # Base filter for searching for groups
- ## group_filter="objectclass=*"
- # The group name attribute in the LDAP schema
- ## group_name_attr=cn
- # Configuration options for specifying the Desktop Database. For more info,
- # see http://docs.djangoproject.com/en/1.1/ref/settings/#database-engine
- # ------------------------------------------------------------------------
- [[database]]
- # Database engine is typically one of:
- # postgresql_psycopg2, mysql, or sqlite3
- #
- # Note that for sqlite3, 'name', below, is a filename;
- # for other backends, it is the database name.
- ## engine=sqlite3
- ## host=
- ## port=
- ## user=
- ## password=
- ## name=
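- # For example, a MySQL configuration might look like the following
- # (host, credentials, and database name are illustrative):
- ## engine=mysql
- ## host=localhost
- ## port=3306
- ## user=hue
- ## password=secretpassword
- ## name=hue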
- # Configuration options for connecting to an external SMTP server
- # ------------------------------------------------------------------------
- [[smtp]]
- # The SMTP server information for email notification delivery
- host=localhost
- port=25
- user=
- password=
- # Whether to use a TLS (secure) connection when talking to the SMTP server
- tls=no
- # Default email address to use for various automated notification from Hue
- ## default_from_email=hue@localhost
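- # For example, a TLS-enabled SMTP configuration might look like the
- # following (host and credentials are illustrative):
- # host=smtp.example.com
- # port=587
- # user=hue-notify
- # password=<SMTP account password>
- # tls=yes
- # default_from_email=hue@example.com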
- # Configuration options for Kerberos integration for secured Hadoop clusters
- # ------------------------------------------------------------------------
- [[kerberos]]
- # Path to Hue's Kerberos keytab file
- ## hue_keytab=
- # Kerberos principal name for Hue
- ## hue_principal=hue/hostname.foo.com
- # Path to kinit
- ## kinit_path=/path/to/kinit
- # Configuration options for using OAuthBackend login
- # ------------------------------------------------------------------------
- [[oauth]]
- # The Consumer key of the application
- ## consumer_key=XXXXXXXXXXXXXXXXXXXXX
- # The Consumer secret of the application
- ## consumer_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- # The Request token URL
- ## request_token_url=https://api.twitter.com/oauth/request_token
- # The Access token URL
- ## access_token_url=https://api.twitter.com/oauth/access_token
- # The Authorize URL
- ## authenticate_url=https://api.twitter.com/oauth/authorize
- ###########################################################################
- # Settings to configure your Hadoop cluster.
- ###########################################################################
- [hadoop]
- # Configuration for HDFS NameNode
- # ------------------------------------------------------------------------
- [[hdfs_clusters]]
- [[[default]]]
- # Enter the filesystem uri
- fs_defaultfs=hdfs://localhost:8020
- # Use WebHdfs/HttpFs as the communication mechanism. To fall back to
- # using the Thrift plugin (used in Hue 1.x), this must be uncommented
- # and explicitly set to the empty value.
- ## webhdfs_url=
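- # For example, WebHDFS typically runs on the NameNode (the host and
- # port below are illustrative):
- ## webhdfs_url=http://localhost:50070/webhdfs/v1/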
- ## security_enabled=false
- # Settings about this HDFS cluster. If you install HDFS in a
- # different location, you need to set the following.
- # Defaults to $HADOOP_HDFS_HOME or /usr/lib/hadoop-hdfs
- ## hadoop_hdfs_home=/usr/lib/hadoop-hdfs
- # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- ## hadoop_bin=/usr/bin/hadoop
- # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- ## hadoop_conf_dir=/etc/hadoop/conf
- # Configuration for MapReduce JobTracker
- # ------------------------------------------------------------------------
- [[mapred_clusters]]
- [[[default]]]
- # Enter the host on which you are running the Hadoop JobTracker
- jobtracker_host=localhost
- # The port on which the JobTracker IPC listens
- jobtracker_port=8021
- # Thrift plug-in port for the JobTracker
- ## thrift_port=9290
- # Whether to submit jobs to this cluster
- ## submit_to=False
- ## security_enabled=false
- # Settings about this MR1 cluster. If you install MR1 in a
- # different location, you need to set the following.
- # Defaults to $HADOOP_MR1_HOME or /usr/lib/hadoop-0.20-mapreduce
- ## hadoop_mapred_home=/usr/lib/hadoop-0.20-mapreduce
- # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- ## hadoop_bin=/usr/bin/hadoop
- # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- ## hadoop_conf_dir=/etc/hadoop/conf
- # Configuration for Yarn
- # ------------------------------------------------------------------------
- [[yarn_clusters]]
- [[[default]]]
- # Enter the host on which you are running the ResourceManager
- resourcemanager_host=localhost
- # The port on which the ResourceManager IPC listens
- resourcemanager_port=8032
- # Whether to submit jobs to this cluster
- ## submit_to=False
- ## security_enabled=false
- # Settings about this MR2 cluster. If you install MR2 in a
- # different location, you need to set the following.
- # Defaults to $HADOOP_MR2_HOME or /usr/lib/hadoop-mapreduce
- ## hadoop_mapred_home=/usr/lib/hadoop-mapreduce
- # Defaults to $HADOOP_BIN or /usr/bin/hadoop
- ## hadoop_bin=/usr/bin/hadoop
- # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
- ## hadoop_conf_dir=/etc/hadoop/conf
- # URL of the ResourceManager API
- ## resourcemanager_api_url=http://localhost:8088
- # URL of the ProxyServer API
- ## proxy_api_url=http://localhost:8088
- # URL of the HistoryServer API
- history_server_api_url=http://localhost:19888
- # URL of the NodeManager API
- node_manager_api_url=http://localhost:8042
- ###########################################################################
- # Settings to configure liboozie
- ###########################################################################
- [liboozie]
- # The URL where the Oozie service runs. This is required for users to be
- # able to submit jobs.
- ## oozie_url=http://localhost:11000/oozie
- ## security_enabled=false
- # Location on HDFS where workflows and coordinators are deployed when submitted.
- ## remote_deployement_dir=/user/hue/oozie/deployments
- ###########################################################################
- # Settings to configure the Oozie app
- ###########################################################################
- [oozie]
- # Location on local FS where the examples are stored.
- ## local_data_dir=..../examples
- # Location on local FS where the data for the examples is stored.
- ## sample_data_dir=...thirdparty/sample_data
- # Location on HDFS where the oozie examples and workflows are stored.
- ## remote_data_dir=/user/hue/oozie/workspaces
- # Share workflow and coordinator information with all users. If set to false,
- # jobs are visible only to the owner and administrators.
- ## share_jobs=True
- # Maximum number of Oozie workflows or coordinators to retrieve in one API call.
- ## oozie_jobs_count=100
- ###########################################################################
- # Settings to configure Beeswax
- ###########################################################################
- [beeswax]
- # Host where Beeswax server Thrift daemon is running.
- # If Kerberos security is enabled, the fully-qualified domain name (FQDN) is
- # required, even if the Thrift daemon is running on the same host as Hue.
- ## beeswax_server_host=<FQDN of Beeswax Server>
- # Port on which the Beeswax Thrift server runs.
- ## beeswax_server_port=8002
- # Host where internal metastore Thrift daemon is running.
- ## beeswax_meta_server_host=localhost
- # Configure the port the internal metastore daemon runs on.
- # Used only if hive.metastore.local is true.
- ## beeswax_meta_server_port=8003
- # Hive home directory
- ## hive_home_dir=/usr/lib/hive
- # Hive configuration directory, where hive-site.xml is located
- ## hive_conf_dir=/etc/hive/conf
- # Timeout in seconds for thrift calls to beeswax service
- ## beeswax_server_conn_timeout=120
- # Timeout in seconds for thrift calls to the hive metastore
- ## metastore_conn_timeout=10
- # Maximum Java heapsize (in megabytes) used by Beeswax Server.
- # Note that the setting of HADOOP_HEAPSIZE in $HADOOP_CONF_DIR/hadoop-env.sh
- # may override this setting.
- ## beeswax_server_heapsize=1000
- # Share saved queries with all users. If set to false, saved queries are
- # visible only to the owner and administrators.
- ## share_saved_queries=true
- # The backend to contact for queries/metadata requests
- # Choices are 'beeswax' (default) and 'hiveserver2'.
- ## server_interface=beeswax
- # Time in milliseconds for Beeswax to persist queries in its cache.
- # 7*24*60*60*1000 = 1 week
- ## beeswax_running_query_lifetime=604800000L
- ###########################################################################
- # Settings to configure Impala
- ###########################################################################
- [impala]
- # Host of the Impala Server
- ## server_host=localhost
- # Port of the Impala Server when using Beeswax Thrift interface
- ## server_port=21000
- # Port of the Impala Server when using Hive Server 2 Thrift interface
- ## server_port=21050
- # Kerberos principal
- ## impala_principal=impala/hostname.foo.com
- ###########################################################################
- # Settings to configure Job Designer
- ###########################################################################
- [jobsub]
- # Location on HDFS where the jobsub examples and templates are stored.
- ## remote_data_dir=/user/hue/jobsub
- # Location on local FS where examples and template are stored.
- ## local_data_dir=..../data
- # Location on local FS where sample data is stored
- ## sample_data_dir=...thirdparty/sample_data
- ###########################################################################
- # Settings to configure Job Browser
- ###########################################################################
- [jobbrowser]
- # Share submitted job information with all users. If set to false,
- # submitted jobs are visible only to the owner and administrators.
- ## share_jobs=true
- ###########################################################################
- # Settings to configure the Shell application
- ###########################################################################
- [shell]
- # The shell_buffer_amount specifies the number of bytes of output per shell
- # that the Shell app will keep in memory. If not specified, it defaults to
- # 524288 (512 KiB).
- ## shell_buffer_amount=100
- # If you run Hue against a Hadoop cluster with Kerberos security enabled, the
- # Shell app needs to acquire delegation tokens for the subprocesses to work
- # correctly. These delegation tokens are stored as temporary files in some
- # directory. You can configure this directory here. If not specified, it
- # defaults to /tmp/hue_delegation_tokens.
- ## shell_delegation_token_dir=/tmp/hue_delegation_tokens
- [[ shelltypes ]]
- # Define and configure a new shell type "flume"
- # ------------------------------------------------------------------------
- [[[ flume ]]]
- nice_name = "Flume Shell"
- command = "/usr/bin/flume shell"
- help = "The command-line Flume client interface."
- [[[[ environment ]]]]
- # You can specify environment variables for the Flume shell
- # in this section.
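- # For example (the variable and path are illustrative):
- # [[[[[ FLUME_CONF_DIR ]]]]]
- # value = "/etc/flume/conf"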
- # Define and configure a new shell type "pig"
- # ------------------------------------------------------------------------
- [[[ pig ]]]
- nice_name = "Pig Shell (Grunt)"
- command = "/usr/bin/pig -l /dev/null"
- help = "The command-line interpreter for Pig"
- [[[[ environment ]]]]
- # You can specify environment variables for the Pig shell
- # in this section. Note that JAVA_HOME must be configured
- # for the Pig shell to run.
- [[[[[ JAVA_HOME ]]]]]
- value = "/usr/lib/jvm/java-6-sun"
- # Define and configure a new shell type "hbase"
- # ------------------------------------------------------------------------
- [[[ hbase ]]]
- nice_name = "HBase Shell"
- command = "/usr/bin/hbase shell"
- help = "The command-line HBase client interface."
- [[[[ environment ]]]]
- # You can configure environment variables for the HBase shell
- # in this section.
- # Define and configure a new shell type "Sqoop 2"
- # ------------------------------------------------------------------------
- [[[ sqoop2 ]]]
- nice_name = "Sqoop 2 Shell"
- command = "/usr/bin/sqoop2"
- help = "The command-line Sqoop 2 client."
- [[[[ environment ]]]]
- # You can configure environment variables for the Sqoop 2 shell
- # in this section.
- # Define and configure a new shell type "bash" for testing only
- # ------------------------------------------------------------------------
- [[[ bash ]]]
- nice_name = "Bash (Test only!!!)"
- command = "/bin/bash"
- help = "A shell that does not depend on Hadoop components"
- ###########################################################################
- # Settings for the User Admin application
- ###########################################################################
- [useradmin]
- # The name of the default user group that users will be a member of
- ## default_user_group=default