[hadoop] Remove NN_HTTP_PORT (again)

Currently, NN_HTTP_PORT is used to infer the webhdfs address, which is
problematic: the port alone is not very useful without knowing the NN's
hostname, and it assumes that people are running webhdfs rather than httpfs.
It's better to make people specify the webhdfs/httpfs url explicitly.
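For reference, the explicit setting looks roughly like this in hue.ini (a
minimal sketch; localhost and 50070 are placeholders, 50070 being the stock
NameNode HTTP port, and an HttpFs deployment would point at the HttpFs server
instead):

    [hadoop]
      [[hdfs_clusters]]
        [[[default]]]
          fs_defaultfs=hdfs://localhost:8020
          # Point Hue directly at WebHDFS (or at an HttpFs server):
          webhdfs_url=http://localhost:50070/webhdfs/v1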
bc Wong 13 years ago
parent
commit
e216f0bf7f

+ 5 - 2
desktop/conf.dist/hue.ini

@@ -181,8 +181,6 @@
     [[[default]]]
       # Enter the filesystem uri
       fs_defaultfs=hdfs://localhost:8020
-      # The NameNode http port
-      ## http_port=50070
 
       # Change this if your HDFS cluster is Kerberos-secured
       ## security_enabled=false
@@ -222,6 +220,11 @@
       # Whether to submit jobs to this cluster
       ## submit_to=False
 
+
+###########################################################################
+# Settings to configure Job Designer.
+###########################################################################
+
 [jobsub]
   # The URL where the Oozie service runs on. This is required in order for
   # users to submit jobs.
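For completeness, a minimal [jobsub] sketch matching the comment above (the
oozie_url key name and port 11000 are assumptions based on stock Oozie
defaults, not part of this diff):

    [jobsub]
      # Assumed key name; points at the Oozie server that accepts submissions.
      ## oozie_url=http://localhost:11000/oozie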

+ 0 - 2
desktop/conf/pseudo-distributed.ini.tmpl

@@ -189,8 +189,6 @@
     [[[default]]]
       # Enter the filesystem uri
       fs_defaultfs=hdfs://localhost:8020
-      # The NameNode http port
-      ## http_port=50070
 
       # Use WebHdfs/HttpFs as the communication mechanism. To fallback to
       # using the Thrift plugin (used in Hue 1.x), this must be uncommented

+ 4 - 6
desktop/libs/hadoop/src/hadoop/conf.py

@@ -20,6 +20,8 @@ import fnmatch
 import logging
 import os
 
+DEFAULT_NN_HTTP_PORT = 50070
+
 HADOOP_HOME = Config(
   key="hadoop_home",
   default=os.environ.get("HADOOP_HOME", "/usr/lib/hadoop"),
@@ -133,15 +135,11 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
       NN_HDFS_PORT=Config("hdfs_port", help="Hadoop IPC port for the name node", default=8020,
                             type=int),
       # End deprecation
-      NN_HTTP_PORT=Config("http_port", help="Hadoop HTTP port for the name node", default=50070,
-                            type=int),
       FS_DEFAULTFS=Config("fs_defaultfs", help="The equivalent of fs.defaultFS (aka fs.default.name)",
                           default="hdfs://localhost:8020"),
       WEBHDFS_URL=Config("webhdfs_url",
-                         help="The URL to WebHDFS/HttpFs service. Defaults to " +
-                         "the WebHDFS URL on the NameNode. To use the legacy " +
-                         "Thrift plugin communication mechanism, this must be " +
-                         "set to an empty value.",
+                         help="The URL to WebHDFS/HttpFS service. Defaults to " +
+                         "the WebHDFS URL on the NameNode.",
                          type=str, default=None),
       NN_KERBEROS_PRINCIPAL=Config("nn_kerberos_principal", help="Kerberos principal for NameNode",
                                    default="hdfs", type=str),
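With default=None, a caller can now distinguish "unset" from a configured
URL. A hedged sketch of that contract (how WebHdfs actually consumes it is
not shown in this hunk; _get_service_url is the fallback defined in
webhdfs.py below):

    import hadoop.conf
    from hadoop.fs import webhdfs

    hdfs_config = hadoop.conf.HDFS_CLUSTERS['default']
    url = hdfs_config.WEBHDFS_URL.get()
    if url is None:
        # Not configured: fall back to the NameNode's default WebHDFS URL.
        url = webhdfs._get_service_url(hdfs_config)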

+ 3 - 1
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -33,6 +33,8 @@ from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
 from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
 
+import hadoop.conf
+
 
 DEFAULT_HDFS_SUPERUSER = 'hdfs'
 
@@ -552,7 +554,7 @@ def _get_service_url(hdfs_config):
   fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
   netloc = Hdfs.urlsplit(fs_defaultfs)[1]
   host = netloc.split(':')[0]
-  port = hdfs_config.NN_HTTP_PORT.get()
+  port = hadoop.conf.DEFAULT_NN_HTTP_PORT
   return "http://%s:%s/webhdfs/v1" % (host, port)
 
 
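A standalone sketch of the same derivation using only the standard library
(the real code goes through Hdfs.urlsplit; Python 3's urllib is shown here
purely for illustration):

    from urllib.parse import urlsplit

    DEFAULT_NN_HTTP_PORT = 50070  # mirrors hadoop.conf.DEFAULT_NN_HTTP_PORT

    def default_webhdfs_url(fs_defaultfs):
        # 'hdfs://nn.example.com:8020' -> 'http://nn.example.com:50070/webhdfs/v1'
        host = urlsplit(fs_defaultfs).netloc.split(':')[0]
        return "http://%s:%s/webhdfs/v1" % (host, DEFAULT_NN_HTTP_PORT)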

+ 2 - 1
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -458,9 +458,10 @@ def shared_cluster():
       LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))
 
     # Fix config to reflect the cluster setup.
+    webhdfs_url = "http://localhost:%s/webhdfs/v1" % (cluster.dfs_http_port,)
     closers = [
       hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
-      hadoop.conf.HDFS_CLUSTERS['default'].NN_HTTP_PORT.set_for_testing(cluster.dfs_http_port),
+      hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
       hadoop.conf.MR_CLUSTERS['default'].JT_HOST.set_for_testing('localhost'),
       hadoop.conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(cluster.jt_thrift_port),
     ]
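Each set_for_testing() call above returns a restore callable (hence the name
closers); a sketch of the matching teardown, assuming that contract:

    def restore_config(closers):
        # Undo the config overrides in reverse order once the test cluster
        # has been torn down.
        for close in reversed(closers):
            close()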