
Finish securing Thrift path

Todd Lipcon, 15 years ago
parent commit c45794dd16

+ 20 - 6
desktop/core/src/desktop/lib/thrift_util.py

@@ -108,7 +108,7 @@ class ConnectionPooler(object):
          q = Queue.Queue(self.poolsize)
          self.pooldict[(conf.host, conf.port)] = q
          for i in xrange(self.poolsize):
-            client = construct_client(conf)
+            client = construct_superclient(conf)
            client.CID = i
            q.put(client, False)
      finally:
@@ -146,11 +146,21 @@ class ConnectionPooler(object):
     """
     """
     self.pooldict[(host, port)].put(client)
     self.pooldict[(host, port)].put(client)
 
 
-def construct_client(conf):
+def construct_superclient(conf):
   """
   """
   Constructs a thrift client, lazily.
   Constructs a thrift client, lazily.
   """
   """
+  service, protocol, transport = connect_to_thrift(conf)
+  return SuperClient(service, transport, timeout_seconds=conf.timeout_seconds)
+
+def connect_to_thrift(conf):
+  """
+  Connect to a thrift endpoint as determined by the 'conf' parameter.
+  Note that this does *not* open the transport.
+
+  Returns a tuple of (service, protocol, transport)
+  """
  def sasl_factory():
    saslc = sasl.Client()
    saslc.setAttr("host", conf.host)
@@ -158,16 +168,20 @@ def construct_client(conf):
    saslc.init()
    return saslc

-  logging.info("service: %s   host: %s" % (conf.kerberos_principal, conf.host))
  sock = TSocket(conf.host, conf.port)
  if conf.timeout_seconds:
    # Thrift trivia: You can do this after the fact with
    # self.wrapped.transport._TBufferedTransport__trans.setTimeout(seconds*1000)
    sock.setTimeout(conf.timeout_seconds*1000.0)
-  transport = TSaslClientTransport(sasl_factory, "GSSAPI", sock)
-  protocol = TBinaryProtocolAccelerated(transport)
+  if conf.use_sasl:
+    transport = TSaslClientTransport(sasl_factory, "GSSAPI", sock)
+  else:
+    transport = TBufferedTransport(sock)
+
+  protocol = TBinaryProtocol(transport)
  service = conf.klass(protocol)
-  return SuperClient(service, transport, timeout_seconds=conf.timeout_seconds)
+  return service, protocol, transport
+

_connection_pool = ConnectionPooler()

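For orientation, a minimal sketch of how the refactored entry points compose: pooled callers keep using construct_superclient, while code that needs control over the transport (as the datanode path below does) calls connect_to_thrift and opens the transport itself. The ConnectionConfig call mirrors the one added to hadoopfs.py further down; the Namenode import path and the host/port values are assumptions.

    # Sketch only: ConnectionConfig arguments follow the _connect_dn call in
    # hadoopfs.py below; the import path of the generated Namenode module and
    # the host/port are placeholders.
    from hadoop.api import Namenode  # assumed location of the generated client
    from desktop.lib import thrift_util

    conf = thrift_util.ConnectionConfig(
      Namenode.Client, "namenode.example.com", 10090,
      "HDFS Namenode HUE Plugin",
      use_sasl=True,                 # selects the TSaslClientTransport/GSSAPI path
      kerberos_principal="hdfs",
      timeout_seconds=15)

    service, protocol, transport = thrift_util.connect_to_thrift(conf)
    transport.open()   # connect_to_thrift deliberately leaves opening to the caller
    try:
      pass             # issue Namenode.Client calls on 'service' here
    finally:
      transport.close()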

+ 10 - 0
desktop/libs/hadoop/java/src/java/org/apache/hadoop/thriftfs/ThriftUtils.java

@@ -25,10 +25,12 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.thriftfs.api.Block;
 import org.apache.hadoop.thriftfs.api.Constants;
 import org.apache.hadoop.thriftfs.api.ContentSummary;
@@ -185,6 +187,14 @@ public class ThriftUtils {
    }

    TTransport t = new TSocket(addr.getHostName(), addr.getPort());
+    if (UserGroupInformation.isSecurityEnabled()) {
+      t = new HadoopThriftAuthBridge.Client()
+        .createClientTransport(
+          conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
+          addr.getHostName(),
+          "KERBEROS", t);
+    }
+
    t.open();
    TProtocol p = new TBinaryProtocol(t);
    return new Namenode.Client(p);

+ 2 - 0
desktop/libs/hadoop/src/hadoop/conf.py

@@ -117,6 +117,8 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
                            type=int),
      NN_KERBEROS_PRINCIPAL=Config("nn_kerberos_principal", help="Kerberos principal for NameNode",
                                   default="hdfs", type=str),
+      DN_KERBEROS_PRINCIPAL=Config("dn_kerberos_principal", help="Kerberos principal for DataNode",
+                                   default="hdfs", type=str),
      SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
                              default=False, type=bool),
    )
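In hue.ini these keys would be set roughly as follows. This is a hypothetical snippet: the section nesting and cluster name are assumptions, while the key names and defaults come from the Config definitions above.

    [hadoop]
      [[hdfs_clusters]]
        [[[default]]]
          # All three keys below were touched by this commit;
          # both principals default to "hdfs" when unset.
          security_enabled=true
          nn_kerberos_principal=hdfs
          dn_kerberos_principal=hdfs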

+ 21 - 11
desktop/libs/hadoop/src/hadoop/fs/hadoopfs.py

@@ -26,6 +26,7 @@ import random
 import stat as statconsts
 import subprocess
 import sys
+import time
 import urlparse
 import threading

@@ -175,7 +176,8 @@ class HadoopFileSystem(object):
   """
   """
 
 
  def __init__(self, host, thrift_port, hdfs_port=8020,
-               kerberos_principal="hdfs",
+               nn_kerberos_principal="hdfs",
+               dn_kerberos_principal="hdfs",
               security_enabled=False,
               hadoop_bin_path="hadoop"):
    """
@@ -190,7 +192,8 @@ class HadoopFileSystem(object):
    self.thrift_port = thrift_port
    self.hdfs_port = hdfs_port
    self.security_enabled = security_enabled
-    self.kerberos_principal = kerberos_principal
+    self.nn_kerberos_principal = nn_kerberos_principal
+    self.dn_kerberos_principal = dn_kerberos_principal
    self.hadoop_bin_path = hadoop_bin_path
    self._resolve_hadoop_path()

@@ -198,7 +201,7 @@ class HadoopFileSystem(object):
      Namenode.Client, host, thrift_port,
      service_name="HDFS Namenode HUE Plugin",
      use_sasl=security_enabled,
-      kerberos_principal=kerberos_principal,
+      kerberos_principal=nn_kerberos_principal,
      timeout_seconds=NN_THRIFT_TIMEOUT)

    # The file systems are cached globally.  We store
@@ -214,7 +217,8 @@ class HadoopFileSystem(object):
               thrift_port=fs_config.NN_THRIFT_PORT.get(),
               hdfs_port=fs_config.NN_HDFS_PORT.get(),
               security_enabled=fs_config.SECURITY_ENABLED.get(),
-               kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
+               nn_kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
+               dn_kerberos_principal=fs_config.DN_KERBEROS_PRINCIPAL.get(),
               hadoop_bin_path=hadoop_bin_path)


@@ -567,14 +571,20 @@ class HadoopFileSystem(object):
    return ret

  def _connect_dn(self, node):
-    sock = TSocket.TSocket(node.host, node.thriftPort)
-    sock.setTimeout(int(DN_THRIFT_TIMEOUT * 1000))
-    transport = TTransport.TBufferedTransport(sock)
-    protocol = TBinaryProtocol.TBinaryProtocol(transport)
-    client = Datanode.Client(protocol)
+    dn_conf = thrift_util.ConnectionConfig(
+      Datanode.Client,
+      node.host,
+      node.thriftPort,
+      "HDFS Datanode Thrift",
+      use_sasl=self.security_enabled,
+      kerberos_principal=self.dn_kerberos_principal,
+      timeout_seconds=DN_THRIFT_TIMEOUT)
+
+    service, protocol, transport = \
+        thrift_util.connect_to_thrift(dn_conf)
    transport.open()
-    client.close = lambda: transport.close()
-    return client
+    service.close = lambda: transport.close()
+    return service

  @staticmethod
  def _unpack_stat(stat):
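With _connect_dn now routed through connect_to_thrift, datanode connections get the same SASL treatment as namenode ones. A hypothetical caller is sketched below; 'fs' is a HadoopFileSystem and 'node' stands for the Thrift object with .host and .thriftPort that _connect_dn expects.

    # Sketch only: illustrates the open/close contract of _connect_dn.
    def read_from_datanode(fs, node):
      datanode = fs._connect_dn(node)  # transport already open, SASL-wrapped if enabled
      try:
        pass  # issue Datanode.Client calls on 'datanode' here
      finally:
        datanode.close()               # the injected lambda closes the transport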