
[thriftfs] Remove irrelevant parts of thriftfs plugin

* The JT plugin stays, for MR1 job info gathering.
bc Wong, 13 years ago
parent commit 905a8c2a9c

+ 1 - 1
desktop/libs/hadoop/java/pom.xml

@@ -42,7 +42,7 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
+      <artifactId>hadoop-client</artifactId>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>

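The POM hunk above swaps the monolithic hadoop-core artifact for hadoop-client, the aggregation artifact Hadoop publishes for downstream builds. A minimal sketch of the resulting dependency, assuming the version is supplied by a parent POM's dependencyManagement (the comment marks that assumption):

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <!-- version assumed to be inherited from the parent POM's dependencyManagement -->
    </dependency>
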
+ 4 - 5
desktop/libs/hadoop/java/src/main/java/org/apache/hadoop/mapred/ThriftJobTrackerPlugin.java

@@ -39,6 +39,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -339,7 +340,7 @@ public class ThriftJobTrackerPlugin extends JobTrackerPlugin implements Configur
             tcs.setReduceTasks(cs.getReduceTasks());
             tcs.setMaxMapTasks(cs.getMaxMapTasks());
             tcs.setMaxReduceTasks(cs.getMaxReduceTasks());
-            tcs.setState(cs.getJobTrackerState() == State.INITIALIZING ? JobTrackerState.INITIALIZING :
+            tcs.setState(cs.getJobTrackerStatus() == Cluster.JobTrackerStatus.INITIALIZING ? JobTrackerState.INITIALIZING :
                 JobTrackerState.RUNNING);
             tcs.setUsedMemory(cs.getUsedMemory());
             tcs.setMaxMemory(cs.getMaxMemory());
@@ -1037,10 +1038,8 @@ public class ThriftJobTrackerPlugin extends JobTrackerPlugin implements Configur
             }
 
             ThriftJobCounterRollups ret = new ThriftJobCounterRollups();
-            Counters mapCounters = new Counters();
-            jip.getMapCounters(mapCounters);
-            Counters reduceCounters = new Counters();
-            jip.getReduceCounters(reduceCounters);
+            Counters mapCounters = jip.getMapCounters();
+            Counters reduceCounters = jip.getReduceCounters();
             ret.mapCounters = new ThriftGroupList(
                 JTThriftUtils.toThrift(mapCounters));
             ret.reduceCounters = new ThriftGroupList(

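The two ThriftJobTrackerPlugin hunks above track MR1 API churn rather than any behavior change: ClusterStatus.getJobTrackerState() became getJobTrackerStatus(), which returns the org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus enum, and the JobInProgress counter accessors now return a Counters object instead of filling one supplied by the caller. A minimal sketch of the new calling conventions, assuming a ClusterStatus cs and a JobInProgress jip obtained from a live JobTracker (the sketch sits in org.apache.hadoop.mapred, like the plugin itself, so both types resolve without imports):

    package org.apache.hadoop.mapred;

    import org.apache.hadoop.mapreduce.Cluster;

    class Mr1ApiSketch {
      static boolean isInitializing(ClusterStatus cs) {
        // getJobTrackerStatus() replaces the old getJobTrackerState();
        // it returns an enum whose values are INITIALIZING and RUNNING.
        return cs.getJobTrackerStatus() == Cluster.JobTrackerStatus.INITIALIZING;
      }

      static Counters[] jobCounters(JobInProgress jip) {
        // The accessors now construct and return the Counters directly,
        // replacing the old fill-a-caller-supplied-instance style.
        return new Counters[] { jip.getMapCounters(), jip.getReduceCounters() };
      }
    }
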
+ 0 - 263
desktop/libs/hadoop/java/src/main/java/org/apache/hadoop/thriftfs/DatanodePlugin.java

@@ -1,263 +0,0 @@
-/**
- * Licensed to Cloudera, Inc. under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  Cloudera, Inc. licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.thriftfs;
-
-import java.io.EOFException;
-import java.nio.ByteBuffer;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.security.PrivilegedExceptionAction;
-import java.util.zip.CRC32;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.BlockReader;
-import org.apache.hadoop.hdfs.DFSClient.RemoteBlockReader;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.thriftfs.api.Block;
-import org.apache.hadoop.thriftfs.api.BlockData;
-import org.apache.hadoop.thriftfs.api.Datanode;
-import org.apache.hadoop.thriftfs.api.IOException;
-import org.apache.hadoop.thriftfs.api.Namenode;
-import org.apache.hadoop.thriftfs.api.RequestContext;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.TProcessorFactory;
-import org.apache.thrift.transport.TTransport;
-
-public class DatanodePlugin
-  extends org.apache.hadoop.hdfs.server.datanode.DatanodePlugin
-  implements Configurable {
-
-  /**
-   * Default address and port this server will bind to, in case nothing is found
-   * in the configuration object.
-   */
-  public static final String DEFAULT_THRIFT_ADDRESS = "0.0.0.0:0";
-
-  private DataNode datanode;
-  private Thread registerThread;
-  private volatile boolean register;
-
-  static final Log LOG = LogFactory.getLog(DatanodePlugin.class);
-
-  private ThriftPluginServer thriftServer;
-
-  private Configuration conf;
-
-  public DatanodePlugin() {
-  }
-
-
-  class ThriftHandler extends ThriftHandlerBase implements Datanode.Iface {
-
-    private int bufferSize;
-    private CRC32 summer;
-
-    public ThriftHandler(ThriftServerContext context) {
-      super(context);
-      this.bufferSize = conf.getInt("io.file.buffer.size", 4096);
-      this.summer = new CRC32();
-    }
-
-    public BlockData readBlock(RequestContext ctx, final Block block, final long offset,
-        final int length) throws IOException, TException {
-      LOG.debug("readBlock(" + block.blockId + "," + offset + "," + length + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<BlockData>() {
-        public BlockData run() throws IOException {
-
-          BlockData ret = new BlockData();
-          BlockReader reader = null;
-          try {
-            Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>();
-            token.decodeFromUrlString(block.token);
-            reader = RemoteBlockReader.newBlockReader(getSocket(), block.path, block.blockId,
-                token, block.genStamp, offset, length, bufferSize, true, serverContext
-                    .getClientName());
-            byte[] buf = new byte[length];
-            int n = reader.read(buf, 0, length);
-            if (n == -1) {
-              throw new EOFException("EOF reading " + length + " bytes at offset " + offset
-                  + " from " + block);
-            }
-            LOG.debug("readBlock(" + block.blockId + ", " + offset + ", " + length + "): Read " + n
-                + " bytes");
-
-            if (n == length) {
-              // If we read exactly the same number of bytes that was asked for,
-              // we can simply return the buffer directly
-              ret.data = ByteBuffer.wrap(buf);
-            } else {
-              assert n < length;
-              // If we read fewer bytes than they asked for, we need to write
-              // back a smaller byte array. With the appropriate thrift hook
-              // we could avoid this copy, too.
-              byte[] data = new byte[n];
-              System.arraycopy(buf, 0, ret.data, 0, n);
-              ret.data = ByteBuffer.wrap(data);
-            }
-            ret.length = n;
-
-            summer.update(ret.data.array());
-            ret.crc = (int) summer.getValue();
-            summer.reset();
-            LOG.debug("readBlock(" + block.blockId + ", " + offset + ", " + length + "): CRC32: "
-                + ret.crc);
-          } catch (Throwable t) {
-            LOG.warn("readBlock(" + block.blockId + ", " + offset + ", " + length + "): Failed", t);
-            throw ThriftUtils.toThrift(t);
-          } finally {
-            if (reader != null) {
-              try {
-                reader.close();
-              } catch (Throwable t) {
-                LOG.warn("readBlock(" + block.blockId + ", " + offset + ", " + length
-                    + "): Cannot close block reader", t);
-              }
-            }
-          }
-          return ret;
-        }
-      });
-    }
-
-    private Socket getSocket() throws java.io.IOException {
-      InetSocketAddress addr = datanode.getSelfAddr();
-      return new Socket(addr.getAddress(), addr.getPort());
-    }
-  }
-
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-
-  @Override
-  public void start(Object service) {
-    ThriftUtils.initConfigResource();
-    this.datanode = (DataNode)service;
-    try {
-      InetSocketAddress address = NetUtils.createSocketAddr(
-        conf.get(ThriftFsConfig.DFS_THRIFT_DATANODE_ADDR_KEY, DEFAULT_THRIFT_ADDRESS));
-
-      thriftServer = new ThriftPluginServer(
-        address, new ProcessorFactory());
-      thriftServer.setConf(conf);
-      thriftServer.start();
-    } catch (Exception e) {
-      throw new RuntimeException("Could not start Thrift Datanode Plugin", e);
-    }
-  }
-
-  @Override
-  public void initialRegistrationComplete() {
-    registerWithNameNode();
-  }
-
-  @Override
-  public void reregistrationComplete() {
-    registerWithNameNode();
-  }
-
-  private void registerWithNameNode() {
-    register = true;
-    registerThread = new Thread(new Runnable() {
-        public void run() {
-          Namenode.Client namenode = null;
-          String name = datanode.dnRegistration.getName();
-          String storageId = datanode.dnRegistration.getStorageID();
-          Random random = null;
-
-          while (register) {
-            try {
-              if (namenode == null) {
-                namenode = ThriftUtils.createNamenodeClient(conf);
-              }
-              namenode.datanodeUp(name, storageId, thriftServer.getPort());
-              register = false;
-              LOG.info("Datanode " + name + " registered Thrift port " +
-                       thriftServer.getPort());
-            } catch (Throwable t) {
-              // Try again in 30-90 seconds
-              if (random == null) {
-                random = new Random();
-              }
-              long sleepTime = (long) (30 + 60.0 * random.nextFloat());
-              LOG.info("Datanode registration failed. Will retry again in " + sleepTime + " seconds", t);
-              namenode = null;
-              try {
-                Thread.sleep(sleepTime * 1000);
-              } catch (InterruptedException e) {}
-            }
-          }
-        }
-      });
-    registerThread.start();
-  }
-
-  @Override
-  public void stop() {
-    register = false;
-    try {
-      registerThread.join();
-    } catch (Throwable t) {}
-
-    try {
-      Namenode.Client namenode = ThriftUtils.createNamenodeClient(conf);
-      namenode.datanodeDown(datanode.dnRegistration.getName(),
-                            datanode.dnRegistration.getStorageID(),
-                            thriftServer.getPort());
-    } catch (Throwable t) {}
-
-    thriftServer.stop();
-  }
-
-  @Override
-  public void close() {
-    stop();
-  }
-
-  class ProcessorFactory extends TProcessorFactory {
-
-    ProcessorFactory() {
-      super(null);
-    }
-
-    @Override
-    public TProcessor getProcessor(TTransport t) {
-      ThriftServerContext context = new ThriftServerContext(t);
-
-      Datanode.Iface impl =
-        ThriftUtils.SecurityCheckingProxy.create(
-          conf,
-          new ThriftHandler(context),
-          Datanode.Iface.class);
-      return new Datanode.Processor(impl);
-    }
-  }
-}

+ 0 - 484
desktop/libs/hadoop/java/src/main/java/org/apache/hadoop/thriftfs/NamenodePlugin.java

@@ -1,484 +0,0 @@
-/**
- * Licensed to Cloudera, Inc. under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  Cloudera, Inc. licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.thriftfs;
-
-import java.io.FileNotFoundException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.thriftfs.api.Block;
-import org.apache.hadoop.thriftfs.api.Constants;
-import org.apache.hadoop.thriftfs.api.ContentSummary;
-import org.apache.hadoop.thriftfs.api.IOException;
-import org.apache.hadoop.thriftfs.api.Namenode;
-import org.apache.hadoop.thriftfs.api.RequestContext;
-import org.apache.hadoop.thriftfs.api.Stat;
-import org.apache.hadoop.thriftfs.api.ThriftDelegationToken;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.TProcessorFactory;
-import org.apache.thrift.transport.TTransport;
-
-public class NamenodePlugin extends org.apache.hadoop.hdfs.server.namenode.NamenodePlugin implements
-    Configurable {
-
-  /**
-   * Default address and port this server will bind to, in case nothing is found
-   * in the configuration object.
-   */
-  public static final String DEFAULT_THRIFT_ADDRESS = "0.0.0.0:10090";
-
-  private NameNode namenode;
-
-  private static Map<DatanodeID, Integer> thriftPorts = Collections.synchronizedMap(new HashMap<DatanodeID, Integer>());
-
-  static final Log LOG = LogFactory.getLog(NamenodePlugin.class);
-
-  private Configuration conf;
-  private ThriftPluginServer thriftServer;
-
-  /** Java server-side implementation of the 'Namenode' Thrift interface. */
-  class ThriftHandler extends ThriftHandlerBase implements Namenode.Iface {
-
-    public ThriftHandler(ThriftServerContext context) {
-      super(context);
-    }
-
-    public void chmod(RequestContext ctx, final String path, final short mode) throws IOException {
-      LOG.debug("chmod(" + path + ", " + mode + "): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          namenode.setPermission(path, new FsPermission(mode));
-          return null;
-        }
-      });
-    }
-
-    public void chown(RequestContext ctx, final String path, final String owner, final String group)
-        throws IOException {
-      LOG.debug("chown(" + path + "," + owner + "," + group + "): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          // XXX Looks like namenode.setOwner() does not complain about
-          // this...
-          if (owner == null && group == null) {
-            throw new IllegalArgumentException("Both 'owner' and 'group' are null");
-          }
-          namenode.setOwner(path, owner, group);
-          return null;
-        }
-      });
-    }
-
-    public List<Long> df(RequestContext ctx) {
-      LOG.debug("Entering df()");
-      return assumeUserContextAndExecute(ctx, new PrivilegedAction<List<Long>>() {
-        public List<Long> run() {
-          long[] stats = namenode.getStats();
-          List<Long> ret = new ArrayList<Long>();
-          // capacityTotal
-          ret.add(stats[0]);
-          // capacityUsed
-          ret.add(stats[1]);
-          // capacityRemaining
-          ret.add(stats[2]);
-          LOG.debug("df(): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public void enterSafeMode(RequestContext ctx) throws IOException {
-      LOG.debug("enterSafeMode(): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-          return null;
-        }
-      });
-    }
-
-    public List<Block> getBlocks(RequestContext ctx, final String path, final long offset,
-        final long length) throws IOException {
-      LOG.debug("getBlocks(" + path + "," + offset + "," + length + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<List<Block>>() {
-        public List<Block> run() throws java.io.IOException {
-          List<Block> ret = new ArrayList<Block>();
-          LocatedBlocks blocks = namenode.getBlockLocations(path, offset, length);
-          if (blocks != null) {
-            // blocks may be null if offset is past the end of the file
-            for (LocatedBlock b : blocks.getLocatedBlocks()) {
-              ret.add(ThriftUtils.toThrift(b, path, thriftPorts));
-            }
-          }
-          LOG.debug("getBlocks(" + path + "," + offset + "," + length + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public long getPreferredBlockSize(RequestContext ctx, final String path) throws IOException {
-      LOG.debug("getPreferredBlockSize(" + path + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Long>() {
-        public Long run() throws java.io.IOException {
-          long ret = namenode.getPreferredBlockSize(path);
-          LOG.debug("getPreferredBlockSize(" + path + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public boolean isInSafeMode(RequestContext ctx) throws IOException {
-      LOG.debug("isInSafeMode(): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Boolean>() {
-        public Boolean run() throws java.io.IOException {
-          boolean ret = namenode.setSafeMode(SafeModeAction.SAFEMODE_GET);
-          LOG.debug("isInSafeMode(): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public void leaveSafeMode(RequestContext ctx) throws IOException {
-      LOG.debug("leaveSafeMode(): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-          return null;
-        }
-      });
-    }
-
-    public List<Stat> ls(RequestContext ctx, final String path) throws IOException {
-      LOG.debug("ls(" + path + "):Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<List<Stat>>() {
-        public List<Stat> run() throws java.io.IOException {
-          List<Stat> ret = new ArrayList<Stat>();
-          byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
-          DirectoryListing listing;
-          do {
-            listing = namenode.getListing(path, lastReturnedName);
-            if (listing == null) {
-              throw new FileNotFoundException("Not found: " + path);
-            }
-            for (HdfsFileStatus f : listing.getPartialListing()) {
-              ret.add(fileStatusToStat(f, path));
-            }
-            lastReturnedName = listing.getLastName();
-          } while (listing.hasMore());
-          LOG.debug("ls(" + path + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public boolean mkdirhier(RequestContext ctx, final String path, final short perms)
-        throws IOException {
-      LOG.debug("mkdirhier(" + path + ", " + perms + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Boolean>() {
-        public Boolean run() throws java.io.IOException {
-          boolean ret = namenode.mkdirs(path, new FsPermission(perms));
-          LOG.debug("mkdirhier(" + path + ", " + perms + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public void refreshNodes(RequestContext ctx) throws IOException {
-      LOG.debug("refreshNodes(): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          namenode.refreshNodes();
-          return null;
-        }
-      });
-    }
-
-    public boolean rename(RequestContext ctx, final String path, final String newPath)
-        throws IOException {
-      LOG.debug("rename(" + path + ", " + newPath + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Boolean>() {
-        public Boolean run() throws java.io.IOException {
-          boolean ret = namenode.rename(path, newPath);
-          LOG.debug("rename(" + path + ", " + newPath + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public void reportBadBlocks(RequestContext ctx, final List<Block> blocks) throws IOException {
-      LOG.debug("reportBadBlocks(" + blocks + "): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          int n = blocks.size();
-          LocatedBlock[] lb = new LocatedBlock[n];
-          for (int i = 0; i < n; ++i) {
-            lb[i] = ThriftUtils.fromThrift(blocks.get(i));
-          }
-          namenode.reportBadBlocks(lb);
-          return null;
-        }
-      });
-    }
-
-    public void setQuota(RequestContext ctx, final String path, long namespaceQuota,
-        long diskspaceQuota) throws IOException {
-      LOG.debug("setQuota(" + path + "," + namespaceQuota + "," + diskspaceQuota + "): Entering");
-      if (namespaceQuota == Constants.QUOTA_DONT_SET) {
-        namespaceQuota = FSConstants.QUOTA_DONT_SET;
-      }
-      if (namespaceQuota == Constants.QUOTA_RESET) {
-        namespaceQuota = FSConstants.QUOTA_RESET;
-      }
-      if (diskspaceQuota == Constants.QUOTA_DONT_SET) {
-        diskspaceQuota = FSConstants.QUOTA_DONT_SET;
-      }
-      if (diskspaceQuota == Constants.QUOTA_RESET) {
-        diskspaceQuota = FSConstants.QUOTA_RESET;
-      }
-      final long finalNamespaceQuota = namespaceQuota;
-      final long finalDiskspaceQuota = diskspaceQuota;
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          LOG.debug("setQuota(" + path + "," + finalNamespaceQuota + "," + finalDiskspaceQuota
-              + "): Quota values translated");
-          namenode.setQuota(path, finalNamespaceQuota, finalDiskspaceQuota);
-          return null;
-        }
-      });
-    }
-
-    public boolean setReplication(RequestContext ctx, final String path, final short repl)
-        throws IOException {
-      LOG.debug("setReplication(" + path + "," + repl + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Boolean>() {
-        public Boolean run() throws java.io.IOException {
-          return namenode.setReplication(path, repl);
-        }
-      });
-    }
-
-    public Stat stat(RequestContext ctx, final String path) throws IOException {
-      LOG.debug("stat(" + path + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Stat>() {
-        public Stat run() throws java.io.IOException {
-          Stat ret = fileStatusToStat(namenode.getFileInfo(path), path);
-          LOG.debug("stat(" + path + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public ContentSummary getContentSummary(RequestContext ctx, final String path)
-        throws IOException {
-      LOG.debug("getContentSummary(" + path + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<ContentSummary>() {
-        public ContentSummary run() throws java.io.IOException {
-          ContentSummary cs = getContentSummary(path);
-          LOG.debug("getContentSummary(" + path + "): Returning " + cs);
-          return cs;
-        }
-      });
-    }
-
-    public List<ContentSummary> multiGetContentSummary(RequestContext ctx, final List<String> paths)
-        throws IOException {
-      LOG.debug("multiGetContentSummary(" + paths + "): Entering");
-      return assumeUserContextAndExecute(ctx,
-          new PrivilegedExceptionAction<List<ContentSummary>>() {
-            public List<ContentSummary> run() throws java.io.IOException {
-              List<ContentSummary> ret = new ArrayList<ContentSummary>();
-              for (String path : paths) {
-                ret.add(getContentSummary(path));
-              }
-              LOG.debug("multiGetContentSummary(" + paths + "): Returning " + ret);
-              return ret;
-            }
-          });
-    }
-
-    private ContentSummary getContentSummary(String path) throws java.io.IOException {
-      return ThriftUtils.toThrift(namenode.getContentSummary(path), path);
-    }
-
-    public boolean unlink(RequestContext ctx, final String path, final boolean recursive)
-        throws IOException {
-      LOG.debug("unlink(" + path + "," + recursive + "): Entering");
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Boolean>() {
-        public Boolean run() throws java.io.IOException, TException {
-          boolean ret = namenode.delete(path, recursive);
-          LOG.debug("unlink(" + path + "," + recursive + "): Returning " + ret);
-          return ret;
-        }
-      });
-    }
-
-    public void utime(RequestContext ctx, final String path, final long atime, final long mtime)
-        throws IOException {
-      LOG.debug("utime(" + path + "," + atime + "," + mtime + "): Entering");
-      assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<Void>() {
-        public Void run() throws java.io.IOException {
-          if (mtime == -1 && atime == -1) {
-            LOG.debug("utime(" + path + "," + atime + "," + mtime
-                + "): Setting mtime and atime to now");
-            long now = System.currentTimeMillis();
-            namenode.setTimes(path, now, now);
-          } else {
-            namenode.setTimes(path, mtime, atime);
-          }
-          return null;
-        }
-      });
-    }
-
-    private Stat fileStatusToStat(HdfsFileStatus f, String parentPath) throws java.io.IOException {
-      if (f == null) {
-        throw new FileNotFoundException();
-      }
-
-      Stat st = new Stat();
-      st.path = f.getFullPath(new Path(parentPath)).toString();
-      st.isDir = f.isDir();
-      st.atime = f.getAccessTime();
-      st.mtime = f.getModificationTime();
-      st.perms = f.getPermission().toShort();
-      st.owner = f.getOwner();
-      st.group = f.getGroup();
-      if (!st.isDir) {
-        st.length = f.getLen();
-        st.blockSize = f.getBlockSize();
-        st.replication = f.getReplication();
-      }
-      return st;
-    }
-
-    public void datanodeDown(String name, String storage, int thriftPort) {
-      DatanodeID dnId = new DatanodeID(name, storage, -1, -1);
-      LOG.info("Datanode " + dnId + ": Thrift port " + thriftPort + " closed");
-      thriftPorts.remove(dnId);
-    }
-
-    public void datanodeUp(String name, String storage, int thriftPort) {
-      DatanodeID dnId = new DatanodeID(name, storage, -1, -1);
-      LOG.info("Datanode " + dnId + ": " + "Thrift port " + thriftPort + " open");
-      thriftPorts.put(dnId, thriftPort);
-    }
-
-    @Override
-    public ThriftDelegationToken getDelegationToken(RequestContext ctx, final String renewer) throws IOException,
-        TException {
-      return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<ThriftDelegationToken>() {
-        public ThriftDelegationToken run() throws java.io.IOException {
-          Token<DelegationTokenIdentifier> delegationToken = namenode.getDelegationToken(new Text(renewer));
-          return ThriftUtils.toThrift(delegationToken, namenode.getNameNodeAddress());
-        }
-      });
-    }
-  }
-
-  public NamenodePlugin() {
-  }
-
-  @Override
-  public void start(Object service) {
-    ThriftUtils.initConfigResource();
-    this.namenode = (NameNode) service;
-    try {
-      InetSocketAddress address = NetUtils.createSocketAddr(
-          conf.get(ThriftFsConfig.DFS_THRIFT_ADDR_KEY,
-                   DEFAULT_THRIFT_ADDRESS));
-
-      this.thriftServer = new ThriftPluginServer(address, new ProcessorFactory());
-      thriftServer.setConf(conf);
-      thriftServer.start();
-      // The port may have been 0, so we update it.
-      conf.set(ThriftFsConfig.DFS_THRIFT_ADDR_KEY,
-               address.getHostName() + ":" + thriftServer.getPort());
-    } catch (Exception e) {
-      throw new RuntimeException("Cannot start Thrift namenode plug-in", e);
-    }
-  }
-
-  @Override
-  public void stop() {
-    if (thriftServer != null) {
-      thriftServer.stop();
-    }
-  }
-
-  @Override
-  public void close() {
-    if (thriftServer != null) {
-      thriftServer.close();
-    }
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  class ProcessorFactory extends TProcessorFactory {
-
-    ProcessorFactory() {
-      super(null);
-    }
-
-    @Override
-    public TProcessor getProcessor(TTransport t) {
-      ThriftServerContext context = new ThriftServerContext(t);
-      Namenode.Iface impl =
-        ThriftUtils.SecurityCheckingProxy.create(
-          conf,
-          new ThriftHandler(context),
-          Namenode.Iface.class);
-      return new Namenode.Processor(impl);
-    }
-  }
-}

+ 1 - 165
desktop/libs/hadoop/java/src/main/java/org/apache/hadoop/thriftfs/ThriftUtils.java

@@ -19,12 +19,8 @@ package org.apache.hadoop.thriftfs;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -32,34 +28,16 @@ import java.lang.reflect.Method;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.thriftfs.api.Block;
-import org.apache.hadoop.thriftfs.api.Constants;
-import org.apache.hadoop.thriftfs.api.ContentSummary;
-import org.apache.hadoop.thriftfs.api.DatanodeInfo;
-import org.apache.hadoop.thriftfs.api.DatanodeState;
 import org.apache.hadoop.thriftfs.api.IOException;
-import org.apache.hadoop.thriftfs.api.Namenode;
 import org.apache.hadoop.thriftfs.api.ThriftDelegationToken;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
 
 
 public class ThriftUtils {
@@ -73,110 +51,6 @@ public class ThriftUtils {
     Configuration.addDefaultResource("thriftfs-site.xml");
   }
 
-  public static LocatedBlock fromThrift(Block block) {
-    if (block == null) {
-      return null;
-    }
-
-    org.apache.hadoop.hdfs.protocol.Block b = new org.apache.hadoop.hdfs.protocol.Block(
-        block.blockId, block.numBytes, block.genStamp);
-
-    int n = block.nodes.size();
-    org.apache.hadoop.hdfs.protocol.DatanodeInfo[] nodes =
-        new org.apache.hadoop.hdfs.protocol.DatanodeInfo[n];
-    for (int i = 0; i < n; ++i) {
-      nodes[i] = fromThrift(block.nodes.get(0));
-    }
-
-    LocatedBlock lb = new LocatedBlock(b, nodes, block.startOffset);
-    return lb;
-  }
-
-  public static Block toThrift(LocatedBlock block, String path,
-      Map<DatanodeID, Integer> thriftPorts) throws java.io.IOException {
-    if (block == null) {
-      return new Block();
-    }
-
-    List<DatanodeInfo> nodes = new ArrayList<DatanodeInfo>();
-    for (org.apache.hadoop.hdfs.protocol.DatanodeInfo n: block.getLocations()) {
-      DatanodeInfo node = toThrift(n, thriftPorts); 
-      if (node.getThriftPort() != Constants.UNKNOWN_THRIFT_PORT) {
-        nodes.add(node);
-      }
-    }
-
-    org.apache.hadoop.hdfs.protocol.Block b = block.getBlock();
-    return new Block(b.getBlockId(), path, b.getNumBytes(),
-                     b.getGenerationStamp(), nodes, block.getStartOffset(), block.getBlockToken().encodeToUrlString());
-  }
-
-  public static ContentSummary toThrift(org.apache.hadoop.fs.ContentSummary cs, String path) {
-    ContentSummary tcs = new ContentSummary();
-    tcs.fileCount = cs.getFileCount();
-    tcs.directoryCount = cs.getDirectoryCount();
-    tcs.quota = cs.getQuota();
-    tcs.spaceConsumed = cs.getSpaceConsumed();
-    tcs.spaceQuota = cs.getSpaceQuota();
-    tcs.path = path;
-    return tcs;
-  }
-
-  public static org.apache.hadoop.hdfs.protocol.DatanodeInfo fromThrift(
-      DatanodeInfo node) {
-    if (node == null) {
-      return null;
-    }
-
-    org.apache.hadoop.hdfs.protocol.DatanodeInfo ret =
-        new org.apache.hadoop.hdfs.protocol.DatanodeInfo();
-    ret.name = node.name;
-    ret.storageID = node.storageID;
-    ret.setCapacity(node.capacity);
-    ret.setHostName(node.host);
-    ret.setXceiverCount(node.xceiverCount);
-    ret.setRemaining(node.remaining);
-    if (node.state == DatanodeState.DECOMMISSIONED) {
-      ret.setDecommissioned();
-    }
-    return ret;
-  }
-
-  public static DatanodeInfo toThrift(
-      org.apache.hadoop.hdfs.protocol.DatanodeInfo node,
-      Map<DatanodeID, Integer> thriftPorts) {
-    if (node == null) {
-      return new DatanodeInfo();
-    }
-
-    DatanodeInfo ret = new DatanodeInfo();
-    ret.name = node.getName();
-    ret.storageID = node.storageID;
-    ret.host = node.getHost();
-    Integer p = thriftPorts.get(node);
-    if (p == null) {
-      LOG.warn("Unknown Thrift port for datanode " + node.name);
-      ret.thriftPort = Constants.UNKNOWN_THRIFT_PORT;
-    } else {
-      ret.thriftPort = p.intValue();
-    }
-
-    ret.capacity = node.getCapacity();
-    ret.dfsUsed = node.getDfsUsed();
-    ret.remaining = node.getRemaining();
-    ret.xceiverCount = node.getXceiverCount();
-    ret.state = node.isDecommissioned() ? DatanodeState.DECOMMISSIONED :
-        node.isDecommissionInProgress() ? DatanodeState.DECOMMISSION_INPROGRESS :
-        DatanodeState.NORMAL_STATE;
-    ret.httpPort = node.getInfoPort();
-
-    long timestamp = node.getLastUpdate();
-    long currentTime = System.currentTimeMillis();
-    ret.millisSinceUpdate = currentTime - timestamp;
-
-    return ret;
-  }
-
   public static IOException toThrift(Throwable t) {
     if (t == null) {
       return new IOException();
@@ -217,11 +91,10 @@ public class ThriftUtils {
     public Object invoke(Object proxy, Method m, Object[] args)
       throws Throwable
     {
-      Object result;
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Call " + wrapped.getClass() + "." + m.getName()
-                    + "(" + StringUtils.joinObjects(", ", Arrays.asList(args)) + ")");
+                    + "(" + StringUtils.join(", ", Arrays.asList(args)) + ")");
         }
         authorizeCall(m);
 
@@ -259,43 +132,6 @@ public class ThriftUtils {
     }
   }
 
-  /**
-   * Creates a Thrift name node client.
-   * 
-   * @param conf the HDFS instance
-   * @return a Thrift name node client.
-   */
-  public static Namenode.Client createNamenodeClient(Configuration conf)
-      throws Exception {
-    String s = conf.get(ThriftFsConfig.DFS_THRIFT_ADDR_KEY,
-                        NamenodePlugin.DEFAULT_THRIFT_ADDRESS);
-    // TODO(todd) use fs.default.name here if set to 0.0.0.0 - but share this with the code in
-    // SecondaryNameNode that does the same
-    InetSocketAddress addr = NetUtils.createSocketAddr(s);
-
-    // If the NN thrift server is listening on the wildcard address (0.0.0.0),
-    // use the external IP from the NN configuration, but with the port listed
-    // in the thrift config.
-    if (addr.getAddress().isAnyLocalAddress()) {
-      InetSocketAddress nnAddr = NameNode.getAddress(conf);
-      addr = new InetSocketAddress(nnAddr.getAddress(), addr.getPort());
-    }
-
-    LOG.info("Creating NameNode client against " + addr);
-    TTransport t = new TSocket(addr.getHostName(), addr.getPort());
-    if (UserGroupInformation.isSecurityEnabled()) {
-      t = new HadoopThriftAuthBridge.Client()
-        .createClientTransport(
-          conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
-          addr.getHostName(),
-          "KERBEROS", t);
-    }
-
-    t.open();
-    TProtocol p = new TBinaryProtocol(t);
-    return new Namenode.Client(p);
-  }
-
   public static ThriftDelegationToken toThrift(Token<? extends AbstractDelegationTokenIdentifier> delegationToken,
       InetSocketAddress address) throws java.io.IOException {
     String serviceAddress = InetAddress.getByName(address.getHostName()).getHostAddress() + ":"

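The logging hunk in ThriftUtils above also replaces StringUtils.joinObjects, a Cloudera-patched helper that stock Hadoop does not ship, with the standard org.apache.hadoop.util.StringUtils.join(CharSequence, Iterable), which stringifies each element and inserts the separator. A small usage sketch with hypothetical values:

    import java.util.Arrays;
    import org.apache.hadoop.util.StringUtils;

    public class JoinSketch {
      public static void main(String[] args) {
        // Hypothetical RPC arguments, as they might reach the debug log.
        Object[] callArgs = { "/user/test", "hue", "hadoop" };
        // Prints: chown(/user/test, hue, hadoop)
        System.out.println("chown(" + StringUtils.join(", ", Arrays.asList(callArgs)) + ")");
      }
    }
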
+ 0 - 69
desktop/libs/hadoop/java/src/test/java/org/apache/hadoop/thriftfs/Helper.java

@@ -18,8 +18,6 @@
 package org.apache.hadoop.thriftfs;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -31,20 +29,8 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.thriftfs.api.Datanode;
-import org.apache.hadoop.thriftfs.api.DatanodeInfo;
 import org.apache.hadoop.thriftfs.api.RequestContext;
-import org.apache.hadoop.thriftfs.ThriftFsConfig;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
 
 /**
  * Helper class to create Thrift clients.
@@ -60,8 +46,6 @@ public class Helper {
     conf.set(ThriftFsConfig.DFS_THRIFT_ADDR_KEY, "127.0.0.1:10090");
     conf.set(ThriftFsConfig.DFS_THRIFT_DATANODE_ADDR_KEY, "127.0.0.1:0");
     conf.set("slave.host.name", "127.0.0.1");
-    conf.setStrings("dfs.namenode.plugins", NamenodePlugin.class.getName());
-    conf.setStrings("dfs.datanode.plugins", DatanodePlugin.class.getName());
     conf.setBoolean("dfs.permissions", true);
     conf.setBoolean("dfs.support.append", true);
 
@@ -90,30 +74,6 @@ public class Helper {
     return ctx;
   }
 
-  /** Create a DFS cluster. */
-  public static MiniDFSCluster createCluster(short replication)
-      throws IOException {
-    Configuration conf = Helper.createConf();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, replication, true, null);
-    cluster.waitActive();
-    return cluster;
-  }
-
-  /**
-   * Creates a Thrift data node client.
-   * 
-   * @param addr address of the data node
-   * @return a Thrift data node client.
-   */
-  public static Datanode.Client createDatanodeClient(DatanodeInfo node)
-      throws Exception {
-    InetSocketAddress addr = new InetSocketAddress(node.host, node.thriftPort);
-    TTransport t = new TSocket(addr.getHostName(), addr.getPort());
-    t.open();
-    TProtocol p = new TBinaryProtocol(t);
-    return new Datanode.Client(p);
-  }
-
   /** Create a file on the default file system. */
   public static void createFile(FileSystem fs, String path, short repl,
       short perms, long blockSize, int length) throws Exception {
@@ -127,33 +87,4 @@ public class Helper {
     fs.setPermission(p, new FsPermission(perms));
     fs.setOwner(p, TEST_USER, TEST_GROUP);
   }
-
-  public static void main(String[] args) throws Exception {
-    if (args.length != 4) {
-      System.err.println("Usage: " + Helper.class.getSimpleName()
-          + " <file name> <repl> <block size> <length>");
-      System.exit(1);
-    }
-
-    Configuration.addDefaultResource("core-site.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
-    Configuration conf = new Configuration();
-
-    DistributedFileSystem dfs = new DistributedFileSystem();
-    dfs.initialize(new URI(conf.get("fs.default.name")), conf);
-
-    for (;;) {
-      if (!dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)) {
-        org.apache.hadoop.hdfs.protocol.DatanodeInfo[] nodes =
-          dfs.getClient().namenode.getDatanodeReport(DatanodeReportType.LIVE);
-        if (nodes.length > 0) {
-          break;
-        }
-      }
-      Thread.sleep(100);
-    }
-
-    createFile(dfs, args[0], Short.parseShort(args[1]),
-        (short)0644, Long.parseLong(args[2]), Integer.parseInt(args[3]));
-  }
 }

+ 0 - 152
desktop/libs/hadoop/java/src/test/java/org/apache/hadoop/thriftfs/TestDatanodePlugin.java

@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.thriftfs;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.thriftfs.api.Block;
-import org.apache.hadoop.thriftfs.api.BlockData;
-import org.apache.hadoop.thriftfs.api.Datanode;
-import org.apache.hadoop.thriftfs.api.DatanodeInfo;
-import org.apache.hadoop.thriftfs.api.Namenode;
-import org.apache.hadoop.thriftfs.api.RequestContext;
-import org.apache.log4j.Level;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-/**
- * Unit tests for the datanode Thrift interface
- */
-public class TestDatanodePlugin  {
-  private static MiniDFSCluster cluster;
-  private static FileSystem fs;
-  private static Namenode.Client namenode;
-  private Datanode.Client datanode;
-  private static RequestContext ctx;
-
-  private static final String testFile = "/test-file";
-  private static Path testFilePath = new Path(testFile);
-  private static final short REPLICATION = 3;
-  private final int BUFFER_SIZE = 4096;
-  private final int BLOCK_SIZE = 8192;
-
-  private static final Log LOG = LogFactory.getLog(TestDatanodePlugin.class);
-
-  // Raise verbosity level of Thrift classes.
-  static {
-    ((Log4JLogger) DatanodePlugin.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) NamenodePlugin.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) ThriftPluginServer.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) ThriftUtils.LOG).getLogger().setLevel(Level.ALL);
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = Helper.createCluster(REPLICATION);
-    fs = cluster.getFileSystem();
-    Configuration conf = Helper.createConf();
-    namenode = ThriftUtils.createNamenodeClient(conf);
-    ctx = Helper.createRequestContext(true);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    fs.delete(testFilePath, false);
-    cluster.shutdown();
-  }
-  
-  @Test
-  public void testRead() throws Exception {
-    createFile(32);
-    List<Block> blocks = namenode.getBlocks(ctx, testFile, 0, 32);
-    assertEquals(1, blocks.size());
-
-    Block b = blocks.get(0);
-    LOG.debug("Got block: " + b);
-    assertEquals(REPLICATION, b.nodes.size());
-    DatanodeInfo node = b.nodes.get(0);
-    datanode = Helper.createDatanodeClient(node);
-
-    BlockData blockData = datanode.readBlock(ctx, b, 0, 32);
-    LOG.debug("Read block: " + blockData);
-    assertEquals("0000 - Thirty-two bytes in a row",
-                 new String(blockData.getData()));
-
-    createFile(BLOCK_SIZE + 32);
-    blocks = namenode.getBlocks(ctx, testFile, 0, BLOCK_SIZE + 32);
-    assertEquals(2, blocks.size());
-
-    b = blocks.get(0);
-    assertEquals(REPLICATION, b.nodes.size());
-    node = b.nodes.get(0);
-    datanode = Helper.createDatanodeClient(node);
-
-    blockData = datanode.readBlock(ctx, b, 0, BLOCK_SIZE);
-    assertEquals(BLOCK_SIZE, blockData.length);
-    String data = new String(blockData.getData());
-    assertTrue(data.startsWith("0000 - Thirty-two bytes in a row"));
-    assertTrue(data.endsWith("0255 - Thirty-two bytes in a row"));
-
-    blockData = datanode.readBlock(ctx, b, 32, 32);
-    assertEquals(32, blockData.length);
-    assertEquals("0001 - Thirty-two bytes in a row",
-                 new String(blockData.getData()));
-
-    b = blocks.get(1);
-    blockData = datanode.readBlock(ctx, b, 0, 32);
-    assertEquals(32, blockData.length);
-    assertEquals("0256 - Thirty-two bytes in a row",
-                 new String(blockData.getData()));
-  }
-
-  private void createFile(int length) throws Exception {
-    LOG.debug("Creating " + testFilePath);
-    FSDataOutputStream out = fs.create(testFilePath, true, BUFFER_SIZE,
-        REPLICATION, BLOCK_SIZE);
-    out.write(testData(length));
-    LOG.debug("Closing " + testFilePath);
-    out.close();
-
-    assertTrue(fs.exists(testFilePath));
-    FileStatus st = fs.getFileStatus(testFilePath);
-    assertEquals(length, st.getLen());
-  }
-
-  private byte[] testData(int length) {
-    assertTrue("Invalid data length", length % 32 == 0);
-
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < length / 32; ++i) {
-      sb.append(String.format("%04d - Thirty-two bytes in a row", i));
-    }
-    return sb.toString().getBytes();
-  }
-}

+ 0 - 456
desktop/libs/hadoop/java/src/test/java/org/apache/hadoop/thriftfs/TestNamenodePlugin.java

@@ -1,456 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.thriftfs;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.thriftfs.api.Block;
-import org.apache.hadoop.thriftfs.api.Constants;
-import org.apache.hadoop.thriftfs.api.ContentSummary;
-import org.apache.hadoop.thriftfs.api.IOException;
-import org.apache.hadoop.thriftfs.api.Namenode;
-import org.apache.hadoop.thriftfs.api.RequestContext;
-import org.apache.hadoop.thriftfs.api.Stat;
-import org.apache.log4j.Level;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-/**
- * Unit tests for the name node Thrift interface
- */
-public class TestNamenodePlugin {
-
-  private static MiniDFSCluster cluster;
-  private static Namenode.Client namenode;
-  private static RequestContext ctx;
-  private static RequestContext unprivilegedCtx;
-  private static FileSystem fs;
-
-  private static final short REPLICATION = 3;
-
-  private static final String testFile = "/test-file";
-  private static Path testFilePath = new Path(testFile);
-  private final short PERMS = (short) 0644;
-
-  // Raise verbosity level of Thrift classes.
-  static {
-    ((Log4JLogger) NamenodePlugin.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) ThriftPluginServer.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) ThriftUtils.LOG).getLogger().setLevel(Level.ALL);
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = Helper.createCluster(REPLICATION);
-    fs = cluster.getFileSystem();
-    Configuration conf = Helper.createConf();
-    namenode = ThriftUtils.createNamenodeClient(conf);
-    ctx = Helper.createRequestContext(true);
-    unprivilegedCtx = Helper.createRequestContext(false);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    cluster.shutdown();
-  }
-  
-  @After
-  public void cleanup() throws Exception {
-    fs.delete(testFilePath, false);
-  }
-
-  @Test
-  public void testChmod() throws Exception {
-    assertFalse(fs.exists(testFilePath));
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    assertEquals(PERMS,
-        fs.getFileStatus(testFilePath).getPermission().toShort());
-
-    namenode.chmod(ctx, testFile, (short) 0600);
-    assertEquals((short) 0600,
-        fs.getFileStatus(testFilePath).getPermission().toShort());
-  }
-
-  @Test
-  public void testChown() throws Exception {
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    FileStatus st = fs.getFileStatus(testFilePath);
-    assertEquals(Helper.TEST_USER, st.getOwner());
-    assertEquals(Helper.TEST_GROUP, st.getGroup());
-
-    namenode.chown(ctx, testFile, "foo", null);
-    st = fs.getFileStatus(testFilePath);
-    assertEquals("foo", st.getOwner());
-    assertEquals(Helper.TEST_GROUP, st.getGroup());
-
-    namenode.chown(ctx, testFile, null, "foo-group");
-    st = fs.getFileStatus(testFilePath);
-    assertEquals("foo", st.getOwner());
-    assertEquals("foo-group", st.getGroup());
-
-    try {
-      namenode.chown(ctx, testFile, null, null);
-      fail("chmod() needs non-null owner or group");
-    } catch (IOException e) {
-    }
-  }
-
-  @Test
-  public void testDf() throws Exception {
-    List<Long> st = namenode.df(ctx);
-    assertNotNull(st);
-    assertEquals(3, st.size());
-    for (long val : st) {
-      assertTrue(val > 0);
-    }
-  }
-
-  @Test
-  public void testGetPreferredBlockSize() throws Exception {
-    long bs = 1024;
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, bs, 0);
-    assertTrue(fs.exists(testFilePath));
-    assertEquals(bs, namenode.getPreferredBlockSize(ctx, testFile));
-
-    bs /= 2;
-    assertTrue(namenode.unlink(ctx, testFile, false));
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, bs, 0);
-    assertTrue(fs.exists(testFilePath));
-    assertEquals(bs, namenode.getPreferredBlockSize(ctx, testFile));
-  }
-
-  @Test
-  public void testLs() throws Exception {
-    List<Stat> dir = namenode.ls(ctx, "/");
-    assertEquals(0, dir.size());
-
-    assertTrue(namenode.mkdirhier(ctx, "/foo", (short) 0755));
-    dir = namenode.ls(ctx, "/");
-    assertEquals(1, dir.size());
-    assertEquals(true, dir.get(0).isDir);
-
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    dir = namenode.ls(ctx, "/");
-    assertEquals(2, dir.size());
-    assertTrue(dir.get(0).isDir != dir.get(1).isDir);
-    assertTrue(namenode.unlink(ctx, "/foo", true));
-  }
-
-  @Test
-  public void testMkdirhier() throws Exception {
-    String foo = "/foo";
-    short perms = (short) 0755;
-    Path fooPath = new Path(foo);
-    assertFalse(fs.exists(fooPath));
-
-    assertTrue(namenode.mkdirhier(ctx, foo, perms));
-    assertTrue(fs.exists(fooPath));
-    assertTrue(namenode.mkdirhier(ctx, foo, perms));
-    assertTrue(namenode.unlink(ctx, foo, true));
-
-    String bar = "/bar/baz";
-    Path barPath = new Path(bar);
-    assertFalse(fs.exists(barPath));
-    assertTrue(namenode.mkdirhier(ctx, bar, perms));
-    assertTrue(fs.exists(barPath));
-    assertTrue(namenode.mkdirhier(ctx, bar, perms));
-    assertTrue(namenode.unlink(ctx, bar, true));
-  }
-
-  @Test
-  public void testRefreshNodes() throws Exception {
-    // XXX This does not test much...
-    namenode.refreshNodes(ctx);
-  }
-
-  @Test
-  public void testRename() throws Exception {
-    String foo = "/foo";
-    short perms = (short) 0755;
-    Path fooPath = new Path(foo);
-    assertTrue(namenode.mkdirhier(ctx, foo, perms));
-    assertTrue(fs.exists(fooPath));
-
-    assertFalse(namenode.rename(ctx, foo, foo));
-    String bar = "/bar";
-    Path barPath = new Path(bar);
-    assertTrue(namenode.rename(ctx, foo, bar));
-    assertTrue(fs.exists(barPath));
-    assertFalse(fs.exists(fooPath));
-
-    assertFalse(fs.exists(fooPath));
-    assertFalse(namenode.rename(ctx, bar, "/foo/baz"));
-    assertTrue(namenode.unlink(ctx, bar, true));
-
-    assertFalse(fs.exists(testFilePath));
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-
-    assertTrue(namenode.mkdirhier(ctx, "/foo/baz", PERMS));
-    String newTestFile = "/foo/baz" + testFile;
-    Path newTestFilePath = new Path(newTestFile);
-    assertFalse(fs.exists(newTestFilePath));
-    assertTrue(namenode.rename(ctx, testFile, newTestFile));
-    assertTrue(fs.exists(newTestFilePath));
-    assertFalse(fs.exists(testFilePath));
-
-    assertTrue(namenode.mkdirhier(ctx, foo, perms));
-    assertTrue(fs.exists(fooPath));
-    assertTrue(fs.getFileStatus(fooPath).isDir());
-    assertTrue(namenode.rename(ctx, newTestFile, foo));
-    // XXX Bug or feature? Renaming onto an existing directory moves the source into it.
-    assertTrue(fs.getFileStatus(fooPath).isDir());
-
-    assertTrue(namenode.unlink(ctx, foo, true));
-  }
-
-  @Test
-  public void testReportBadBlocks() throws Exception {
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < 100; ++i) {
-      sb.append("Blah blah blah");
-    }
-    String data = sb.toString();
-    FSDataOutputStream out = fs.create(testFilePath, true, 512, REPLICATION,
-        512);
-    out.writeBytes(data);
-    out.close();
-
-    // Blocks here are Thrift blocks
-    List<Block> blocks = namenode.getBlocks(ctx, testFile, 0, data.length());
-    assertTrue(blocks.size() > 0);
-    assertEquals(REPLICATION, blocks.get(0).nodes.size());
-
-    List<Block> badBlocks = new ArrayList<Block>();
-    Block b = blocks.get(0);
-    assertEquals(REPLICATION, b.nodes.size());
-    b.nodes.remove(0);
-    badBlocks.add(b);
-    namenode.reportBadBlocks(ctx, badBlocks);
-
-    blocks = namenode.getBlocks(ctx, testFile, 0, data.length());
-    assertTrue(blocks.size() > 0);
-    assertTrue(blocks.get(0).nodes.size() < REPLICATION);
-  }
-
-  @Test
-  public void testSafeMode() throws Exception {
-    assertFalse(namenode.isInSafeMode(ctx));
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    assertTrue(namenode.unlink(ctx, testFile, false));
-
-    namenode.enterSafeMode(ctx);
-    assertTrue(namenode.isInSafeMode(ctx));
-    try {
-      Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-      fail("create() must fail when cluster is in safe mode");
-    } catch (Throwable t) {
-    }
-
-    namenode.leaveSafeMode(ctx);
-    assertFalse(namenode.isInSafeMode(ctx));
-  }
-
-  @Test
-  public void testSetQuota() throws Exception {
-    try {
-      namenode.setQuota(ctx, "/not-there", 1, Constants.QUOTA_DONT_SET);
-      fail("cannot setQuota() on non-existing directories");
-    } catch (IOException e) {}
-
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    try {
-      namenode.setQuota(ctx, testFile, 1, Constants.QUOTA_DONT_SET);
-      fail("cannot setQuota() on files");
-    } catch (IOException e) {}
-
-    short perms = (short) 0755;
-    namenode.mkdirhier(ctx, "/foo", perms);
-    namenode.setQuota(ctx, "/foo", 2, Constants.QUOTA_DONT_SET);
-
-    namenode.mkdirhier(ctx, "/foo/one", perms);
-    try {
-      namenode.mkdirhier(ctx, "/foo/two", perms);
-      fail("namespaceQuota not set");
-    } catch (IOException e) {}
-
-    namenode.setQuota(ctx, "/foo", 3, Constants.QUOTA_DONT_SET);
-    assertTrue(namenode.mkdirhier(ctx, "/foo/two", perms));
-    assertTrue(namenode.unlink(ctx, "/foo", true));
-  }
-
-  @Test
-  public void testSetReplication() throws Exception {
-    short repl = (short) (REPLICATION - 1);
-    Helper.createFile(fs, testFile, repl, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    FileStatus st = fs.getFileStatus(testFilePath);
-    assertEquals(repl, st.getReplication());
-
-    assertTrue(namenode.setReplication(ctx, testFile, REPLICATION));
-    st = fs.getFileStatus(testFilePath);
-    assertEquals(REPLICATION, st.getReplication());
-  }
-
-  @Test
-  public void testStat() throws Exception {
-    Stat st = namenode.stat(ctx, "/");
-    assertEquals("/", st.path);
-    assertTrue(st.isDir);
-    long now = System.currentTimeMillis();
-    Thread.sleep(10);
-    assertTrue(st.mtime < now);
-    assertTrue(st.atime < now);
-
-    long then = now;
-    assertFalse(fs.exists(testFilePath));
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-
-    st = namenode.stat(ctx, testFile);
-    assertEquals(testFile, st.path);
-    assertFalse(st.isDir);
-    now = System.currentTimeMillis();
-    Thread.sleep(10);
-    assertTrue(st.atime > then);
-    assertTrue(st.mtime > then);
-    assertTrue(now > st.atime);
-    assertTrue(now > st.mtime);
-    assertEquals(1024, st.blockSize);
-    assertEquals(REPLICATION, st.replication);
-    assertEquals(0, st.length);
-
-    try {
-      st = namenode.stat(ctx, "/not-there");
-      fail("No exception thrown for statting a non-existent file. " +
-           "Instead, got: " + String.valueOf(st));
-    } catch (IOException fne) {
-      assertEquals("java.io.FileNotFoundException", fne.clazz);
-    }
-  }
-
-  @Test
-  public void testContentSummary() throws Exception {
-    ContentSummary cs = namenode.getContentSummary(ctx, "/");
-    assertEquals(1, cs.directoryCount);
-    assertEquals(0, cs.fileCount);
-  }
-
-  @Test
-  public void testUnlink() throws Exception {
-    assertFalse(namenode.unlink(ctx, "/", true));
-
-    assertFalse(fs.exists(testFilePath));
-    assertFalse(namenode.unlink(ctx, testFile, false));
-
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-    assertTrue(namenode.unlink(ctx, testFile, false));
-
-    assertTrue(namenode.mkdirhier(ctx, "/foo", (short) 0755));
-    assertTrue(namenode.unlink(ctx, "/foo", false));
-
-    assertTrue(namenode.mkdirhier(ctx, "/foo", (short) 0755));
-    Path newTestFile = new Path("/foo/test-file");
-    Helper.createFile(fs, "/foo/test-file", REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(newTestFile));
-    try {
-      namenode.unlink(ctx, "/foo", false);
-      fail("unlink(path, recursive=false) must fail for non-empty paths");
-    } catch (IOException e) {
-    }
-    assertTrue(namenode.unlink(ctx, "/foo", true));
-  }
-
-  @Test
-  public void testUtime() throws Exception {
-    long tstamp = System.currentTimeMillis();
-    Helper.createFile(fs, testFile, REPLICATION, PERMS, 1024, 0);
-    assertTrue(fs.exists(testFilePath));
-
-    FileStatus st = fs.getFileStatus(testFilePath);
-    assertTrue(st.getAccessTime() >= tstamp);
-    assertTrue(st.getModificationTime() >= tstamp);
-
-    Thread.sleep(10);
-    tstamp = System.currentTimeMillis();
-    namenode.utime(ctx, testFile, -1, tstamp);
-    st = fs.getFileStatus(testFilePath);
-    assertTrue(st.getAccessTime() < tstamp);
-    assertTrue(st.getModificationTime() == tstamp);
-
-    Thread.sleep(10);
-    tstamp = System.currentTimeMillis();
-    namenode.utime(ctx, testFile, tstamp, -1);
-    st = fs.getFileStatus(testFilePath);
-    assertTrue(st.getAccessTime() == tstamp);
-    assertTrue(st.getModificationTime() < tstamp);
-
-    long prev = tstamp;
-    namenode.utime(ctx, testFile, -1, -1);
-    Thread.sleep(10);
-    tstamp = System.currentTimeMillis();
-    st = fs.getFileStatus(testFilePath);
-    assertTrue(st.getModificationTime() < tstamp);
-    assertTrue(st.getModificationTime() >= prev);
-    assertTrue(st.getAccessTime() < tstamp);
-    assertTrue(st.getAccessTime() >= prev);
-    assertTrue(st.getAccessTime() == st.getModificationTime());
-  }
-
-  /**
-   * Ensure that RPCs can impersonate different users via RequestContexts
-   */
-  @Test
-  public void testRequestContext() throws Exception {
-    Path byCurrentPath = new Path("/test-by-current");
-    Path byOtherPath = new Path("/test-by-other");
-
-    assertFalse(fs.exists(byCurrentPath));
-    assertFalse(fs.exists(byOtherPath));
-
-    // Dir made by 'ctx' should be owned by the current user
-    namenode.mkdirhier(ctx, "/test-by-current", (short)0755);
-    assertEquals(UserGroupInformation.getCurrentUser().getUserName(),
-                 fs.getFileStatus(byCurrentPath).getOwner());
-
-    assertTrue(fs.delete(byCurrentPath, true));
-
-    // Dir made by unprivilegedCtx should be owned by the test user
-    namenode.mkdirhier(unprivilegedCtx, "/test-by-other", (short)0755);
-    assertEquals(Helper.TEST_USER, fs.getFileStatus(byOtherPath).getOwner());
-  }
-}

+ 5 - 0
maven/pom.xml

@@ -79,6 +79,11 @@
         <artifactId>hadoop-core</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-client</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.apache.hadoop.hive</groupId>
         <artifactId>hive-metastore</artifactId>