- /*
- * Licensed to Cloudera, Inc. under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. Cloudera, Inc. licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /*
- * Thrift interface for HDFS.
- */
- /* Common types and interfaces */
- include 'common.thrift'
- /*
- * Namespaces for generated code. The idea is to keep code generated by
- * Thrift under a 'hadoop.api' namespace, so that a higher-level set of
- * functions and classes may be defined under 'hadoop'.
- */
- namespace cpp hadoop.api.hdfs
- namespace csharp Hadoop.API.HDFS
- namespace java org.apache.hadoop.thriftfs.api
- namespace perl Hadoop.API.HDFS
- namespace php hadoop_api_hdfs
- namespace py hadoop.api.hdfs
- namespace rb Hadoop.API.HDFS
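- /*
-  * Illustrative note (not part of the interface): client and server stubs for
-  * the services below are produced with the Apache Thrift compiler, e.g.
-  *
-  *   thrift -r --gen py hdfs.thrift
-  *   thrift -r --gen java hdfs.thrift
-  *
-  * The Python sketches in later comments assume the --gen py output (package
-  * 'hadoop.api.hdfs', plus 'hadoop.api.common' from common.thrift) is on
-  * PYTHONPATH; module layouts, hosts, ports, and paths in those sketches are
-  * assumptions.
-  */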
- /* Values for 'type' argument to getDatanodeReport(). */
- enum DatanodeReportType {
- ALL_DATANODES = 1;
- LIVE_DATANODES = 2;
- DEAD_DATANODES = 3;
- }
- /* Values for DatanodeInfo.state */
- enum DatanodeState {
- NORMAL_STATE = 1;
- DECOMMISSION_INPROGRESS = 2;
- DECOMMISSIONED = 3;
- }
- /* Value for unknown Thrift port in DatanodeInfo */
- const i32 UNKNOWN_THRIFT_PORT = -1;
- /* Values for setQuota() parameters. */
- /* Leave the quota on this path unchanged. */
- const i64 QUOTA_DONT_SET = -2
- /* Clear quota on this path. */
- const i64 QUOTA_RESET = -1
- /**
- * Information and state of a data node.
- *
- * Modelled after org.apache.hadoop.hdfs.protocol.DatanodeInfo
- */
- struct DatanodeInfo {
- /** HDFS name of the datanode (of the form <host>:<datanode port>). */
- 1: string name,
-
- /** Unique ID within an HDFS cluster. */
- 2: string storageID,
-
- /** Host name of the Thrift server socket. */
- 3: string host,
-
- /** Port number of the Thrift server socket, or UNKNOWN_THRIFT_PORT
- if the Thrift port for this datanode is not known. */
- 4: i32 thriftPort,
- /** Port number of the Web UI */
- 10: i32 httpPort,
-
- /** Raw capacity of the data node (in bytes). */
- 5: i64 capacity,
-
- /** Space used by the data node (in bytes). */
- 6: i64 dfsUsed,
-
- /** Raw free space in the data node (in bytes). */
- 7: i64 remaining,
-
- /** Number of active connections to the data node. */
- 8: i32 xceiverCount,
-
- /** State of this data node. */
- 9: DatanodeState state,
- /** Number of milliseconds since last contact */
- 11: i64 millisSinceUpdate,
- }
- /**
- * Representation of a file block in HDFS
- *
- * Modelled after org.apache.hadoop.hdfs.protocol.LocatedBlock
- */
- struct Block {
- /** Block ID (unique among all blocks in a filesystem). */
- 1: i64 blockId,
-
- /** Path of the file which this block belongs to. */
- 2: string path,
- /** Length of this block. */
- 3: i64 numBytes,
-
- /** Generation stamp of this block. */
- 4: i64 genStamp,
- /** List of data nodes with copies of this block. */
- 5: list<DatanodeInfo> nodes,
- /** Offset of the first byte of the block relative to the start of the file */
- 6: i64 startOffset,
- /** The serialized token associated with this block. */
- 7: string token,
- }
- /**
- * Information about a path in HDFS.
- *
- * Modelled after org.apache.hadoop.fs.FileStatus
- */
- struct Stat {
- /** The path. */
- 1: string path,
- /**
- * True: The path represents a file.
- * False: The path represents a directory.
- */
- 2: bool isDir,
- /* Fields common to file and directory paths. */
-
- /** Access time (milliseconds since 1970-01-01 00:00 UTC). */
- 3: i64 atime,
- /** Modification time (milliseconds since 1970-01-01 00:00 UTC). */
- 4: i64 mtime,
-
- /** Access permissions */
- 5: i16 perms,
-
- /** Owner */
- 6: string owner,
-
- /** Group */
- 7: string group,
-
- /* Fields for file paths (will be zero for directory entries). */
-
- /** Length (in bytes). */
- 13: i64 length,
-
- /** Block size (in bytes). */
- 14: i64 blockSize,
-
- /** Replication factor. */
- 15: i16 replication
- }
- /**
- * Information about an entire subtree under a directory
- * Includes the information from org.apache.hadoop.fs.ContentSummary
- */
- struct ContentSummary {
- /* Fields for directory paths (will be zero for file entries). */
-
- /** Number of files in this directory */
- 1: i64 fileCount,
-
- /** Number of directories in this directory */
- 2: i64 directoryCount,
-
- /** Namespace quota for this directory (maximum number of names). */
- 3: i64 quota,
-
- /** Disk space consumed by this directory (in bytes). */
- 4: i64 spaceConsumed,
-
- /** Disk space quota for this directory (in bytes). */
- 5: i64 spaceQuota,
- /** The path */
- 6: string path,
- }
- struct UpgradeStatusReport {
- 1: i32 version
- 2: i16 percentComplete
- 3: bool finalized
- /** Informational text, identical to what is shown on the NameNode web UI */
- 4: string statusText
- }
- /**
- * Information that mirrors the "health report" information available on the
- * NameNode web UI
- */
- struct DFSHealthReport {
- 1: i64 bytesTotal
- 2: i64 bytesUsed
- 3: i64 bytesRemaining
- 4: i64 bytesNonDfs
- /** How many datanodes are considered live */
- 5: i32 numLiveDataNodes
- /** How many datanodes are considered dead */
- 6: i32 numDeadDataNodes
- /**
- * Status of the currently running upgrade. If no upgrade
- * is running, this will be null.
- */
- 7: UpgradeStatusReport upgradeStatus
- /**
- * The http port that the NameNode is listening on for its web UI
- * - this isn't really health, but it's related and handy
- */
- 8: i32 httpPort
- }
- struct ThriftHdfsDelegationToken {
- 1: binary delegationTokenBytes
- }
- /** Quota-related error */
- exception QuotaException {
- /** Error message. */
- 1: string msg,
-
- /** Textual representation of the call stack. */
- 2: string stack
- }
- /**
- * Provides an interface to a Hadoop Namenode. It is basically a Thrift
- * translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
- */
- service Namenode extends common.HadoopServiceBase {
- /** Set permissions of an existing file or directory. */
- void chmod(10: common.RequestContext ctx,
- /** Path of the file or directory. */
- 1: string path,
-
- /** New permissions for the file or directory. */
- 2: i16 perms) throws (1: common.IOException err),
- /**
- * Set owner of a file or directory.
- *
- * If either parameter 'owner' or 'group' is set to null, that
- * parameter is left unchanged.
- *
- * Parameters 'owner' and 'group' cannot both be null.
- */
- void chown(10: common.RequestContext ctx,
- /** Path to the file or directory */
- 1: string path,
-
- /** New owner. */
- 2: string owner,
-
- /** New group. */
- 3: string group) throws (1: common.IOException err),
- /**
- * Return a list containing:
- * (index 0) The total storage capacity of the file system (in bytes).
- * (index 1) The total used space of the file system (in bytes).
- * (index 2) The available storage of the file system (in bytes).
- */
- list<i64> df(10: common.RequestContext ctx),
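- /*
-  * Illustrative sketch (not part of the interface): calling df() from stubs
-  * generated with 'thrift --gen py'. The package names, the host, and the
-  * NameNode Thrift port (10090) are assumptions, as is constructing the
-  * RequestContext from common.thrift with default fields.
-  *
-  *   from thrift.transport import TSocket, TTransport
-  *   from thrift.protocol import TBinaryProtocol
-  *   from hadoop.api.hdfs import Namenode
-  *   from hadoop.api.common.ttypes import RequestContext
-  *
-  *   transport = TTransport.TBufferedTransport(TSocket.TSocket('namenode-host', 10090))
-  *   transport.open()
-  *   client = Namenode.Client(TBinaryProtocol.TBinaryProtocol(transport))
-  *   ctx = RequestContext()   # fields left at their defaults
-  *
-  *   capacity, used, available = client.df(ctx)
-  *   print('capacity=%d used=%d available=%d' % (capacity, used, available))
-  *   transport.close()
-  */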
- /**
- * Enter safe mode.
- */
- void enterSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),
- /** Get a list of all blocks containing a region of a file */
- list<Block> getBlocks(10: common.RequestContext ctx,
- /** Path to the file. */
- 1: string path,
-
- /** Offset of the region. */
- 2: i64 offset,
-
- /** Length of the region */
- 3: i64 length) throws (1: common.IOException err),
-
- /**
- * Get the preferred block size for the given file.
- *
- * The path must exist, or common.IOException is thrown.
- */
- i64 getPreferredBlockSize(10: common.RequestContext ctx,
- /** Path to the file. */
- 1: string path) throws (1: common.IOException err),
-
- /**
- * Returns whether HDFS is in safe mode or not.
- */
- bool isInSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),
- /**
- * Leave safe mode.
- */
- void leaveSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),
- /** Get a listing of the indicated directory. */
- list<Stat> ls(10: common.RequestContext ctx,
- /** Path to the directory. */
- 1: string path) throws (1: common.IOException err),
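- /*
-  * Illustrative sketch: listing a directory with the generated Python client
-  * ('client' and 'ctx' set up as in the df() sketch above; the path is
-  * hypothetical).
-  *
-  *   for st in client.ls(ctx, '/user/demo'):
-  *       kind = 'd' if st.isDir else '-'
-  *       print('%s %04o %s:%s %d %s' % (kind, st.perms, st.owner, st.group, st.length, st.path))
-  */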
- /**
- * Create a directory (or hierarchy of directories).
- *
- * Returns false if the directory did not exist and could not be created,
- * true otherwise.
- */
- bool mkdirhier(10: common.RequestContext ctx,
- /** Path to the directory. */
- 1: string path,
-
- /** Access permissions of the directory. */
- 2: i16 perms) throws (1: common.IOException err),
- /** Tells the name node to reread the hosts and exclude files. */
- void refreshNodes(10: common.RequestContext ctx) throws (1: common.IOException err),
- /**
- * Rename an item in the file system namespace.
- *
- * Returns true if successful, or
- * false if the old name does not exist or if the new name already
- * belongs to the namespace.
- */
- bool rename(10: common.RequestContext ctx,
- /** Path to existing file or directory. */
- 1: string path,
-
- /** New path. */
- 2: string newPath) throws (1: common.IOException err),
-
- /** Report corrupted blocks. */
- void reportBadBlocks(10: common.RequestContext ctx,
- /** List of corrupted blocks. */
- 1: list<Block> blocks) throws (1: common.IOException err),
- /**
- * Get information about a path in HDFS.
- *
- * Return value will be null if the path does not exist.
- */
- Stat stat(10: common.RequestContext ctx,
- /** Path of the file or directory. */
- 1: string path) throws (1: common.IOException err),
- /**
- * Get the summary of a directory's contents.
- *
- * Note that the runtime of this call is linear in the total number of
- * nodes in the directory tree, so it can be expensive for directories
- * near the top of a large HDFS namespace. Use with care.
- */
- ContentSummary getContentSummary(10: common.RequestContext ctx,
- 1: string path) throws (1: common.IOException err),
- /**
- * Get ContentSummary objects for multiple directories simultaneously. The same warnings
- * apply as for getContentSummary(...) above.
- */
- list<ContentSummary> multiGetContentSummary(10: common.RequestContext ctx,
- 1: list<string> paths) throws (1: common.IOException err),
-
- /**
- * Set the quota for a directory.
- *
- * Quota parameters may have three types of values:
- *
- * (1) 0 or more: Quota will be set to that value.
- * (2) QUOTA_DONT_SET: Quota will not be changed,
- * (3) QUOTA_RESET: Quota will be reset.
- *
- * Any other value is a runtime error.
- */
- void setQuota(10: common.RequestContext ctx,
- /** Path of the directory. */
- 1: string path,
-
- /** Limit on the number of names in the directory. */
- 2: i64 namespaceQuota,
-
- /**
- * Limit on disk space occupied by all the files in the
- * directory.
- */
- 3: i64 diskspaceQuota) throws (1: common.IOException err),
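- /*
-  * Illustrative sketch of the three quota-value cases ('client' and 'ctx' as
-  * in the df() sketch above; the path is hypothetical):
-  *
-  *   from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
-  *
-  *   # Cap the directory at 10000 names, leave the disk-space quota untouched.
-  *   client.setQuota(ctx, '/user/demo', 10000, QUOTA_DONT_SET)
-  *
-  *   # Clear the namespace quota and set the disk-space quota to 1 TiB.
-  *   client.setQuota(ctx, '/user/demo', QUOTA_RESET, 1024 ** 4)
-  */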
-
- /**
- * Set replication factor for an existing file.
- *
- * This call just updates the value of the replication factor. The actual
- * block replication is not expected to be performed during this method call.
- * The blocks will be populated or removed in the background as the result of
- * the routine block maintenance procedures.
- *
- * Returns true if successful, false if file does not exist or is a
- * directory.
- */
- bool setReplication(10: common.RequestContext ctx,
- /** Path of the file. */
- 1: string path,
-
- /** New replication factor. */
- 2: i16 replication) throws (1: common.IOException err),
-
- /**
- * Delete a file or directory from the file system.
- *
- * Any blocks belonging to the deleted files will be garbage-collected.
- */
- bool unlink(10: common.RequestContext ctx,
- /** Path of the file or directory. */
- 1: string path,
-
- /** Delete a non-empty directory recursively. */
- 2: bool recursive) throws (1: common.IOException err),
- /**
- * Sets the modification and access time of a file or directory.
- *
- * Setting *a single time parameter* to -1 means that time parameter
- * must not be set by this call.
- *
- * Setting *both time parameters* to -1 means both of them must be set to
- * the current time.
- */
- void utime(10: common.RequestContext ctx,
- /** Path of the file or directory. */
- 1: string path,
-
- /** Access time in milliseconds since 1970-01-01 00:00 UTC */
- 2: i64 atime,
-
- /** Modification time in milliseconds since 1970-01-01 00:00 UTC */
- 3: i64 mtime) throws (1: common.IOException err),
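- /*
-  * Illustrative sketch of the -1 conventions described above ('client' and
-  * 'ctx' as in the df() sketch; the path is hypothetical):
-  *
-  *   import time
-  *   now_ms = int(time.time() * 1000)
-  *
-  *   client.utime(ctx, '/user/demo/file.txt', -1, now_ms)   # leave atime, set mtime
-  *   client.utime(ctx, '/user/demo/file.txt', -1, -1)       # set both to "now"
-  */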
-
- /*
- * The following methods are meant to be called by datanodes to advertise
- * themselves to the namenode.
- */
-
- /**
- * Inform the namenode that a datanode process has started.
- */
- void datanodeUp(/** <host name>:<port number> of the datanode */
- 1: string name,
- /** the storage id of the datanode */
- 2: string storage,
- /** Thrift port of the datanode */
- 3: i32 thriftPort),
-
- /**
- * Inform the namenode that a datanode process has stopped.
- */
- void datanodeDown(/** <host name>:<port number> of the datanode */
- 1: string name,
- /** the storage id of the datanode */
- 2: string storage,
- /** Thrift port of the datanode */
- 3: i32 thriftPort),
- ThriftHdfsDelegationToken getDelegationToken(10: common.RequestContext ctx, 1: string renewer) throws (1: common.IOException err)
- }
- /** Encapsulates a block data transfer with its CRC */
- struct BlockData {
- /** CRC32 of the data being transferred */
- 1: i32 crc,
- /** Length of the data being transferred */
- 2: i32 length,
- /** The data itself */
- 3: binary data
- }
- /**
- * Provides an interface to data nodes, so that clients may read and write
- * data blocks.
- */
- service Datanode {
- /**
- * Read bytes from a block.
- *
- * Only 2^31 - 1 bytes may be read on a single call to this method.
- */
- BlockData readBlock(10: common.RequestContext ctx,
- /** Block to be read from. */
- 1: Block block,
-
- /** Offset within the block where read must start from. */
- 2: i64 offset,
-
- /** Number of bytes to read. */
- 3: i32 length) throws (1:common.IOException err)
- }
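- /*
-  * Illustrative sketch: reading the first block of a file by combining
-  * Namenode.getBlocks() with Datanode.readBlock(). The datanode Thrift port
-  * comes from DatanodeInfo.thriftPort; 'client', 'ctx', the path, and the CRC
-  * convention (a standard CRC-32 of the payload) are assumptions carried over
-  * from the df() sketch above.
-  *
-  *   from thrift.transport import TSocket, TTransport
-  *   from thrift.protocol import TBinaryProtocol
-  *   from hadoop.api.hdfs import Datanode
-  *   import zlib
-  *
-  *   # Locate the block covering the first byte of the file.
-  *   block = client.getBlocks(ctx, '/user/demo/file.txt', 0, 1)[0]
-  *   dn = block.nodes[0]
-  *
-  *   dn_transport = TTransport.TBufferedTransport(TSocket.TSocket(dn.host, dn.thriftPort))
-  *   dn_transport.open()
-  *   dn_client = Datanode.Client(TBinaryProtocol.TBinaryProtocol(dn_transport))
-  *
-  *   # Typical block sizes fit well under the 2^31 - 1 bytes-per-call limit.
-  *   data = dn_client.readBlock(ctx, block, 0, block.numBytes)
-  *   if zlib.crc32(data.data) & 0xffffffff != data.crc & 0xffffffff:
-  *       print('CRC mismatch')
-  *   dn_transport.close()
-  */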