/*
 * Licensed to Cloudera, Inc. under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  Cloudera, Inc. licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Thrift interface for HDFS.
 */

/* Common types and interfaces */
include "common.thrift"

/*
 * Namespaces for generated code. The idea is to keep code generated by
 * Thrift under a 'hadoop.api' namespace, so that a higher-level set of
 * functions and classes may be defined under 'hadoop'.
 */
namespace cpp    hadoop.api.hdfs
namespace csharp Hadoop.API.HDFS
namespace java   org.apache.hadoop.thriftfs.api
namespace perl   Hadoop.API.HDFS
namespace php    hadoop_api_hdfs
namespace py     hadoop.api.hdfs
namespace rb     Hadoop.API.HDFS

/* Values for the 'type' argument to getDatanodeReport(). */
enum DatanodeReportType {
  ALL_DATANODES = 1,
  LIVE_DATANODES = 2,
  DEAD_DATANODES = 3
}

/* Values for DatanodeInfo.state */
enum DatanodeState {
  NORMAL_STATE = 1,
  DECOMMISSION_INPROGRESS = 2,
  DECOMMISSIONED = 3
}

/* Value for an unknown Thrift port in DatanodeInfo. */
const i32 UNKNOWN_THRIFT_PORT = -1

/* Value for setQuota() parameters: leave this quota unchanged. */
const i64 QUOTA_DONT_SET = -2

/* Value for setQuota() parameters: clear the quota on this path. */
const i64 QUOTA_RESET = -1
/**
 * Information and state of a data node.
 *
 * Modelled after org.apache.hadoop.hdfs.protocol.DatanodeInfo
 */
struct DatanodeInfo {
  /** HDFS name of the datanode (equal to <host>:<datanode port>). */
  1: string name,
  /** Unique ID within an HDFS cluster. */
  2: string storageID,
  /** Host name of the Thrift server socket. */
  3: string host,
  /** Port number of the Thrift server socket, or UNKNOWN_THRIFT_PORT
      if the Thrift port for this datanode is not known. */
  4: i32 thriftPort,
  /** Port number of the Web UI. */
  10: i32 httpPort,
  /** Raw capacity of the data node (in bytes). */
  5: i64 capacity,
  /** Space used by the data node (in bytes). */
  6: i64 dfsUsed,
  /** Raw free space in the data node (in bytes). */
  7: i64 remaining,
  /** Number of active connections to the data node. */
  8: i32 xceiverCount,
  /** State of this data node. */
  9: DatanodeState state,
  /** Number of milliseconds since last contact. */
  11: i64 millisSinceUpdate,
}
/**
 * Representation of a file block in HDFS
 *
 * Modelled after org.apache.hadoop.hdfs.protocol.LocatedBlock
 */
struct Block {
  /** Block ID (unique among all blocks in a filesystem). */
  1: i64 blockId,
  /** Path of the file which this block belongs to. */
  2: string path,
  /** Length of this block. */
  3: i64 numBytes,
  /** Generation stamp of this block. */
  4: i64 genStamp,
  /** List of data nodes with copies of this block. */
  5: list<DatanodeInfo> nodes,
  /** Offset of the first byte of the block relative to the start of the file. */
  6: i64 startOffset,
  /** The serialized token associated with this block. */
  7: string token,
}
/**
 * Information about a path in HDFS.
 *
 * Modelled after org.apache.hadoop.fs.FileStatus
 */
struct Stat {
  /** The path. */
  1: string path,
  /**
   * True:  The path represents a file.
   * False: The path represents a directory.
   */
  2: bool isDir,

  /* Fields common to file and directory paths. */

  /** Access time (milliseconds since 1970-01-01 00:00 UTC). */
  3: i64 atime,
  /** Modification time (milliseconds since 1970-01-01 00:00 UTC). */
  4: i64 mtime,
  /** Access permissions. */
  5: i16 perms,
  /** Owner. */
  6: string owner,
  /** Group. */
  7: string group,

  /* Fields for file paths (will be zero for directory entries). */

  /** Length (in bytes). */
  13: i64 length,
  /** Block size (in bytes). */
  14: i64 blockSize,
  /** Replication factor. */
  15: i16 replication
}
/**
 * Information about an entire subtree under a directory.
 *
 * Includes the information from org.apache.hadoop.fs.ContentSummary
 */
struct ContentSummary {
  /* Fields for directory paths (will be zero for file entries). */

  /** Number of files in this directory. */
  1: i64 fileCount,
  /** Number of directories in this directory. */
  2: i64 directoryCount,
  /** Namespace quota for this directory (number of names). */
  3: i64 quota,
  /** Disk space consumed (in bytes). */
  4: i64 spaceConsumed,
  /** Disk space quota (in bytes). */
  5: i64 spaceQuota,
  /** The path. */
  6: string path,
}
struct UpgradeStatusReport {
  1: i32 version,
  2: i16 percentComplete,
  3: bool finalized,
  /** Informative text, identical to what is shown on the NN web UI. */
  4: string statusText
}
/**
 * Information that mirrors the "health report" information available on
 * the NameNode web UI.
 */
struct DFSHealthReport {
  1: i64 bytesTotal,
  2: i64 bytesUsed,
  3: i64 bytesRemaining,
  4: i64 bytesNonDfs,
  /** How many datanodes are considered live. */
  5: i32 numLiveDataNodes,
  /** How many datanodes are considered dead. */
  6: i32 numDeadDataNodes,
  /**
   * Status of the currently running upgrade. If no upgrade
   * is running, this will be null.
   */
  7: UpgradeStatusReport upgradeStatus,
  /**
   * The HTTP port that the NameNode is listening on for its web UI.
   * This isn't really health information, but it is related and handy.
   */
  8: i32 httpPort
}

struct ThriftHdfsDelegationToken {
  1: binary delegationTokenBytes
}
/** Quota-related error. */
exception QuotaException {
  /** Error message. */
  1: string msg,
  /** Textual representation of the call stack. */
  2: string stack
}
/**
 * Provides an interface to a Hadoop NameNode. It is basically a Thrift
 * translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
 */
service Namenode extends common.HadoopServiceBase {

  /** Set permissions of an existing file or directory. */
  void chmod(10: common.RequestContext ctx,
             /** Path of the file or directory. */
             1: string path,
             /** New permissions for the file or directory. */
             2: i16 perms) throws (1: common.IOException err),
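
  /*
   * A minimal usage sketch (not part of the IDL): 'perms' is a numeric
   * POSIX-style mode, so a Python client generated from this file passes
   * the octal value directly. The client object, context, and path below
   * are assumptions for illustration.
   *
   *   ctx = RequestContext()                        # fields defined in common.thrift
   *   client.chmod(ctx, "/user/alice/data", 0o755)  # rwxr-xr-x == 493
   */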

  /**
   * Set owner of a file or directory.
   *
   * If either parameter 'owner' or 'group' is set to null, that
   * parameter is left unchanged.
   *
   * Parameters 'owner' and 'group' cannot both be null.
   */
  void chown(10: common.RequestContext ctx,
             /** Path to the file or directory. */
             1: string path,
             /** New owner. */
             2: string owner,
             /** New group. */
             3: string group) throws (1: common.IOException err),

  /**
   * Return a list containing:
   *   (index 0) The total storage capacity of the file system (in bytes).
   *   (index 1) The total used space of the file system (in bytes).
   *   (index 2) The available storage of the file system (in bytes).
   */
  list<i64> df(10: common.RequestContext ctx),

  /** Enter safe mode. */
  void enterSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /** Get a list of all blocks containing a region of a file. */
  list<Block> getBlocks(10: common.RequestContext ctx,
                        /** Path to the file. */
                        1: string path,
                        /** Offset of the region. */
                        2: i64 offset,
                        /** Length of the region. */
                        3: i64 length) throws (1: common.IOException err),

  /**
   * Get the preferred block size for the given file.
   *
   * The path must exist, or common.IOException is thrown.
   */
  i64 getPreferredBlockSize(10: common.RequestContext ctx,
                            /** Path to the file. */
                            1: string path) throws (1: common.IOException err),

  /** Returns whether HDFS is in safe mode or not. */
  bool isInSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /** Leave safe mode. */
  void leaveSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /** Get a listing of the indicated directory. */
  list<Stat> ls(10: common.RequestContext ctx,
                /** Path to the directory. */
                1: string path) throws (1: common.IOException err),

  /**
   * Create a directory (or hierarchy of directories).
   *
   * Returns false if the directory did not exist and could not be
   * created, true otherwise.
   */
  bool mkdirhier(10: common.RequestContext ctx,
                 /** Path to the directory. */
                 1: string path,
                 /** Access permissions of the directory. */
                 2: i16 perms) throws (1: common.IOException err),

  /** Tells the name node to reread the hosts and exclude files. */
  void refreshNodes(10: common.RequestContext ctx) throws (1: common.IOException err),

  /**
   * Rename an item in the file system namespace.
   *
   * Returns true if successful, or false if the old name does not exist
   * or if the new name already belongs to the namespace.
   */
  bool rename(10: common.RequestContext ctx,
              /** Path to existing file or directory. */
              1: string path,
              /** New path. */
              2: string newPath) throws (1: common.IOException err),

  /** Report corrupted blocks. */
  void reportBadBlocks(10: common.RequestContext ctx,
                       /** List of corrupted blocks. */
                       1: list<Block> blocks) throws (1: common.IOException err),

  /**
   * Get information about a path in HDFS.
   *
   * The return value will be null if the path does not exist.
   */
  Stat stat(10: common.RequestContext ctx,
            /** Path of the file or directory. */
            1: string path) throws (1: common.IOException err),

  /**
   * Get the summary of a directory's contents.
   *
   * Note that this has runtime linear in the total number of nodes
   * in the directory tree - this can be expensive for directories
   * near the top of a big HDFS. Use with care.
   */
  ContentSummary getContentSummary(10: common.RequestContext ctx,
                                   /** Path of the directory. */
                                   1: string path) throws (1: common.IOException err),

  /**
   * Get ContentSummary objects for multiple directories simultaneously.
   * The same warnings apply as for getContentSummary() above.
   */
  list<ContentSummary> multiGetContentSummary(10: common.RequestContext ctx,
                                              /** Paths of the directories. */
                                              1: list<string> paths) throws (1: common.IOException err),

  /**
   * Set the quota for a directory.
   *
   * Quota parameters may take three kinds of values:
   *
   *   (1) 0 or more:      The quota will be set to that value.
   *   (2) QUOTA_DONT_SET: The quota will not be changed.
   *   (3) QUOTA_RESET:    The quota will be reset.
   *
   * Any other value is a runtime error.
   */
  void setQuota(10: common.RequestContext ctx,
                /** Path of the directory. */
                1: string path,
                /** Limit on the number of names in the directory. */
                2: i64 namespaceQuota,
                /**
                 * Limit on disk space occupied by all the files in the
                 * directory.
                 */
                3: i64 diskspaceQuota) throws (1: common.IOException err),
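
  /*
   * A sketch of the three cases above, using a generated Python client
   * (the client object and paths are illustrative; the constants module
   * path follows the 'namespace py' declaration at the top of this file):
   *
   *   from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
   *
   *   # Cap the directory at 10000 names; leave the disk space quota alone.
   *   client.setQuota(ctx, "/user/alice", 10000, QUOTA_DONT_SET)
   *   # Clear both quotas.
   *   client.setQuota(ctx, "/user/alice", QUOTA_RESET, QUOTA_RESET)
   */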

  /**
   * Set the replication factor for an existing file.
   *
   * This call just updates the value of the replication factor. The actual
   * block replication is not expected to be performed during this method
   * call. The blocks will be populated or removed in the background as the
   * result of the routine block maintenance procedures.
   *
   * Returns true if successful, false if the file does not exist or is a
   * directory.
   */
  bool setReplication(10: common.RequestContext ctx,
                      /** Path of the file. */
                      1: string path,
                      /** New replication factor. */
                      2: i16 replication) throws (1: common.IOException err),

  /**
   * Delete a file or directory from the file system.
   *
   * Any blocks belonging to the deleted files will be garbage-collected.
   */
  bool unlink(10: common.RequestContext ctx,
              /** Path of the file or directory. */
              1: string path,
              /** Delete a non-empty directory recursively. */
              2: bool recursive) throws (1: common.IOException err),

  /**
   * Set the modification and access times of a file or directory.
   *
   * Setting *one single time parameter* to -1 means that time parameter
   * must not be changed by this call.
   *
   * Setting *both time parameters* to -1 means both of them must be set
   * to the current time.
   */
  void utime(10: common.RequestContext ctx,
             /** Path of the file or directory. */
             1: string path,
             /** Access time in milliseconds since 1970-01-01 00:00 UTC. */
             2: i64 atime,
             /** Modification time in milliseconds since 1970-01-01 00:00 UTC. */
             3: i64 mtime) throws (1: common.IOException err),
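
  /*
   * Sketch of the -1 conventions above (Python client, illustrative):
   *
   *   import time
   *
   *   now_ms = int(time.time() * 1000)
   *   client.utime(ctx, "/tmp/f", -1, now_ms)  # change mtime only
   *   client.utime(ctx, "/tmp/f", -1, -1)      # set both atime and mtime to "now"
   */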

  /*
   * The following methods are meant to be called by datanodes to advertise
   * themselves to the namenode.
   */

  /** Inform the namenode that a datanode process has started. */
  void datanodeUp(/** <host name>:<port number> of the datanode */
                  1: string name,
                  /** The storage ID of the datanode. */
                  2: string storage,
                  /** Thrift port of the datanode. */
                  3: i32 thriftPort),

  /** Inform the namenode that a datanode process has stopped. */
  void datanodeDown(/** <host name>:<port number> of the datanode */
                    1: string name,
                    /** The storage ID of the datanode. */
                    2: string storage,
                    /** Thrift port of the datanode. */
                    3: i32 thriftPort),

  /** Obtain an HDFS delegation token for the given renewer. */
  ThriftHdfsDelegationToken getDelegationToken(10: common.RequestContext ctx,
                                               1: string renewer) throws (1: common.IOException err)
}
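
/*
 * Example of talking to the Namenode service from Python. This is a
 * sketch, not part of the IDL: the host and port are assumptions, and the
 * RequestContext fields are defined in common.thrift (the module path
 * below assumes common.thrift declares 'namespace py hadoop.api.common').
 *
 *   from thrift.transport import TSocket, TTransport
 *   from thrift.protocol import TBinaryProtocol
 *   from hadoop.api.hdfs import Namenode
 *   from hadoop.api.common.ttypes import RequestContext
 *
 *   sock = TSocket.TSocket("namenode.example.com", 10090)  # port is an assumption
 *   transport = TTransport.TBufferedTransport(sock)
 *   client = Namenode.Client(TBinaryProtocol.TBinaryProtocol(transport))
 *   transport.open()
 *   try:
 *       ctx = RequestContext()               # fill in fields per common.thrift
 *       for st in client.ls(ctx, "/user"):   # list<Stat>
 *           print(st.path, st.length, st.isDir)
 *       capacity, used, available = client.df(ctx)
 *   finally:
 *       transport.close()
 */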

/** Encapsulates a block data transfer with its CRC. */
struct BlockData {
  /** CRC32 of the data being transferred. */
  1: i32 crc,
  /** Length of the data being transferred. */
  2: i32 length,
  /** The data itself. */
  3: binary data
}
/**
 * Provides an interface to data nodes, so that clients may read and write
 * data blocks.
 */
service Datanode {

  /**
   * Read bytes from a block.
   *
   * Only 2^31 - 1 bytes may be read in a single call to this method.
   */
  BlockData readBlock(10: common.RequestContext ctx,
                      /** Block to be read from. */
                      1: Block block,
                      /** Offset within the block where the read must start. */
                      2: i64 offset,
                      /** Number of bytes to read. */
                      3: i32 length) throws (1: common.IOException err)
}
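
/*
 * End-to-end read sketch (Python, illustrative): ask the Namenode for the
 * blocks of a file, read each block from a Datanode, and verify the
 * payload. 'nn' and 'dn' are assumed to be connected Namenode.Client and
 * Datanode.Client instances (a real client would connect to a host from
 * each block's 'nodes' list); checking the 'crc' field with zlib.crc32 is
 * an assumption about how that field is computed.
 *
 *   import zlib
 *
 *   st = nn.stat(ctx, "/tmp/f")
 *   for b in nn.getBlocks(ctx, "/tmp/f", 0, st.length):
 *       # readBlock's length is i32, so one call reads at most 2**31 - 1 bytes.
 *       bd = dn.readBlock(ctx, b, 0, int(b.numBytes))
 *       assert bd.length == len(bd.data)
 *       assert zlib.crc32(bd.data) & 0xffffffff == bd.crc & 0xffffffff
 */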