hdfs.thrift

/*
 * Licensed to Cloudera, Inc. under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Cloudera, Inc. licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Thrift interface for HDFS.
 */

/* Common types and interfaces */
include "common.thrift"
/*
 * Namespaces for generated code. The idea is to keep code generated by
 * Thrift under a 'hadoop.api' namespace, so that a higher-level set of
 * functions and classes may be defined under 'hadoop'.
 */
namespace cpp    hadoop.api.hdfs
namespace csharp Hadoop.API.HDFS
namespace java   org.apache.hadoop.thriftfs.api
namespace perl   Hadoop.API.HDFS
namespace php    hadoop_api_hdfs
namespace py     hadoop.api.hdfs
namespace rb     Hadoop.API.HDFS
/* Values for the 'type' argument to getDatanodeReport(). */
enum DatanodeReportType {
  ALL_DATANODES = 1;
  LIVE_DATANODES = 2;
  DEAD_DATANODES = 3;
}

/* Values for DatanodeInfo.state */
enum DatanodeState {
  NORMAL_STATE = 1;
  DECOMMISSION_INPROGRESS = 2;
  DECOMMISSIONED = 3;
}

/* Value for an unknown Thrift port in DatanodeInfo */
const i32 UNKNOWN_THRIFT_PORT = -1;
/* Values for setQuota() parameters. */

/* Leave the quota unchanged on this path. */
const i64 QUOTA_DONT_SET = -2
/* Clear the quota on this path. */
const i64 QUOTA_RESET = -1
/**
 * Information and state of a data node.
 *
 * Modelled after org.apache.hadoop.hdfs.protocol.DatanodeInfo
 */
struct DatanodeInfo {
  /** HDFS name of the datanode (equals <host>:<datanode port>) */
  1: string name,
  /** Unique ID within an HDFS cluster */
  2: string storageID,
  /** Host name of the Thrift server socket. */
  3: string host,
  /** Port number of the Thrift server socket, or UNKNOWN_THRIFT_PORT
      if the Thrift port for this datanode is not known. */
  4: i32 thriftPort,
  /** Port number of the Web UI */
  10: i32 httpPort,
  /** Raw capacity of the data node (in bytes). */
  5: i64 capacity,
  /** Space used by the data node (in bytes). */
  6: i64 dfsUsed,
  /** Raw free space in the data node (in bytes). */
  7: i64 remaining,
  /** Number of active connections to the data node. */
  8: i32 xceiverCount,
  /** State of this data node. */
  9: DatanodeState state,
  /** Number of milliseconds since last contact. */
  11: i64 millisSinceUpdate,
}
/**
 * Representation of a file block in HDFS
 *
 * Modelled after org.apache.hadoop.hdfs.protocol.LocatedBlock
 */
struct Block {
  /** Block ID (unique among all blocks in a filesystem). */
  1: i64 blockId,
  /** Path of the file which this block belongs to. */
  2: string path,
  /** Length of this block. */
  3: i64 numBytes,
  /** Generational stamp of this block. */
  4: i64 genStamp,
  /** List of data nodes with copies of this block. */
  5: list<DatanodeInfo> nodes,
  /** Offset of the first byte of the block relative to the start of the file. */
  6: i64 startOffset,
  /** The serialized token associated with this block. */
  7: string token,
}
/**
 * Information about a path in HDFS.
 *
 * Modelled after org.apache.hadoop.fs.FileStatus
 */
struct Stat {
  /** The path. */
  1: string path,
  /**
   * True:  The path represents a file.
   * False: The path represents a directory.
   */
  2: bool isDir,

  /* Fields common to file and directory paths. */

  /** Access time (milliseconds since 1970-01-01 00:00 UTC). */
  3: i64 atime,
  /** Modification time (milliseconds since 1970-01-01 00:00 UTC). */
  4: i64 mtime,
  /** Access permissions */
  5: i16 perms,
  /** Owner */
  6: string owner,
  /** Group */
  7: string group,

  /* Fields for file paths (will be zero for directory entries). */

  /** Length (in bytes). */
  13: i64 length,
  /** Block size (in bytes). */
  14: i64 blockSize,
  /** Replication factor. */
  15: i16 replication,
}
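
/*
 * Illustrative sketch (not part of the IDL): Stat.perms is assumed to carry
 * POSIX-style permission bits, so a Thrift-generated Python client could
 * render them the way 'ls' does. The value below is a made-up example.
 *
 *   import stat as stat_mod
 *   st_perms = 0o644                        # e.g. a Stat.perms value
 *   print(stat_mod.filemode(st_perms)[1:])  # -> 'rw-r--r--' (file-type char dropped)
 */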
/**
 * Information about an entire subtree under a directory.
 * Includes the information from org.apache.hadoop.fs.ContentSummary
 */
struct ContentSummary {
  /* Fields for directory paths (will be zero for file entries). */

  /** Number of files in this directory */
  1: i64 fileCount,
  /** Number of directories in this directory */
  2: i64 directoryCount,
  /** Quota for this directory (number of names). */
  3: i64 quota,
  /** Disk space consumed (in bytes). */
  4: i64 spaceConsumed,
  /** Disk space quota (in bytes). */
  5: i64 spaceQuota,
  /** The path */
  6: string path,
}
struct UpgradeStatusReport {
  1: i32 version,
  2: i16 percentComplete,
  3: bool finalized,
  /** The informative text that is the same as is shown on the NN web UI */
  4: string statusText,
}
/**
 * Information that mirrors the "health report" information available on the
 * NameNode web UI
 */
struct DFSHealthReport {
  1: i64 bytesTotal,
  2: i64 bytesUsed,
  3: i64 bytesRemaining,
  4: i64 bytesNonDfs,
  /** How many datanodes are considered live */
  5: i32 numLiveDataNodes,
  /** How many datanodes are considered dead */
  6: i32 numDeadDataNodes,
  /**
   * Status of the currently running upgrade. If no upgrade
   * is running, this will be null.
   */
  7: UpgradeStatusReport upgradeStatus,
  /**
   * The HTTP port that the NameNode is listening on for its web UI
   * - this isn't really health, but it's related and handy.
   */
  8: i32 httpPort,
}
/** Quota-related error */
exception QuotaException {
  /** Error message. */
  1: string msg,
  /** Textual representation of the call stack. */
  2: string stack,
}
/**
 * Provides an interface to a Hadoop Namenode. It is basically a Thrift
 * translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
 */
service Namenode extends common.HadoopServiceBase {

  /** Set permissions of an existing file or directory. */
  void chmod(10: common.RequestContext ctx,
             /** Path of the file or directory. */
             1: string path,
             /** New permissions for the file or directory. */
             2: i16 perms) throws (1: common.IOException err),
  /**
   * Set the owner of a file or directory.
   *
   * If either parameter 'owner' or 'group' is set to null, that
   * parameter is left unchanged.
   *
   * Parameters 'owner' and 'group' cannot both be null.
   */
  void chown(10: common.RequestContext ctx,
             /** Path to the file or directory */
             1: string path,
             /** New owner. */
             2: string owner,
             /** New group. */
             3: string group) throws (1: common.IOException err),
  /**
   * Return a list containing:
   *   (index 0) The total storage capacity of the file system (in bytes).
   *   (index 1) The total used space of the file system (in bytes).
   *   (index 2) The available storage of the file system (in bytes).
   */
  list<i64> df(10: common.RequestContext ctx),
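
  /*
   * Illustrative sketch (not part of the IDL): computing utilization from
   * df() with a Thrift-generated Python client. 'client' is assumed to be a
   * connected Namenode.Client and 'ctx' a common.RequestContext (see the
   * connection sketch after this service definition).
   *
   *   capacity, used, available = client.df(ctx)
   *   pct_used = 100.0 * used / capacity if capacity else 0.0
   */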
  /**
   * Enter safe mode.
   */
  void enterSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /** Get a list of all blocks containing a region of a file */
  list<Block> getBlocks(10: common.RequestContext ctx,
                        /** Path to the file. */
                        1: string path,
                        /** Offset of the region. */
                        2: i64 offset,
                        /** Length of the region. */
                        3: i64 length) throws (1: common.IOException err),
  /**
   * Get the preferred block size for the given file.
   *
   * The path must exist, or common.IOException is thrown.
   */
  i64 getPreferredBlockSize(10: common.RequestContext ctx,
                            /** Path to the file. */
                            1: string path) throws (1: common.IOException err),

  /**
   * Returns whether HDFS is in safe mode or not.
   */
  bool isInSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /**
   * Leave safe mode.
   */
  void leaveSafeMode(10: common.RequestContext ctx) throws (1: common.IOException err),

  /** Get a listing of the indicated directory. */
  list<Stat> ls(10: common.RequestContext ctx,
                /** Path to the directory. */
                1: string path) throws (1: common.IOException err),
  /**
   * Create a directory (or hierarchy of directories).
   *
   * Returns false if the directory did not exist and could not be created,
   * true otherwise.
   */
  bool mkdirhier(10: common.RequestContext ctx,
                 /** Path to the directory. */
                 1: string path,
                 /** Access permissions of the directory. */
                 2: i16 perms) throws (1: common.IOException err),

  /** Tells the name node to reread the hosts and exclude files. */
  void refreshNodes(10: common.RequestContext ctx) throws (1: common.IOException err),
  /**
   * Rename an item in the file system namespace.
   *
   * Returns true if successful, or false if the old name does not exist
   * or if the new name already belongs to the namespace.
   */
  bool rename(10: common.RequestContext ctx,
              /** Path to the existing file or directory. */
              1: string path,
              /** New path. */
              2: string newPath) throws (1: common.IOException err),

  /** Report corrupted blocks. */
  void reportBadBlocks(10: common.RequestContext ctx,
                       /** List of corrupted blocks. */
                       1: list<Block> blocks) throws (1: common.IOException err),
  /**
   * Get information about a path in HDFS.
   *
   * The return value will be null if the path does not exist.
   */
  Stat stat(10: common.RequestContext ctx,
            /** Path of the file or directory. */
            1: string path) throws (1: common.IOException err),
  /**
   * Get the summary of a directory's contents.
   *
   * Note that this has a runtime linear in the total number of nodes
   * in the directory tree, which can be expensive for directories
   * near the top of a big HDFS namespace. Use with care.
   */
  ContentSummary getContentSummary(10: common.RequestContext ctx,
                                   1: string path) throws (1: common.IOException err),

  /**
   * Get ContentSummary objects for multiple directories simultaneously.
   * The same warnings apply as for getContentSummary(...) above.
   */
  list<ContentSummary> multiGetContentSummary(10: common.RequestContext ctx,
                                              1: list<string> paths) throws (1: common.IOException err),
  /**
   * Set the quota for a directory.
   *
   * Quota parameters may take three kinds of values:
   *
   *   (1) 0 or more:      Quota will be set to that value.
   *   (2) QUOTA_DONT_SET: Quota will not be changed.
   *   (3) QUOTA_RESET:    Quota will be reset.
   *
   * Any other value is a runtime error.
   */
  void setQuota(10: common.RequestContext ctx,
                /** Path of the directory. */
                1: string path,
                /** Limit on the number of names in the directory. */
                2: i64 namespaceQuota,
                /** Limit on the disk space occupied by all the files in the
                    directory. */
                3: i64 diskspaceQuota) throws (1: common.IOException err),
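
  /*
   * Illustrative sketch (not part of the IDL): using the quota constants from
   * a Thrift-generated Python client. The module path follows the 'py'
   * namespace above; 'client' and 'ctx' are assumed as in the df() sketch,
   * and the path is a placeholder.
   *
   *   from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
   *   # Cap the directory at 10000 names, leave its disk-space quota unchanged:
   *   client.setQuota(ctx, '/user/alice', 10000, QUOTA_DONT_SET)
   *   # Clear both quotas:
   *   client.setQuota(ctx, '/user/alice', QUOTA_RESET, QUOTA_RESET)
   */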
  /**
   * Set the replication factor for an existing file.
   *
   * This call just updates the value of the replication factor. The actual
   * block replication is not expected to be performed during this method call.
   * The blocks will be populated or removed in the background as a result of
   * the routine block maintenance procedures.
   *
   * Returns true if successful, false if the file does not exist or is a
   * directory.
   */
  bool setReplication(10: common.RequestContext ctx,
                      /** Path of the file. */
                      1: string path,
                      /** New replication factor. */
                      2: i16 replication) throws (1: common.IOException err),
  /**
   * Delete a file or directory from the file system.
   *
   * Any blocks belonging to the deleted files will be garbage-collected.
   */
  bool unlink(10: common.RequestContext ctx,
              /** Path of the file or directory. */
              1: string path,
              /** Delete a non-empty directory recursively. */
              2: bool recursive) throws (1: common.IOException err),
  /**
   * Set the modification and access time of a file or directory.
   *
   * Setting *one* time parameter to -1 means that parameter will not be
   * changed by this call.
   *
   * Setting *both* time parameters to -1 means both will be set to the
   * current time.
   */
  void utime(10: common.RequestContext ctx,
             /** Path of the file or directory. */
             1: string path,
             /** Access time in milliseconds since 1970-01-01 00:00 UTC */
             2: i64 atime,
             /** Modification time in milliseconds since 1970-01-01 00:00 UTC */
             3: i64 mtime) throws (1: common.IOException err),
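
  /*
   * Illustrative sketch (not part of the IDL) of the -1 semantics above,
   * with 'client', 'ctx', and the path assumed as in the earlier sketches:
   *
   *   client.utime(ctx, '/user/alice/f', -1, 1262304000000)  # set mtime only
   *   client.utime(ctx, '/user/alice/f', -1, -1)             # touch both to "now"
   */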
  /*
   * The following methods are meant to be called by datanodes to advertise
   * themselves to the namenode.
   */

  /**
   * Inform the namenode that a datanode process has started.
   */
  void datanodeUp(/** <host name>:<port number> of the datanode */
                  1: string name,
                  /** the storage ID of the datanode */
                  2: string storage,
                  /** Thrift port of the datanode */
                  3: i32 thriftPort),

  /**
   * Inform the namenode that a datanode process has stopped.
   */
  void datanodeDown(/** <host name>:<port number> of the datanode */
                    1: string name,
                    /** the storage ID of the datanode */
                    2: string storage,
                    /** Thrift port of the datanode */
                    3: i32 thriftPort),
  /**
   * Get an HDFS delegation token.
   */
  common.ThriftDelegationToken getDelegationToken(10: common.RequestContext ctx,
                                                  1: string renewer) throws (1: common.IOException err)
}
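
/*
 * Example (illustrative only, not part of the IDL): a minimal sketch of
 * calling this service from Python with Thrift-generated bindings. Module
 * paths follow the 'py' namespaces declared above and in common.thrift; the
 * host name and port are placeholders, and RequestContext is assumed to be
 * constructible with no arguments.
 *
 *   from thrift.transport import TSocket, TTransport
 *   from thrift.protocol import TBinaryProtocol
 *   from hadoop.api.hdfs import Namenode
 *   from hadoop.api.common.ttypes import RequestContext
 *
 *   socket = TSocket.TSocket('namenode.example.com', 10090)  # placeholder endpoint
 *   transport = TTransport.TBufferedTransport(socket)
 *   client = Namenode.Client(TBinaryProtocol.TBinaryProtocol(transport))
 *   transport.open()
 *   ctx = RequestContext()
 *   for st in client.ls(ctx, '/user'):
 *       print(st.path, st.length, st.owner)
 *   transport.close()
 */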
/** Encapsulates a block data transfer with its CRC */
struct BlockData {
  /** CRC32 of the data being transferred */
  1: i32 crc,
  /** Length of the data being transferred */
  2: i32 length,
  /** The data itself */
  3: binary data,
}
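
/*
 * Illustrative sketch (not part of the IDL): verifying BlockData in Python,
 * assuming the checksum is a standard CRC-32 (the polynomial used by both
 * zlib and java.util.zip.CRC32). Masking makes the signed i32 comparable
 * to zlib's unsigned result.
 *
 *   import zlib
 *
 *   def block_data_ok(bd):
 *       return (zlib.crc32(bd.data) & 0xffffffff) == (bd.crc & 0xffffffff)
 */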
/**
 * Provides an interface to data nodes, so that clients may read and write
 * data blocks.
 */
service Datanode {
  /**
   * Read bytes from a block.
   *
   * Only 2^31 - 1 bytes may be read on a single call to this method.
   */
  BlockData readBlock(10: common.RequestContext ctx,
                      /** Block to be read from. */
                      1: Block block,
                      /** Offset within the block where the read must start. */
                      2: i64 offset,
                      /** Number of bytes to read. */
                      3: i32 length) throws (1: common.IOException err)
}
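
/*
 * Illustrative sketch (not part of the IDL): reading a byte range from a
 * block in chunks, so no single readBlock() call exceeds the 2^31 - 1 byte
 * limit. 'dn' is assumed to be a connected Datanode.Client and 'ctx' a
 * common.RequestContext, as in the Namenode sketch above.
 *
 *   CHUNK = 1 << 20  # 1 MiB per call, well under the per-call cap
 *
 *   def read_range(dn, ctx, block, offset, length):
 *       parts, end = [], offset + length
 *       while offset < end:
 *           bd = dn.readBlock(ctx, block, offset, min(CHUNK, end - offset))
 *           if bd.length == 0:   # no more data available
 *               break
 *           parts.append(bd.data)
 *           offset += bd.length
 *       return b''.join(parts)
 */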