hive_metastore.thrift 60 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548
  1. #!/usr/local/bin/thrift -java
  2. /**
  3. * Licensed to the Apache Software Foundation (ASF) under one
  4. * or more contributor license agreements. See the NOTICE file
  5. * distributed with this work for additional information
  6. * regarding copyright ownership. The ASF licenses this file
  7. * to you under the Apache License, Version 2.0 (the
  8. * "License"); you may not use this file except in compliance
  9. * with the License. You may obtain a copy of the License at
  10. *
  11. * http://www.apache.org/licenses/LICENSE-2.0
  12. *
  13. * Unless required by applicable law or agreed to in writing, software
  14. * distributed under the License is distributed on an "AS IS" BASIS,
  15. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16. * See the License for the specific language governing permissions and
  17. * limitations under the License.
  18. */
  19. #
  20. # Thrift Service that the MetaStore is built on
  21. #
  22. #include "share/fb303/if/fb303.thrift"
  23. include "fb303.thrift"
  24. namespace java org.apache.hadoop.hive.metastore.api
  25. namespace php metastore
  26. namespace cpp Apache.Hadoop.Hive
  27. const string DDL_TIME = "transient_lastDdlTime"
  28. struct Version {
  29. 1: string version,
  30. 2: string comments
  31. }
  32. struct FieldSchema {
  33. 1: string name, // name of the field
  34. 2: string type, // type of the field. primitive types defined above, specify list<TYPE_NAME>, map<TYPE_NAME, TYPE_NAME> for lists & maps
  35. 3: string comment
  36. }
  37. struct SQLPrimaryKey {
  38. 1: string table_db, // table schema
  39. 2: string table_name, // table name
  40. 3: string column_name, // column name
  41. 4: i32 key_seq, // sequence number within primary key
  42. 5: string pk_name, // primary key name
  43. 6: bool enable_cstr, // Enable/Disable
  44. 7: bool validate_cstr, // Validate/No validate
  45. 8: bool rely_cstr // Rely/No Rely
  46. }
  47. struct SQLForeignKey {
  48. 1: string pktable_db, // primary key table schema
  49. 2: string pktable_name, // primary key table name
  50. 3: string pkcolumn_name, // primary key column name
  51. 4: string fktable_db, // foreign key table schema
  52. 5: string fktable_name, // foreign key table name
  53. 6: string fkcolumn_name, // foreign key column name
  54. 7: i32 key_seq, // sequence within foreign key
  55. 8: i32 update_rule, // what happens to foreign key when parent key is updated
  56. 9: i32 delete_rule, // what happens to foreign key when parent key is deleted
  57. 10: string fk_name, // foreign key name
  58. 11: string pk_name, // primary key name
  59. 12: bool enable_cstr, // Enable/Disable
  60. 13: bool validate_cstr, // Validate/No validate
  61. 14: bool rely_cstr // Rely/No Rely
  62. }
  63. struct Type {
  64. 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types
  65. 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
  66. 3: optional string type2, // val type if the name is 'map' (MAP_TYPE)
  67. 4: optional list<FieldSchema> fields // if the name is one of the user defined types
  68. }
  69. enum HiveObjectType {
  70. GLOBAL = 1,
  71. DATABASE = 2,
  72. TABLE = 3,
  73. PARTITION = 4,
  74. COLUMN = 5,
  75. }
  76. enum PrincipalType {
  77. USER = 1,
  78. ROLE = 2,
  79. GROUP = 3,
  80. }
  81. const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
  82. const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
  83. const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"
  84. enum PartitionEventType {
  85. LOAD_DONE = 1,
  86. }
  87. // Enums for transaction and lock management
  88. enum TxnState {
  89. COMMITTED = 1,
  90. ABORTED = 2,
  91. OPEN = 3,
  92. }
  93. enum LockLevel {
  94. DB = 1,
  95. TABLE = 2,
  96. PARTITION = 3,
  97. }
  98. enum LockState {
  99. ACQUIRED = 1, // requester has the lock
  100. WAITING = 2, // requester is waiting for the lock and should call checklock at a later point to see if the lock has been obtained.
  101. ABORT = 3, // the lock has been aborted, most likely due to timeout
  102. NOT_ACQUIRED = 4, // returned only with lockNoWait, indicates the lock was not available and was not acquired
  103. }
  104. enum LockType {
  105. SHARED_READ = 1,
  106. SHARED_WRITE = 2,
  107. EXCLUSIVE = 3,
  108. }
  109. enum CompactionType {
  110. MINOR = 1,
  111. MAJOR = 2,
  112. }
  113. enum GrantRevokeType {
  114. GRANT = 1,
  115. REVOKE = 2,
  116. }
  117. enum DataOperationType {
  118. SELECT = 1,
  119. INSERT = 2
  120. UPDATE = 3,
  121. DELETE = 4,
  122. UNSET = 5,//this is the default to distinguish from NULL from old clients
  123. NO_TXN = 6,//drop table, insert overwrite, etc - something non-transactional
  124. }
  125. // Types of events the client can request that the metastore fire. For now just support DML operations, as the metastore knows
  126. // about DDL operations and there's no reason for the client to request such an event.
  127. enum EventRequestType {
  128. INSERT = 1,
  129. UPDATE = 2,
  130. DELETE = 3,
  131. }
  132. struct HiveObjectRef{
  133. 1: HiveObjectType objectType,
  134. 2: string dbName,
  135. 3: string objectName,
  136. 4: list<string> partValues,
  137. 5: string columnName,
  138. }
  139. struct PrivilegeGrantInfo {
  140. 1: string privilege,
  141. 2: i32 createTime,
  142. 3: string grantor,
  143. 4: PrincipalType grantorType,
  144. 5: bool grantOption,
  145. }
  146. struct HiveObjectPrivilege {
  147. 1: HiveObjectRef hiveObject,
  148. 2: string principalName,
  149. 3: PrincipalType principalType,
  150. 4: PrivilegeGrantInfo grantInfo,
  151. }
  152. struct PrivilegeBag {
  153. 1: list<HiveObjectPrivilege> privileges,
  154. }
  155. struct PrincipalPrivilegeSet {
  156. 1: map<string, list<PrivilegeGrantInfo>> userPrivileges, // user name -> privilege grant info
  157. 2: map<string, list<PrivilegeGrantInfo>> groupPrivileges, // group name -> privilege grant info
  158. 3: map<string, list<PrivilegeGrantInfo>> rolePrivileges, //role name -> privilege grant info
  159. }
  160. struct GrantRevokePrivilegeRequest {
  161. 1: GrantRevokeType requestType;
  162. 2: PrivilegeBag privileges;
  163. 3: optional bool revokeGrantOption; // Only for revoke request
  164. }
  165. struct GrantRevokePrivilegeResponse {
  166. 1: optional bool success;
  167. }
  168. struct Role {
  169. 1: string roleName,
  170. 2: i32 createTime,
  171. 3: string ownerName,
  172. }
  173. // Representation of a grant for a principal to a role
  174. struct RolePrincipalGrant {
  175. 1: string roleName,
  176. 2: string principalName,
  177. 3: PrincipalType principalType,
  178. 4: bool grantOption,
  179. 5: i32 grantTime,
  180. 6: string grantorName,
  181. 7: PrincipalType grantorPrincipalType
  182. }
  183. struct GetRoleGrantsForPrincipalRequest {
  184. 1: required string principal_name,
  185. 2: required PrincipalType principal_type
  186. }
  187. struct GetRoleGrantsForPrincipalResponse {
  188. 1: required list<RolePrincipalGrant> principalGrants;
  189. }
  190. struct GetPrincipalsInRoleRequest {
  191. 1: required string roleName;
  192. }
  193. struct GetPrincipalsInRoleResponse {
  194. 1: required list<RolePrincipalGrant> principalGrants;
  195. }
  196. struct GrantRevokeRoleRequest {
  197. 1: GrantRevokeType requestType;
  198. 2: string roleName;
  199. 3: string principalName;
  200. 4: PrincipalType principalType;
  201. 5: optional string grantor; // Needed for grant
  202. 6: optional PrincipalType grantorType; // Needed for grant
  203. 7: optional bool grantOption;
  204. }
  205. struct GrantRevokeRoleResponse {
  206. 1: optional bool success;
  207. }
  208. // namespace for tables
  209. struct Database {
  210. 1: string name,
  211. 2: string description,
  212. 3: string locationUri,
  213. 4: map<string, string> parameters, // properties associated with the database
  214. 5: optional PrincipalPrivilegeSet privileges,
  215. 6: optional string ownerName,
  216. 7: optional PrincipalType ownerType
  217. // field id is 9 to keep backwards compatibility when we rebase to Hive 3.0 which has
  218. // catalogname as field id 8
  219. 9: optional i32 createTime // creation time of database in seconds since epoch
  220. }
  221. // This object holds the information needed by SerDes
  222. struct SerDeInfo {
  223. 1: string name, // name of the serde, table name by default
  224. 2: string serializationLib, // usually the class that implements the extractor & loader
  225. 3: map<string, string> parameters // initialization parameters
  226. }
  227. // sort order of a column (column name along with asc(1)/desc(0))
  228. struct Order {
  229. 1: string col, // sort column name
  230. 2: i32 order // asc(1) or desc(0)
  231. }
  232. // this object holds all the information about skewed table
  233. struct SkewedInfo {
  234. 1: list<string> skewedColNames, // skewed column names
  235. 2: list<list<string>> skewedColValues, //skewed values
  236. 3: map<list<string>, string> skewedColValueLocationMaps, //skewed value to location mappings
  237. }
  238. // this object holds all the information about physical storage of the data belonging to a table
  239. struct StorageDescriptor {
  240. 1: list<FieldSchema> cols, // required (refer to types defined above)
  241. 2: string location, // defaults to <warehouse loc>/<db loc>/tablename
  242. 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format
  243. 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format
  244. 5: bool compressed, // compressed or not
  245. 6: i32 numBuckets, // this must be specified if there are any dimension columns
  246. 7: SerDeInfo serdeInfo, // serialization and deserialization information
  247. 8: list<string> bucketCols, // reducer grouping columns and clustering columns and bucketing columns`
  248. 9: list<Order> sortCols, // sort order of the data in each bucket
  249. 10: map<string, string> parameters, // any user supplied key value hash
  250. 11: optional SkewedInfo skewedInfo, // skewed information
  251. 12: optional bool storedAsSubDirectories // stored as subdirectories or not
  252. }
  253. // table information
  254. struct Table {
  255. 1: string tableName, // name of the table
  256. 2: string dbName, // database name ('default')
  257. 3: string owner, // owner of this table
  258. 4: i32 createTime, // creation time of the table
  259. 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on)
  260. 6: i32 retention, // retention time
  261. 7: StorageDescriptor sd, // storage descriptor of the table
  262. 8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
  263. 9: map<string, string> parameters, // to store comments or any other user level parameters
  264. 10: string viewOriginalText, // original view text, null for non-view
  265. 11: string viewExpandedText, // expanded view text, null for non-view
  266. 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
  267. 13: optional PrincipalPrivilegeSet privileges,
  268. 14: optional bool temporary=false,
  269. 15: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
  270. }
  271. struct Partition {
  272. 1: list<string> values // string value is converted to appropriate partition key type
  273. 2: string dbName,
  274. 3: string tableName,
  275. 4: i32 createTime,
  276. 5: i32 lastAccessTime,
  277. 6: StorageDescriptor sd,
  278. 7: map<string, string> parameters,
  279. 8: optional PrincipalPrivilegeSet privileges
  280. }
  281. struct PartitionWithoutSD {
  282. 1: list<string> values // string value is converted to appropriate partition key type
  283. 2: i32 createTime,
  284. 3: i32 lastAccessTime,
  285. 4: string relativePath,
  286. 5: map<string, string> parameters,
  287. 6: optional PrincipalPrivilegeSet privileges
  288. }
  289. struct PartitionSpecWithSharedSD {
  290. 1: list<PartitionWithoutSD> partitions,
  291. 2: StorageDescriptor sd,
  292. }
  293. struct PartitionListComposingSpec {
  294. 1: list<Partition> partitions
  295. }
  296. struct PartitionSpec {
  297. 1: string dbName,
  298. 2: string tableName,
  299. 3: string rootPath,
  300. 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
  301. 5: optional PartitionListComposingSpec partitionList
  302. }
  303. struct Index {
  304. 1: string indexName, // unique with in the whole database namespace
  305. 2: string indexHandlerClass, // reserved
  306. 3: string dbName,
  307. 4: string origTableName,
  308. 5: i32 createTime,
  309. 6: i32 lastAccessTime,
  310. 7: string indexTableName,
  311. 8: StorageDescriptor sd,
  312. 9: map<string, string> parameters,
  313. 10: bool deferredRebuild
  314. }
  315. // column statistics
  316. struct BooleanColumnStatsData {
  317. 1: required i64 numTrues,
  318. 2: required i64 numFalses,
  319. 3: required i64 numNulls,
  320. 4: optional string bitVectors
  321. }
  322. struct DoubleColumnStatsData {
  323. 1: optional double lowValue,
  324. 2: optional double highValue,
  325. 3: required i64 numNulls,
  326. 4: required i64 numDVs,
  327. 5: optional string bitVectors
  328. }
  329. struct LongColumnStatsData {
  330. 1: optional i64 lowValue,
  331. 2: optional i64 highValue,
  332. 3: required i64 numNulls,
  333. 4: required i64 numDVs,
  334. 5: optional string bitVectors
  335. }
  336. struct StringColumnStatsData {
  337. 1: required i64 maxColLen,
  338. 2: required double avgColLen,
  339. 3: required i64 numNulls,
  340. 4: required i64 numDVs,
  341. 5: optional string bitVectors
  342. }
  343. struct BinaryColumnStatsData {
  344. 1: required i64 maxColLen,
  345. 2: required double avgColLen,
  346. 3: required i64 numNulls,
  347. 4: optional string bitVectors
  348. }
  349. struct Decimal {
  350. 1: required binary unscaled,
  351. 3: required i16 scale
  352. }
  353. struct DecimalColumnStatsData {
  354. 1: optional Decimal lowValue,
  355. 2: optional Decimal highValue,
  356. 3: required i64 numNulls,
  357. 4: required i64 numDVs,
  358. 5: optional string bitVectors
  359. }
  360. struct Date {
  361. 1: required i64 daysSinceEpoch
  362. }
  363. struct DateColumnStatsData {
  364. 1: optional Date lowValue,
  365. 2: optional Date highValue,
  366. 3: required i64 numNulls,
  367. 4: required i64 numDVs,
  368. 5: optional string bitVectors
  369. }
  370. union ColumnStatisticsData {
  371. 1: BooleanColumnStatsData booleanStats,
  372. 2: LongColumnStatsData longStats,
  373. 3: DoubleColumnStatsData doubleStats,
  374. 4: StringColumnStatsData stringStats,
  375. 5: BinaryColumnStatsData binaryStats,
  376. 6: DecimalColumnStatsData decimalStats,
  377. 7: DateColumnStatsData dateStats
  378. }
  379. struct ColumnStatisticsObj {
  380. 1: required string colName,
  381. 2: required string colType,
  382. 3: required ColumnStatisticsData statsData
  383. }
  384. struct ColumnStatisticsDesc {
  385. 1: required bool isTblLevel,
  386. 2: required string dbName,
  387. 3: required string tableName,
  388. 4: optional string partName,
  389. 5: optional i64 lastAnalyzed
  390. }
  391. struct ColumnStatistics {
  392. 1: required ColumnStatisticsDesc statsDesc,
  393. 2: required list<ColumnStatisticsObj> statsObj;
  394. }
  395. struct AggrStats {
  396. 1: required list<ColumnStatisticsObj> colStats,
  397. 2: required i64 partsFound // number of partitions for which stats were found
  398. }
  399. struct SetPartitionsStatsRequest {
  400. 1: required list<ColumnStatistics> colStats,
  401. 2: optional bool needMerge //stats need to be merged with the existing stats
  402. }
  403. // schema of the table/query results etc.
  404. struct Schema {
  405. // column names, types, comments
  406. 1: list<FieldSchema> fieldSchemas, // delimiters etc
  407. 2: map<string, string> properties
  408. }
  409. // Key-value store to be used with selected
  410. // Metastore APIs (create, alter methods).
  411. // The client can pass environment properties / configs that can be
  412. // accessed in hooks.
  413. struct EnvironmentContext {
  414. 1: map<string, string> properties
  415. }
  416. struct PrimaryKeysRequest {
  417. 1: required string db_name,
  418. 2: required string tbl_name
  419. }
  420. struct PrimaryKeysResponse {
  421. 1: required list<SQLPrimaryKey> primaryKeys
  422. }
  423. struct ForeignKeysRequest {
  424. 1: string parent_db_name,
  425. 2: string parent_tbl_name,
  426. 3: string foreign_db_name,
  427. 4: string foreign_tbl_name
  428. }
  429. struct ForeignKeysResponse {
  430. 1: required list<SQLForeignKey> foreignKeys
  431. }
  432. struct DropConstraintRequest {
  433. 1: required string dbname,
  434. 2: required string tablename,
  435. 3: required string constraintname
  436. }
  437. struct AddPrimaryKeyRequest {
  438. 1: required list<SQLPrimaryKey> primaryKeyCols
  439. }
  440. struct AddForeignKeyRequest {
  441. 1: required list<SQLForeignKey> foreignKeyCols
  442. }
  443. // Return type for get_partitions_by_expr
  444. struct PartitionsByExprResult {
  445. 1: required list<Partition> partitions,
  446. // Whether the results has any (currently, all) partitions which may or may not match
  447. 2: required bool hasUnknownPartitions
  448. }
  449. struct PartitionsByExprRequest {
  450. 1: required string dbName,
  451. 2: required string tblName,
  452. 3: required binary expr,
  453. 4: optional string defaultPartitionName,
  454. 5: optional i16 maxParts=-1
  455. }
  456. struct TableStatsResult {
  457. 1: required list<ColumnStatisticsObj> tableStats
  458. }
  459. struct PartitionsStatsResult {
  460. 1: required map<string, list<ColumnStatisticsObj>> partStats
  461. }
  462. struct TableStatsRequest {
  463. 1: required string dbName,
  464. 2: required string tblName,
  465. 3: required list<string> colNames
  466. }
  467. struct PartitionsStatsRequest {
  468. 1: required string dbName,
  469. 2: required string tblName,
  470. 3: required list<string> colNames,
  471. 4: required list<string> partNames
  472. }
  473. // Return type for add_partitions_req
  474. struct AddPartitionsResult {
  475. 1: optional list<Partition> partitions,
  476. }
  477. // Request type for add_partitions_req
  478. struct AddPartitionsRequest {
  479. 1: required string dbName,
  480. 2: required string tblName,
  481. 3: required list<Partition> parts,
  482. 4: required bool ifNotExists,
  483. 5: optional bool needResult=true
  484. }
  485. // Return type for drop_partitions_req
  486. struct DropPartitionsResult {
  487. 1: optional list<Partition> partitions,
  488. }
  489. struct DropPartitionsExpr {
  490. 1: required binary expr;
  491. 2: optional i32 partArchiveLevel;
  492. }
  493. union RequestPartsSpec {
  494. 1: list<string> names;
  495. 2: list<DropPartitionsExpr> exprs;
  496. }
  497. // Request type for drop_partitions_req
  498. // TODO: we might want to add "bestEffort" flag; where a subset can fail
  499. struct DropPartitionsRequest {
  500. 1: required string dbName,
  501. 2: required string tblName,
  502. 3: required RequestPartsSpec parts,
  503. 4: optional bool deleteData,
  504. 5: optional bool ifExists=true, // currently verified on client
  505. 6: optional bool ignoreProtection,
  506. 7: optional EnvironmentContext environmentContext,
  507. 8: optional bool needResult=true
  508. }
  509. enum FunctionType {
  510. JAVA = 1,
  511. }
  512. enum ResourceType {
  513. JAR = 1,
  514. FILE = 2,
  515. ARCHIVE = 3,
  516. }
  517. struct ResourceUri {
  518. 1: ResourceType resourceType,
  519. 2: string uri,
  520. }
  521. // User-defined function
  522. struct Function {
  523. 1: string functionName,
  524. 2: string dbName,
  525. 3: string className,
  526. 4: string ownerName,
  527. 5: PrincipalType ownerType,
  528. 6: i32 createTime,
  529. 7: FunctionType functionType,
  530. 8: list<ResourceUri> resourceUris,
  531. }
  532. // Structs for transaction and locks
  533. struct TxnInfo {
  534. 1: required i64 id,
  535. 2: required TxnState state,
  536. 3: required string user, // used in 'show transactions' to help admins find who has open transactions
  537. 4: required string hostname, // used in 'show transactions' to help admins find who has open transactions
  538. 5: optional string agentInfo = "Unknown",
  539. 6: optional i32 heartbeatCount=0,
  540. 7: optional string metaInfo,
  541. }
  542. struct GetOpenTxnsInfoResponse {
  543. 1: required i64 txn_high_water_mark,
  544. 2: required list<TxnInfo> open_txns,
  545. }
  546. struct GetOpenTxnsResponse {
  547. 1: required i64 txn_high_water_mark,
  548. 2: required set<i64> open_txns,
  549. 3: optional i64 min_open_txn, //since 1.3,2.2
  550. }
  551. struct OpenTxnRequest {
  552. 1: required i32 num_txns,
  553. 2: required string user,
  554. 3: required string hostname,
  555. 4: optional string agentInfo = "Unknown",
  556. }
  557. struct OpenTxnsResponse {
  558. 1: required list<i64> txn_ids,
  559. }
  560. struct AbortTxnRequest {
  561. 1: required i64 txnid,
  562. }
  563. struct AbortTxnsRequest {
  564. 1: required list<i64> txn_ids,
  565. }
  566. struct CommitTxnRequest {
  567. 1: required i64 txnid,
  568. }
  569. struct LockComponent {
  570. 1: required LockType type,
  571. 2: required LockLevel level,
  572. 3: required string dbname,
  573. 4: optional string tablename,
  574. 5: optional string partitionname,
  575. 6: optional DataOperationType operationType = DataOperationType.UNSET,
  576. 7: optional bool isAcid = false
  577. }
  578. struct LockRequest {
  579. 1: required list<LockComponent> component,
  580. 2: optional i64 txnid,
  581. 3: required string user, // used in 'show locks' to help admins find who has open locks
  582. 4: required string hostname, // used in 'show locks' to help admins find who has open locks
  583. 5: optional string agentInfo = "Unknown",
  584. }
  585. struct LockResponse {
  586. 1: required i64 lockid,
  587. 2: required LockState state,
  588. }
  589. struct CheckLockRequest {
  590. 1: required i64 lockid,
  591. 2: optional i64 txnid,
  592. 3: optional i64 elapsed_ms,
  593. }
  594. struct UnlockRequest {
  595. 1: required i64 lockid,
  596. }
  597. struct ShowLocksRequest {
  598. 1: optional string dbname,
  599. 2: optional string tablename,
  600. 3: optional string partname,
  601. 4: optional bool isExtended=false,
  602. }
  603. struct ShowLocksResponseElement {
  604. 1: required i64 lockid,
  605. 2: required string dbname,
  606. 3: optional string tablename,
  607. 4: optional string partname,
  608. 5: required LockState state,
  609. 6: required LockType type,
  610. 7: optional i64 txnid,
  611. 8: required i64 lastheartbeat,
  612. 9: optional i64 acquiredat,
  613. 10: required string user,
  614. 11: required string hostname,
  615. 12: optional i32 heartbeatCount = 0,
  616. 13: optional string agentInfo,
  617. 14: optional i64 blockedByExtId,
  618. 15: optional i64 blockedByIntId,
  619. 16: optional i64 lockIdInternal,
  620. }
  621. struct ShowLocksResponse {
  622. 1: list<ShowLocksResponseElement> locks,
  623. }
  624. struct HeartbeatRequest {
  625. 1: optional i64 lockid,
  626. 2: optional i64 txnid
  627. }
  628. struct HeartbeatTxnRangeRequest {
  629. 1: required i64 min,
  630. 2: required i64 max
  631. }
  632. struct HeartbeatTxnRangeResponse {
  633. 1: required set<i64> aborted,
  634. 2: required set<i64> nosuch
  635. }
  636. struct CompactionRequest {
  637. 1: required string dbname,
  638. 2: required string tablename,
  639. 3: optional string partitionname,
  640. 4: required CompactionType type,
  641. 5: optional string runas,
  642. 6: optional map<string, string> properties
  643. }
  644. struct ShowCompactRequest {
  645. }
  646. struct ShowCompactResponseElement {
  647. 1: required string dbname,
  648. 2: required string tablename,
  649. 3: optional string partitionname,
  650. 4: required CompactionType type,
  651. 5: required string state,
  652. 6: optional string workerid,
  653. 7: optional i64 start,
  654. 8: optional string runAs,
  655. 9: optional i64 hightestTxnId, // Highest Txn ID handled by this compaction
  656. 10: optional string metaInfo,
  657. 11: optional i64 endTime,
  658. 12: optional string hadoopJobId = "None",
  659. }
  660. struct ShowCompactResponse {
  661. 1: required list<ShowCompactResponseElement> compacts,
  662. }
  663. struct AddDynamicPartitions {
  664. 1: required i64 txnid,
  665. 2: required string dbname,
  666. 3: required string tablename,
  667. 4: required list<string> partitionnames,
  668. 5: optional DataOperationType operationType = DataOperationType.UNSET
  669. }
  670. struct NotificationEventRequest {
  671. 1: required i64 lastEvent,
  672. 2: optional i32 maxEvents,
  673. }
  674. struct NotificationEvent {
  675. 1: required i64 eventId,
  676. 2: required i32 eventTime,
  677. 3: required string eventType,
  678. 4: optional string dbName,
  679. 5: optional string tableName,
  680. 6: required string message,
  681. }
  682. struct NotificationEventResponse {
  683. 1: required list<NotificationEvent> events,
  684. }
  685. struct CurrentNotificationEventId {
  686. 1: required i64 eventId,
  687. }
  688. struct InsertEventRequestData {
  689. 1: required list<string> filesAdded,
  690. 2: optional bool replace
  691. }
  692. union FireEventRequestData {
  693. 1: InsertEventRequestData insertData
  694. }
  695. struct FireEventRequest {
  696. 1: required bool successful,
  697. 2: required FireEventRequestData data
  698. // dbname, tablename, and partition vals are included as optional in the top level event rather than placed in each type of
  699. // subevent as I assume they'll be used across most event types.
  700. 3: optional string dbName,
  701. 4: optional string tableName,
  702. 5: optional list<string> partitionVals,
  703. }
  704. struct FireEventResponse {
  705. // NOP for now, this is just a place holder for future responses
  706. }
  707. struct MetadataPpdResult {
  708. 1: optional binary metadata,
  709. 2: optional binary includeBitset
  710. }
  711. // Return type for get_file_metadata_by_expr
  712. struct GetFileMetadataByExprResult {
  713. 1: required map<i64, MetadataPpdResult> metadata,
  714. 2: required bool isSupported
  715. }
  716. enum FileMetadataExprType {
  717. ORC_SARG = 1
  718. }
  719. // Request type for get_file_metadata_by_expr
  720. struct GetFileMetadataByExprRequest {
  721. 1: required list<i64> fileIds,
  722. 2: required binary expr,
  723. 3: optional bool doGetFooters,
  724. 4: optional FileMetadataExprType type
  725. }
  726. // Return type for get_file_metadata
  727. struct GetFileMetadataResult {
  728. 1: required map<i64, binary> metadata,
  729. 2: required bool isSupported
  730. }
  731. // Request type for get_file_metadata
  732. struct GetFileMetadataRequest {
  733. 1: required list<i64> fileIds
  734. }
  735. // Return type for put_file_metadata
  736. struct PutFileMetadataResult {
  737. }
  738. // Request type for put_file_metadata
  739. struct PutFileMetadataRequest {
  740. 1: required list<i64> fileIds,
  741. 2: required list<binary> metadata,
  742. 3: optional FileMetadataExprType type
  743. }
  744. // Return type for clear_file_metadata
  745. struct ClearFileMetadataResult {
  746. }
  747. // Request type for clear_file_metadata
  748. struct ClearFileMetadataRequest {
  749. 1: required list<i64> fileIds
  750. }
  751. // Return type for cache_file_metadata
  752. struct CacheFileMetadataResult {
  753. 1: required bool isSupported
  754. }
  755. // Request type for cache_file_metadata
  756. struct CacheFileMetadataRequest {
  757. 1: required string dbName,
  758. 2: required string tblName,
  759. 3: optional string partName,
  760. 4: optional bool isAllParts
  761. }
  762. struct GetAllFunctionsResponse {
  763. 1: optional list<Function> functions
  764. }
  765. struct TableMeta {
  766. 1: required string dbName;
  767. 2: required string tableName;
  768. 3: required string tableType;
  769. 4: optional string comments;
  770. }
  771. exception MetaException {
  772. 1: string message
  773. }
  774. exception UnknownTableException {
  775. 1: string message
  776. }
  777. exception UnknownDBException {
  778. 1: string message
  779. }
  780. exception AlreadyExistsException {
  781. 1: string message
  782. }
  783. exception InvalidPartitionException {
  784. 1: string message
  785. }
  786. exception UnknownPartitionException {
  787. 1: string message
  788. }
  789. exception InvalidObjectException {
  790. 1: string message
  791. }
  792. exception NoSuchObjectException {
  793. 1: string message
  794. }
  795. exception IndexAlreadyExistsException {
  796. 1: string message
  797. }
  798. exception InvalidOperationException {
  799. 1: string message
  800. }
  801. exception ConfigValSecurityException {
  802. 1: string message
  803. }
  804. exception InvalidInputException {
  805. 1: string message
  806. }
  807. // Transaction and lock exceptions
  808. exception NoSuchTxnException {
  809. 1: string message
  810. }
  811. exception TxnAbortedException {
  812. 1: string message
  813. }
  814. exception TxnOpenException {
  815. 1: string message
  816. }
  817. exception NoSuchLockException {
  818. 1: string message
  819. }
  820. /*
  821. * Generic Partition request API, providing different kinds of filtering and controlling output.
  822. *
  823. * The API entry point is get_partitions_with_specs(), which is based on a single
  824. * request/response object model.
  825. *
  826. * The request (GetPartitionsRequest) defines any filtering that should be done for partitions
  827. * as well as the list of fields that should be returned (this is called ProjectionSpec).
  828. * Projection is simply a list of dot separated strings which represent the fields which should
  829. * be returned. Projection may also include whitelist or blacklist of parameters to include in
  830. * the partition. When both blacklist and whitelist are present, the blacklist supersedes the
  831. * whitelist in case of conflicts.
  832. *
  833. * Partition filter spec is the generalization of various types of partition filtering.
  834. * Partitions can be filtered by names, by values or by partition expressions.
  835. */
  836. struct GetPartitionsProjectionSpec {
  837. // fieldList is a list of dot separated strings which represent the fields which must be returned.
  838. // Any other field which is not in the fieldList may be unset in the returned partitions (it
  839. // is up to the implementation to decide whether it chooses to include or exclude such fields).
  840. // E.g. setting the field list to sd.location, serdeInfo.name, sd.cols.name, sd.cols.type will
  841. // return partitions which will have location field set in the storage descriptor. Also the serdeInfo
  842. // in the returned storage descriptor will only have name field set. This applies to multi-valued
  843. // fields as well like sd.cols, so in the example above only name and type fields will be set for sd.cols.
  844. // If the fieldList is empty or not present, all the fields will be set
  845. 1: list<string> fieldList;
  846. // SQL-92 compliant regex pattern for param keys to be included
  847. // _ or % wildcards are supported. '_' represent one character and '%' represents 0 or more characters
  848. 2: string includeParamKeyPattern;
  849. // SQL-92 compliant regex pattern for param keys to be excluded
  850. // _ or % wildcards are supported. '_' represent one character and '%' represents 0 or more characters
  851. 3: string excludeParamKeyPattern;
  852. }
  853. enum PartitionFilterMode {
  854. BY_NAMES, // filter by names
  855. BY_VALUES, // filter by values
  856. BY_EXPR // filter by expression
  857. }
  858. struct GetPartitionsFilterSpec {
  859. 7: optional PartitionFilterMode filterMode,
  860. 8: optional list<string> filters //used as list of partitionNames or list of values or expressions depending on mode
  861. }
  862. struct GetPartitionsResponse {
  863. 1: list<PartitionSpec> partitionSpec
  864. }
  865. struct GetPartitionsRequest {
  866. 1: optional string catName,
  867. 2: string dbName,
  868. 3: string tblName,
  869. 4: optional bool withAuth,
  870. 5: optional string user,
  871. 6: optional list<string> groupNames,
  872. 7: GetPartitionsProjectionSpec projectionSpec
  873. 8: GetPartitionsFilterSpec filterSpec // TODO not yet implemented. Must be present but ignored
  874. }
  875. /**
  876. * This interface is live.
  877. */
  878. service ThriftHiveMetastore extends fb303.FacebookService
  879. {
  880. string getMetaConf(1:string key) throws(1:MetaException o1)
  881. void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
  882. void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
  883. Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
  884. void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
  885. list<string> get_databases(1:string pattern) throws(1:MetaException o1)
  886. list<string> get_all_databases() throws(1:MetaException o1)
  887. void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  888. // returns the type with given name (make seperate calls for the dependent types if needed)
  889. Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  890. bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
  891. bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  892. map<string, Type> get_type_all(1:string name)
  893. throws(1:MetaException o2)
  894. // Gets a list of FieldSchemas describing the columns of a particular table
  895. list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
  896. list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
  897. // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
  898. list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
  899. list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
  900. // create a Hive table. Following fields must be set
  901. // tableName
  902. // database (only 'default' for now until Hive QL supports databases)
  903. // owner (not needed, but good to have for tracking purposes)
  904. // sd.cols (list of field schemas)
  905. // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
  906. // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat)
  907. // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe
  908. // * See notes on DDL_TIME
  909. void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
  910. void create_table_with_environment_context(1:Table tbl,
  911. 2:EnvironmentContext environment_context)
  912. throws (1:AlreadyExistsException o1,
  913. 2:InvalidObjectException o2, 3:MetaException o3,
  914. 4:NoSuchObjectException o4)
  915. void create_table_with_constraints(1:Table tbl, 2: list<SQLPrimaryKey> primaryKeys, 3: list<SQLForeignKey> foreignKeys)
  916. throws (1:AlreadyExistsException o1,
  917. 2:InvalidObjectException o2, 3:MetaException o3,
  918. 4:NoSuchObjectException o4)
  919. void drop_constraint(1:DropConstraintRequest req)
  920. throws(1:NoSuchObjectException o1, 2:MetaException o3)
  921. void add_primary_key(1:AddPrimaryKeyRequest req)
  922. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  923. void add_foreign_key(1:AddForeignKeyRequest req)
  924. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  925. // drops the table and all the partitions associated with it if the table has partitions
  926. // delete data (including partitions) if deleteData is set to true
  927. void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
  928. throws(1:NoSuchObjectException o1, 2:MetaException o3)
  929. void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
  930. 4:EnvironmentContext environment_context)
  931. throws(1:NoSuchObjectException o1, 2:MetaException o3)
  932. list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
  933. list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
  934. throws (1: MetaException o1)
  935. list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
  936. Table get_table(1:string dbname, 2:string tbl_name)
  937. throws (1:MetaException o1, 2:NoSuchObjectException o2)
  938. list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
  939. throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
  940. // Get a list of table names that match a filter.
  941. // The filter operators are LIKE, <, <=, >, >=, =, <>
  942. //
  943. // In the filter statement, values interpreted as strings must be enclosed in quotes,
  944. // while values interpreted as integers should not be. Strings and integers are the only
  945. // supported value types.
  946. //
  947. // The currently supported key names in the filter are:
  948. // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
  949. // and supports all filter operators
  950. // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
  951. // and supports all filter operators except LIKE
  952. // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
  953. // and only supports the filter operators = and <>.
  954. // Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
  955. // For example, to filter on parameter keys called "retention", the key name in the filter
  956. // statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
  957. // Also, = and <> only work for keys that exist
  958. // in the tables. E.g., if you are looking for tables where key1 <> value, it will only
  959. // look at tables that have a value for the parameter key1.
  960. // Some example filter statements include:
  961. // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
  962. // Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
  963. // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
  964. // Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
  965. // @param dbName
  966. // The name of the database from which you will retrieve the table names
  967. // @param filterType
  968. // The type of filter
  969. // @param filter
  970. // The filter string
  971. // @param max_tables
  972. // The maximum number of tables returned
  973. // @return A list of table names that match the desired filter
  974. list<string> get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
  975. throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
  976. // alter table applies to only future partitions not for existing partitions
  977. // * See notes on DDL_TIME
  978. void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
  979. throws (1:InvalidOperationException o1, 2:MetaException o2)
  980. void alter_table_with_environment_context(1:string dbname, 2:string tbl_name,
  981. 3:Table new_tbl, 4:EnvironmentContext environment_context)
  982. throws (1:InvalidOperationException o1, 2:MetaException o2)
  983. // alter table not only applies to future partitions but also cascade to existing partitions
  984. void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
  985. throws (1:InvalidOperationException o1, 2:MetaException o2)
  986. // the following applies to only tables that have partitions
  987. // * See notes on DDL_TIME
  988. Partition add_partition(1:Partition new_part)
  989. throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  990. Partition add_partition_with_environment_context(1:Partition new_part,
  991. 2:EnvironmentContext environment_context)
  992. throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2,
  993. 3:MetaException o3)
  994. i32 add_partitions(1:list<Partition> new_parts)
  995. throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  996. i32 add_partitions_pspec(1:list<PartitionSpec> new_parts)
  997. throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  998. Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
  999. throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  1000. AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request)
  1001. throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  1002. Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name,
  1003. 3:list<string> part_vals, 4:EnvironmentContext environment_context)
  1004. throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  1005. Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
  1006. throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  1007. Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
  1008. 3:string part_name, 4:EnvironmentContext environment_context)
  1009. throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  1010. bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
  1011. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1012. bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name,
  1013. 3:list<string> part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context)
  1014. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1015. bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
  1016. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1017. bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
  1018. 3:string part_name, 4:bool deleteData, 5:EnvironmentContext environment_context)
  1019. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1020. DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req)
  1021. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1022. Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
  1023. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1024. Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db,
  1025. 3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
  1026. throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
  1027. 4:InvalidInputException o4)
  1028. list<Partition> exchange_partitions(1:map<string, string> partitionSpecs, 2:string source_db,
  1029. 3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
  1030. throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
  1031. 4:InvalidInputException o4)
  1032. Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
  1033. 4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1034. Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
  1035. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1036. // returns all the partitions for this table in reverse chronological order.
  1037. // If max parts is given then it will return only that many.
  1038. list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
  1039. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1040. list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
  1041. 4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1042. list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1)
  1043. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1044. list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
  1045. throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1046. // get_partition*_ps methods allow filtering by a partial partition specification,
  1047. // as needed for dynamic partitions. The values that are not restricted should
  1048. // be empty strings. Nulls were considered (instead of "") but caused errors in
  1049. // generated Python code. The size of part_vals may be smaller than the
  1050. // number of partition columns - the unspecified values are considered the same
  1051. // as "".
  1052. list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
  1053. 3:list<string> part_vals, 4:i16 max_parts=-1)
  1054. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1055. list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
  1056. 5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
  1057. list<string> get_partition_names_ps(1:string db_name,
  1058. 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
  1059. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1060. // get the partitions matching the given partition filter
  1061. list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
  1062. 3:string filter, 4:i16 max_parts=-1)
  1063. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1064. // List partitions as PartitionSpec instances.
  1065. list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
  1066. 3:string filter, 4:i32 max_parts=-1)
  1067. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1068. // get the partitions matching the given partition filter
  1069. // unlike get_partitions_by_filter, takes serialized hive expression, and with that can work
  1070. // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL.
  1071. PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req)
  1072. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1073. // get the partitions matching the given partition filter
  1074. i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter)
  1075. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1076. // get partitions give a list of partition names
  1077. list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
  1078. throws(1:MetaException o1, 2:NoSuchObjectException o2)
  1079. // changes the partition to the new partition object. partition is identified from the part values
  1080. // in the new_part
  1081. // * See notes on DDL_TIME
  1082. void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
  1083. throws (1:InvalidOperationException o1, 2:MetaException o2)
  1084. // change a list of partitions. All partitions are altered atomically and all
  1085. // prehooks are fired together followed by all post hooks
  1086. void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
  1087. throws (1:InvalidOperationException o1, 2:MetaException o2)
  1088. void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
  1089. void alter_partition_with_environment_context(1:string db_name,
  1090. 2:string tbl_name, 3:Partition new_part,
  1091. 4:EnvironmentContext environment_context)
  1092. throws (1:InvalidOperationException o1, 2:MetaException o2)
  1093. // rename the old partition to the new partition object by changing old part values to the part values
  1094. // in the new_part. old partition is identified from part_vals.
  1095. // partition keys in new_part should be the same as those in old partition.
  1096. void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
  1097. throws (1:InvalidOperationException o1, 2:MetaException o2)
  1098. // returns whether or not the partition name is valid based on the value of the config
  1099. // hive.metastore.partition.name.whitelist.pattern
  1100. bool partition_name_has_valid_characters(1:list<string> part_vals, 2:bool throw_exception)
  1101. throws(1: MetaException o1)
  1102. // gets the value of the configuration key in the metastore server. returns
  1103. // defaultValue if the key does not exist. if the configuration key does not
  1104. // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
  1105. // thrown.
  1106. string get_config_value(1:string name, 2:string defaultValue)
  1107. throws(1:ConfigValSecurityException o1)
  1108. // converts a partition name into a partition values array
  1109. list<string> partition_name_to_vals(1: string part_name)
  1110. throws(1: MetaException o1)
  1111. // converts a partition name into a partition specification (a mapping from
  1112. // the partition cols to the values)
  1113. map<string, string> partition_name_to_spec(1: string part_name)
  1114. throws(1: MetaException o1)
  void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
      4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
      6: InvalidPartitionException o6)
  bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
      4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
      6: InvalidPartitionException o6)
  // index
  Index add_index(1:Index new_index, 2: Table index_table)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  Index get_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
  list<Index> get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
      throws(1:MetaException o2)
  // primary keys and foreign keys
  PrimaryKeysResponse get_primary_keys(1:PrimaryKeysRequest request)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
  ForeignKeysResponse get_foreign_keys(1:ForeignKeysRequest request)
      throws(1:MetaException o1, 2:NoSuchObjectException o2)
  // column statistics interfaces
  // update APIs persist the column statistics object(s) that are passed in. If statistics already
  // exist for one or more columns, the existing statistics will be overwritten. The update APIs
  // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics
  // struct are valid, and throw InvalidInputException/NoSuchObjectException if found to be invalid
  bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
  bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
  // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
  // such statistics exist. If the required statistics don't exist, get APIs throw NoSuchObjectException
  // For instance, if get_table_column_statistics is called on a partitioned table for which only
  // partition-level column stats exist, get_table_column_statistics will throw NoSuchObjectException
  ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4)
  ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name,
      4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2,
      3:InvalidInputException o3, 4:InvalidObjectException o4)
  TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:MetaException o2)
  bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
      (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
  // delete APIs attempt to delete the column statistics, if found, associated with a given db_name, tbl_name, [part_name]
  // and col_name. If the delete API doesn't find the statistics record in the metastore, it throws NoSuchObjectException
  // Delete APIs validate the input and throw InvalidInputException/InvalidObjectException if the input is invalid.
  bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
      4:InvalidInputException o4)
  bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
      4:InvalidInputException o4)
  //
  // user-defined functions
  //
  void create_function(1:Function func)
      throws (1:AlreadyExistsException o1,
      2:InvalidObjectException o2,
      3:MetaException o3,
      4:NoSuchObjectException o4)
  void drop_function(1:string dbName, 2:string funcName)
      throws (1:NoSuchObjectException o1, 2:MetaException o3)
  void alter_function(1:string dbName, 2:string funcName, 3:Function newFunc)
      throws (1:InvalidOperationException o1, 2:MetaException o2)
  list<string> get_functions(1:string dbName, 2:string pattern)
      throws (1:MetaException o1)
  Function get_function(1:string dbName, 2:string funcName)
      throws (1:MetaException o1, 2:NoSuchObjectException o2)
  GetAllFunctionsResponse get_all_functions() throws (1:MetaException o1)
  // authorization privileges
  bool create_role(1:Role role) throws(1:MetaException o1)
  bool drop_role(1:string role_name) throws(1:MetaException o1)
  list<string> get_role_names() throws(1:MetaException o1)
  // Deprecated, use grant_revoke_role()
  bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
      4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
  // Deprecated, use grant_revoke_role()
  bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
      throws(1:MetaException o1)
  list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
  GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)
  // get all role-grants for users/roles that have been granted the given role
  // Note that in the returned list of RolePrincipalGrants, the roleName is
  // redundant as it would match the role_name argument of this function
  GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)
  // get grant information of all roles granted to the given principal
  // Note that in the returned list of RolePrincipalGrants, the principal name,type is
  // redundant as it would match the principal name,type arguments of this function
  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1)
  PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
      3: list<string> group_names) throws(1:MetaException o1)
  list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
      3: HiveObjectRef hiveObject) throws(1:MetaException o1)
  // Deprecated, use grant_revoke_privileges()
  bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
  // Deprecated, use grant_revoke_privileges()
  bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
  GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1);
  // this is used by the metastore client to send UGI information to the metastore server immediately
  // after setting up a connection.
  list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)
  // Authentication (delegation token) interfaces
  // get a metastore server delegation token for use by map/reduce tasks to authenticate
  // to the metastore server
  string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
      throws (1:MetaException o1)
  // method to renew a delegation token obtained from the metastore server
  i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)
  // method to cancel a delegation token obtained from the metastore server
  void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
  // add a delegation token
  bool add_token(1:string token_identifier, 2:string delegation_token)
  // remove a delegation token
  bool remove_token(1:string token_identifier)
  // get a delegation token by identifier
  string get_token(1:string token_identifier)
  // get all delegation token identifiers
  list<string> get_all_token_identifiers()
  // add master key
  i32 add_master_key(1:string key) throws (1:MetaException o1)
  // update master key
  void update_master_key(1:i32 seq_number, 2:string key) throws (1:NoSuchObjectException o1, 2:MetaException o2)
  // remove master key
  bool remove_master_key(1:i32 key_seq)
  // get master keys
  list<string> get_master_keys()
  // Transaction and lock management calls
  // Get just the list of open transactions
  GetOpenTxnsResponse get_open_txns()
  // Get the list of open transactions with state (open, aborted)
  GetOpenTxnsInfoResponse get_open_txns_info()
  OpenTxnsResponse open_txns(1:OpenTxnRequest rqst)
  void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
  void abort_txns(1:AbortTxnsRequest rqst) throws (1:NoSuchTxnException o1)
  void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
  LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
  LockResponse check_lock(1:CheckLockRequest rqst)
      throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
  void unlock(1:UnlockRequest rqst) throws (1:NoSuchLockException o1, 2:TxnOpenException o2)
  ShowLocksResponse show_locks(1:ShowLocksRequest rqst)
  void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3)
  HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
  void compact(1:CompactionRequest rqst)
  ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
  void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
  // Notification logging calls
  NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst)
  CurrentNotificationEventId get_current_notificationEventId()
  FireEventResponse fire_listener_event(1:FireEventRequest rqst)
  void flushCache()
  GetFileMetadataByExprResult get_file_metadata_by_expr(1:GetFileMetadataByExprRequest req)
  GetFileMetadataResult get_file_metadata(1:GetFileMetadataRequest req)
  PutFileMetadataResult put_file_metadata(1:PutFileMetadataRequest req)
  ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req)
  CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req)
  // Metastore DB properties
  string get_metastore_db_uuid() throws (1:MetaException o1)
  // get_partitions with filter and projectspec
  GetPartitionsResponse get_partitions_with_specs(1: GetPartitionsRequest request) throws(1:MetaException o1)
}
// * Note about the DDL_TIME: When creating or altering a table or a partition,
// if the DDL_TIME is not set, the current time will be used.
// For storing info about archived partitions in parameters
// Whether the partition is archived
const string IS_ARCHIVED = "is_archived",
// The original location of the partition, before archiving. After archiving,
// this directory will contain the archive. When the partition
// is dropped, this directory will be deleted
const string ORIGINAL_LOCATION = "original_location",
// Whether or not the table is considered immutable - immutable tables can only be
// overwritten or created if unpartitioned, or if partitioned, partitions inside them
// can only be overwritten or created. Immutability supports write-once and replace
// semantics, but not append.
const string IS_IMMUTABLE = "immutable",
// these should be needed only for backward compatibility with filestore
const string META_TABLE_COLUMNS = "columns",
const string META_TABLE_COLUMN_TYPES = "columns.types",
const string BUCKET_FIELD_NAME = "bucket_field_name",
const string BUCKET_COUNT = "bucket_count",
const string FIELD_TO_DIMENSION = "field_to_dimension",
const string META_TABLE_NAME = "name",
const string META_TABLE_DB = "db",
const string META_TABLE_LOCATION = "location",
const string META_TABLE_SERDE = "serde",
const string META_TABLE_PARTITION_COLUMNS = "partition_columns",
const string META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types",
const string FILE_INPUT_FORMAT = "file.inputformat",
const string FILE_OUTPUT_FORMAT = "file.outputformat",
const string META_TABLE_STORAGE = "storage_handler",
const string TABLE_IS_TRANSACTIONAL = "transactional",
const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",