#!/usr/local/bin/thrift -java
// Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#
# Thrift Service that the MetaStore is built on
#

include "fb303.thrift"

namespace java org.apache.hadoop.hive.metastore.api
namespace php metastore
namespace cpp Apache.Hadoop.Hive

const string DDL_TIME = "transient_lastDdlTime"

struct Version {
  1: string version,
  2: string comments
}

struct FieldSchema {
  1: string name,   // name of the field
  2: string type,   // type of the field. primitive types defined above, specify list<TYPE_NAME>, map<TYPE_NAME, TYPE_NAME> for lists & maps
  3: string comment
}

struct Type {
  1: string name,                       // one of the types in PrimitiveTypes or CollectionTypes or User defined types
  2: optional string type1,             // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
  3: optional string type2,             // val type if the name is 'map' (MAP_TYPE)
  4: optional list<FieldSchema> fields  // if the name is one of the user defined types
}
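
// Illustrative note (not part of the original IDL): a column of Hive type
// map<string,int> is described by a Type with name = "map", type1 = "string"
// and type2 = "int"; a list<string> column has name = "list" and
// type1 = "string"; fields is populated only for user defined (struct-like) types.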

// namespace for tables
struct Database {
  1: string name,
  2: string description,
}

// This object holds the information needed by SerDes
struct SerDeInfo {
  1: string name,             // name of the serde, table name by default
  2: string serializationLib, // usually the class that implements the extractor & loader
  3: map<string, string> parameters // initialization parameters
}

// sort order of a column (column name along with asc(1)/desc(0))
struct Order {
  1: string col, // sort column name
  2: i32 order   // asc(1) or desc(0)
}
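
// Illustrative example (not part of the original IDL), using the Java classes
// generated from this file: sorting each bucket by viewTime in descending order
// would be expressed as
//   Order sortOrder = new Order("viewTime", 0); // 0 = desc, 1 = asc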

// this object holds all the information about physical storage of the data belonging to a table
struct StorageDescriptor {
  1: list<FieldSchema> cols,  // required (refer to types defined above)
  2: string location,         // defaults to <warehouse loc>/<db loc>/tablename
  3: string inputFormat,      // SequenceFileInputFormat (binary) or TextInputFormat or custom format
  4: string outputFormat,     // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format
  5: bool compressed,         // compressed or not
  6: i32 numBuckets,          // this must be specified if there are any dimension columns
  7: SerDeInfo serdeInfo,     // serialization and deserialization information
  8: list<string> bucketCols, // reducer grouping columns, clustering columns and bucketing columns
  9: list<Order> sortCols,    // sort order of the data in each bucket
  10: map<string, string> parameters // any user supplied key value hash
}
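
// Illustrative sketch (not part of the original IDL) of populating a
// StorageDescriptor through the generated Java classes; setter names follow
// standard Thrift Java codegen, and the format class names are the stock
// Hadoop/Hive ones:
//   StorageDescriptor sd = new StorageDescriptor();
//   sd.setCols(cols); // list<FieldSchema> built separately
//   sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
//   sd.setOutputFormat("org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat");
//   sd.setNumBuckets(32);
//   sd.setBucketCols(Arrays.asList("userid"));
//   sd.setSortCols(Arrays.asList(new Order("viewTime", 0)));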

// table information
struct Table {
  1: string tableName,                // name of the table
  2: string dbName,                   // database name ('default')
  3: string owner,                    // owner of this table
  4: i32 createTime,                  // creation time of the table
  5: i32 lastAccessTime,              // last access time (usually this will be filled from HDFS and shouldn't be relied on)
  6: i32 retention,                   // retention time
  7: StorageDescriptor sd,            // storage descriptor of the table
  8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
  9: map<string, string> parameters   // to store comments or any other user level parameters
}

struct Partition {
  1: list<string> values, // string value is converted to appropriate partition key type
  2: string dbName,
  3: string tableName,
  4: i32 createTime,
  5: i32 lastAccessTime,
  6: StorageDescriptor sd,
  7: map<string, string> parameters
}
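
// Illustrative note (not part of the original IDL): for a table partitioned by
// (ds string, country string), the partition stored under
// ds=2008-08-08/country=us carries values = ["2008-08-08", "us"]; the order of
// values matches the order of the table's partitionKeys.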

// an index on a hive table is also another table whose columns are the subset of the base table columns along with the offset
// this will automatically generate a table (table_name_index_name)
struct Index {
  1: string indexName,      // unique within the whole database namespace
  2: i32 indexType,         // reserved
  3: string tableName,
  4: string dbName,
  5: list<string> colNames, // for now columns will be sorted in the ascending order
  6: string partName        // partition name
}

// schema of the table/query results etc.
struct Schema {
  // column names, types, comments
  1: list<FieldSchema> fieldSchemas,
  // delimiters etc
  2: map<string, string> properties
}

exception MetaException {
  1: string message
}

exception UnknownTableException {
  1: string message
}

exception UnknownDBException {
  1: string message
}

exception AlreadyExistsException {
  1: string message
}

exception InvalidObjectException {
  1: string message
}

exception NoSuchObjectException {
  1: string message
}

exception IndexAlreadyExistsException {
  1: string message
}

exception InvalidOperationException {
  1: string message
}

exception ConfigValSecurityException {
  1: string message
}

/**
 * This interface is live.
 */
service ThriftHiveMetastore extends fb303.FacebookService
{
  bool create_database(1:string name, 2:string description)
      throws(1:AlreadyExistsException o1, 2:MetaException o2)
  Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
  bool drop_database(1:string name) throws(2:MetaException o2)
  list<string> get_databases() throws(1:MetaException o1)

  // returns the type with the given name (make separate calls for the dependent types if needed)
  Type get_type(1:string name) throws(1:MetaException o2)
  bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
  bool drop_type(1:string type) throws(1:MetaException o2)
  map<string, Type> get_type_all(1:string name)
      throws(1:MetaException o2)

  // Gets a list of FieldSchemas describing the columns of a particular table
  list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),

  // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
  list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)

  // create a Hive table. The following fields must be set:
  //   tableName
  //   database (only 'default' for now, until Hive QL supports databases)
  //   owner (not needed, but good to have for tracking purposes)
  //   sd.cols (list of field schemas)
  //   sd.inputFormat (SequenceFileInputFormat (binary) or TextInputFormat)
  //   sd.outputFormat (SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat)
  //   sd.serdeInfo.serializationLib (SerDe class name, e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
  void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
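
  // Illustrative sketch (not part of the original IDL), assuming a connected
  // Java client (see the sketch at the end of this file); setter names follow
  // standard Thrift Java codegen:
  //   Table tbl = new Table();
  //   tbl.setTableName("page_view");
  //   tbl.setDbName("default");
  //   tbl.setOwner("hive");
  //   tbl.setSd(sd); // StorageDescriptor with cols, formats and serdeInfo set as above
  //   client.create_table(tbl); // throws AlreadyExistsException if the table exists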

  // drops the table and all the partitions associated with it if the table has partitions
  // delete data (including partitions) if deleteData is set to true
  void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o3)
  list<string> get_tables(1: string db_name, 2: string pattern)
      throws (1: MetaException o1)
  Table get_table(1:string dbname, 2:string tbl_name)
      throws (1:MetaException o1, 2:NoSuchObjectException o2)

  // alter table applies only to future partitions, not to existing partitions
  void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
      throws (1:InvalidOperationException o1, 2:MetaException o2)

  // the following applies only to tables that have partitions
  Partition add_partition(1:Partition new_part)
      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
      throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
  bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
      throws(1:MetaException o1)

  // returns all the partitions for this table in reverse chronological order.
  // if max_parts is given then it will return only that many
  list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
      throws(1:NoSuchObjectException o1, 2:MetaException o2)
  list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
      throws(1:MetaException o2)
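
  // Illustrative usage (not part of the original IDL) through the generated Java
  // client; the Thrift i16 max_parts maps to a Java short:
  //   List<Partition> parts = client.get_partitions("default", "page_view", (short) 10);
  //   // pass -1 to fetch all partitions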

  // changes the partition to the new partition object. the partition to change is
  // identified from the part values in new_part
  void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
      throws(1:InvalidOperationException o1, 2:MetaException o2)

  // gets the value of the configuration key in the metastore server. returns
  // defaultValue if the key does not exist. if the configuration key does not
  // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
  // thrown.
  string get_config_value(1:string name, 2:string defaultValue)
      throws(1:ConfigValSecurityException o1)
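
  // Illustrative usage (not part of the original IDL); hive.metastore.warehouse.dir
  // is a standard Hive configuration key:
  //   String dir = client.get_config_value("hive.metastore.warehouse.dir", "/user/hive/warehouse");
  //   // keys outside "hive", "mapred", or "hdfs" raise ConfigValSecurityException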
}
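
// Illustrative end-to-end sketch (not part of the original IDL) of talking to the
// metastore from Java with the classes generated from this file plus libthrift;
// localhost:9083 is an assumed metastore host and port:
//   TTransport transport = new TSocket("localhost", 9083);
//   transport.open();
//   ThriftHiveMetastore.Client client =
//       new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
//   List<String> tables = client.get_tables("default", "*");
//   transport.close();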

// these should be needed only for backward compatibility with filestore
const string META_TABLE_COLUMNS = "columns",
const string META_TABLE_COLUMN_TYPES = "columns.types",
const string BUCKET_FIELD_NAME = "bucket_field_name",
const string BUCKET_COUNT = "bucket_count",
const string FIELD_TO_DIMENSION = "field_to_dimension",
const string META_TABLE_NAME = "name",
const string META_TABLE_DB = "db",
const string META_TABLE_LOCATION = "location",
const string META_TABLE_SERDE = "serde",
const string META_TABLE_PARTITION_COLUMNS = "partition_columns",
const string FILE_INPUT_FORMAT = "file.inputformat",
const string FILE_OUTPUT_FORMAT = "file.outputformat",