# __init__.py

import gzip
import json
import logging
import struct
import StringIO
import sys

from collections import defaultdict

from ttypes import (FileMetaData, CompressionCodec, Encoding,
                    FieldRepetitionType, PageHeader, PageType, Type)
from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport

import encoding
import schema

logger = logging.getLogger("parquet")

try:
    import snappy
except ImportError:
    logger.warn(
        "Couldn't import snappy. Support for snappy compression disabled.")


class ParquetFormatException(Exception):
    pass
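
# A Parquet file is laid out as:
#
#   "PAR1" | <row group data> | <FileMetaData (Thrift, compact protocol)> |
#   <4-byte little-endian footer length> | "PAR1"
#
# The helpers below check the leading/trailing magic bytes and locate the
# footer via the length field stored just before the trailing magic.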


def _check_header_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
    fo.seek(0, 0)
    magic = fo.read(4)
    return magic == 'PAR1'


def _check_footer_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
    fo.seek(-4, 2)  # seek to four bytes from the end of the file
    magic = fo.read(4)
    return magic == 'PAR1'


def _get_footer_size(fo):
    "Reads the footer size in bytes, which is serialized as little endian"
    fo.seek(-8, 2)
    tup = struct.unpack("<i", fo.read(4))
    return tup[0]
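
# The file footer (FileMetaData) and every page header (PageHeader) are Thrift
# structs written with the compact protocol; the next two helpers deserialize
# them directly from the open file object through a TFileObjectTransport.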


def _read_footer(fo):
    """Reads the footer from the given file object, returning a FileMetaData
    object. This method assumes that the fo references a valid parquet file"""
    footer_size = _get_footer_size(fo)
    logger.debug("Footer size in bytes: %s", footer_size)
    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    fmd = FileMetaData()
    fmd.read(pin)
    return fmd


def _read_page_header(fo):
    """Reads the page_header from the given fo"""
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    ph = PageHeader()
    ph.read(pin)
    return ph


def read_footer(filename):
    """Reads and returns the FileMetaData object for the given file."""
    with open(filename, 'rb') as fo:
        if not _check_header_magic_bytes(fo) or \
           not _check_footer_magic_bytes(fo):
            raise ParquetFormatException("{0} is not a valid parquet file "
                                         "(missing magic bytes)"
                                         .format(filename))
        return _read_footer(fo)
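
# Illustrative usage (sketch only; "example.parquet" is a placeholder
# filename, not something shipped with this module):
#
#   >>> fmd = read_footer("example.parquet")
#   >>> fmd.num_rows, len(fmd.row_groups), len(fmd.schema)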


def _get_name(type_, value):
    """Returns the name for the given value of the given type_ unless value is
    None, in which case it returns the string "None"."""
    return type_._VALUES_TO_NAMES[value] if value is not None else "None"


def _get_offset(cmd):
    """Returns the offset into the cmd based upon if it's a dictionary page or
    a data page"""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset
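
# When a column chunk has a dictionary, the dictionary page is written before
# the data pages, so the chunk starts at the smaller of the two offsets;
# _get_offset() returns that starting point for the sequential page scans in
# dump_metadata() and dump().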


def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
    def println(value):
        out.write(value + "\n")

    footer = read_footer(filename)
    println("File Metadata: {0}".format(filename))
    println(" Version: {0}".format(footer.version))
    println(" Num Rows: {0}".format(footer.num_rows))
    println(" k/v metadata: ")
    if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
        for kv in footer.key_value_metadata:
            println(" {0}={1}".format(kv.key, kv.value))
    else:
        println(" (none)")
    println(" schema: ")
    for se in footer.schema:
        println(" {name} ({type}): length={type_length}, "
                "repetition={repetition_type}, "
                "children={num_children}, "
                "converted_type={converted_type}".format(
                    name=se.name,
                    type=Type._VALUES_TO_NAMES[se.type] if se.type else None,
                    type_length=se.type_length,
                    repetition_type=_get_name(FieldRepetitionType,
                                              se.repetition_type),
                    num_children=se.num_children,
                    converted_type=se.converted_type))
    if show_row_group_metadata:
        println(" row groups: ")
        for rg in footer.row_groups:
            num_rows = rg.num_rows
            bytes = rg.total_byte_size
            println(" rows={num_rows}, bytes={bytes}".format(
                num_rows=num_rows, bytes=bytes))
            println(" chunks:")
            for cg in rg.columns:
                cmd = cg.meta_data
                println(" type={type} file_offset={offset} "
                        "compression={codec} "
                        "encodings={encodings} path_in_schema={path_in_schema} "
                        "num_values={num_values} uncompressed_bytes={raw_bytes} "
                        "compressed_bytes={compressed_bytes} "
                        "data_page_offset={data_page_offset} "
                        "dictionary_page_offset={dictionary_page_offset}".format(
                            type=cmd.type,
                            offset=cg.file_offset,
                            codec=_get_name(CompressionCodec, cmd.codec),
                            encodings=",".join(
                                [_get_name(Encoding, s) for s in cmd.encodings]),
                            path_in_schema=cmd.path_in_schema,
                            num_values=cmd.num_values,
                            raw_bytes=cmd.total_uncompressed_size,
                            compressed_bytes=cmd.total_compressed_size,
                            data_page_offset=cmd.data_page_offset,
                            dictionary_page_offset=cmd.dictionary_page_offset))
                with open(filename, 'rb') as fo:
                    offset = _get_offset(cmd)
                    fo.seek(offset, 0)
                    values_read = 0
                    println(" pages: ")
                    while values_read < num_rows:
                        ph = _read_page_header(fo)
                        # seek past current page.
                        fo.seek(ph.compressed_page_size, 1)
                        daph = ph.data_page_header
                        type_ = _get_name(PageType, ph.type)
                        raw_bytes = ph.uncompressed_page_size
                        num_values = None
                        if ph.type == PageType.DATA_PAGE:
                            num_values = daph.num_values
                            values_read += num_values
                        if ph.type == PageType.DICTIONARY_PAGE:
                            pass
                            #num_values = diph.num_values
                        encoding_type = None
                        def_level_encoding = None
                        rep_level_encoding = None
                        if daph:
                            encoding_type = _get_name(Encoding, daph.encoding)
                            def_level_encoding = _get_name(
                                Encoding, daph.definition_level_encoding)
                            rep_level_encoding = _get_name(
                                Encoding, daph.repetition_level_encoding)
                        println(" page header: type={type} "
                                "uncompressed_size={raw_bytes} "
                                "num_values={num_values} encoding={encoding} "
                                "def_level_encoding={def_level_encoding} "
                                "rep_level_encoding={rep_level_encoding}".format(
                                    type=type_,
                                    raw_bytes=raw_bytes,
                                    num_values=num_values,
                                    encoding=encoding_type,
                                    def_level_encoding=def_level_encoding,
                                    rep_level_encoding=rep_level_encoding))
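
# Every page in a column chunk is a PageHeader (Thrift) followed immediately
# by the page body, compressed with the codec recorded in the column chunk's
# metadata. _read_page() returns that body as raw, uncompressed bytes.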


def _read_page(fo, page_header, column_metadata):
    """Internal function to read the data page from the given file-object
    and convert it to raw, uncompressed bytes (if necessary)."""
    bytes_from_file = fo.read(page_header.compressed_page_size)
    codec = column_metadata.codec
    if codec is not None and codec != CompressionCodec.UNCOMPRESSED:
        if column_metadata.codec == CompressionCodec.SNAPPY:
            raw_bytes = snappy.decompress(bytes_from_file)
        elif column_metadata.codec == CompressionCodec.GZIP:
            io_obj = StringIO.StringIO(bytes_from_file)
            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
                raw_bytes = f.read()
        else:
            raise ParquetFormatException(
                "Unsupported Codec: {0}".format(codec))
    else:
        raw_bytes = bytes_from_file
    return raw_bytes
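
# Definition and repetition levels use Parquet's RLE / bit-packed hybrid
# encoding: a sequence of runs, each either an RLE run (one value repeated) or
# a bit-packed run, decoded here by encoding.read_rle_bit_packed_hybrid(). The
# bit width is derived from the maximum level permitted by the schema.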


def _read_data(fo, fo_encoding, value_count, bit_width):
    """Internal method to read data from the file-object using the given
    encoding. The data could be definition levels, repetition levels, or
    actual values.
    """
    vals = []
    if fo_encoding == Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")
    return vals
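
# read_data_page() decodes one data page: the definition levels (skipped for
# required columns), the repetition levels (skipped for columns at the top
# level of the schema), and finally the values, which are either PLAIN-encoded
# or PLAIN_DICTIONARY indices into a previously read dictionary page.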


def read_data_page(fo, schema_helper, page_header, column_metadata,
                   dictionary):
    """Reads the datapage from the given file-like object based upon the
    metadata in the schema_helper, page_header, column_metadata, and
    (optional) dictionary. Returns a list of values.
    """
    daph = page_header.data_page_header
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    vals = []
    logger.debug(" definition_level_encoding: %s",
                 _get_name(Encoding, daph.definition_level_encoding))
    logger.debug(" repetition_level_encoding: %s",
                 _get_name(Encoding, daph.repetition_level_encoding))
    logger.debug(" encoding: %s", _get_name(Encoding, daph.encoding))

    # definition levels are skipped if data is required.
    if not schema_helper.is_required(column_metadata.path_in_schema[-1]):
        max_definition_level = schema_helper.max_definition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_definition_level)
        logger.debug(" max def level: %s bit_width: %s",
                     max_definition_level, bit_width)
        if bit_width == 0:
            definition_levels = [0] * daph.num_values
        else:
            definition_levels = _read_data(io_obj,
                                           daph.definition_level_encoding,
                                           daph.num_values,
                                           bit_width)
        logger.debug(" Definition levels: %s",
                     ",".join([str(dl) for dl in definition_levels]))

    # repetition levels are skipped if data is at the first level.
    if len(column_metadata.path_in_schema) > 1:
        max_repetition_level = schema_helper.max_repetition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_repetition_level)
        repetition_levels = _read_data(io_obj,
                                       daph.repetition_level_encoding,
                                       daph.num_values,
                                       bit_width)

    # TODO Actually use the definition and repetition levels.

    if daph.encoding == Encoding.PLAIN:
        for i in range(daph.num_values):
            vals.append(
                encoding.read_plain(io_obj, column_metadata.type, None))
        logger.debug(" Values: %s", ",".join([str(x) for x in vals]))
    elif daph.encoding == Encoding.PLAIN_DICTIONARY:
        # bit_width is stored as single byte.
        bit_width = struct.unpack("<B", io_obj.read(1))[0]
        logger.debug("bit_width: %d", bit_width)
        total_seen = 0
        dict_values_bytes = io_obj.read()
        dict_values_io_obj = StringIO.StringIO(dict_values_bytes)
        # TODO jcrobak -- not sure that this loop is needed?
        while total_seen < daph.num_values:
            values = encoding.read_rle_bit_packed_hybrid(
                dict_values_io_obj, bit_width, len(dict_values_bytes))
            vals += [dictionary[v] for v in values]
            total_seen += len(values)
    else:
        raise ParquetFormatException("Unsupported encoding: %s",
                                     _get_name(Encoding, daph.encoding))
    return vals
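
# A dictionary page holds the distinct values of a column chunk, PLAIN-encoded
# back to back; data pages then store RLE/bit-packed indices into that list.
# Purely for illustration: with dict_items == ['a', 'b', 'c'], decoded indices
# [0, 2, 2, 1] map to the values ['a', 'c', 'c', 'b'].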


def read_dictionary_page(fo, page_header, column_metadata):
    """Reads and returns the PLAIN-encoded values stored in a dictionary
    page."""
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    dict_items = []
    while io_obj.tell() < len(raw_bytes):
        # TODO - length for fixed byte array
        dict_items.append(
            encoding.read_plain(io_obj, column_metadata.type, None))
    return dict_items


## max_records=10
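
# dump() materializes column values row group by row group and writes them out
# as tab-separated CSV or JSON lines. The `options` object is expected to
# provide the attributes used below: col, format, no_headers, and limit.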


def dump(filename, options, out=sys.stdout):
    def println(value):
        out.write(value + "\n")

    footer = read_footer(filename)
    schema_helper = schema.SchemaHelper(footer.schema)
    total_count = 0
    for rg in footer.row_groups:
        res = defaultdict(list)
        row_group_rows = rg.num_rows
        for idx, cg in enumerate(rg.columns):
            dict_items = []
            cmd = cg.meta_data
            # skip if the list of columns is specified and this isn't in it
            if options.col and not ".".join(cmd.path_in_schema) in options.col:
                continue
            with open(filename, 'rb') as fo:
                offset = _get_offset(cmd)
                fo.seek(offset, 0)
                values_seen = 0
                logger.debug("reading column chunk of type: %s",
                             _get_name(Type, cmd.type))
                while values_seen < row_group_rows:
                    ph = _read_page_header(fo)
                    logger.debug("Reading page (type=%s, "
                                 "uncompressed=%s bytes, "
                                 "compressed=%s bytes)",
                                 _get_name(PageType, ph.type),
                                 ph.uncompressed_page_size,
                                 ph.compressed_page_size)
                    if ph.type == PageType.DATA_PAGE:
                        values = read_data_page(fo, schema_helper, ph, cmd,
                                                dict_items)
                        res[".".join(cmd.path_in_schema)] += values
                        values_seen += cmd.num_values
                    elif ph.type == PageType.DICTIONARY_PAGE:
                        logger.debug(ph)
                        assert dict_items == []
                        dict_items = read_dictionary_page(fo, ph, cmd)
                        logger.debug("Dictionary: %s", str(dict_items))
                    else:
                        logger.warn("Skipping unknown page type={0}".format(
                            _get_name(PageType, ph.type)))
        keys = options.col if options.col else [s.name for s in
                                                footer.schema if s.name in res]
        if options.format == "csv" and not options.no_headers:
            println("\t".join(keys))
        for i in range(rg.num_rows):
            if options.limit != -1 and i + total_count >= options.limit:
                return
            if options.format == "csv":
                println("\t".join(str(res[k][i]) for k in keys))
            elif options.format == "json":
                println(json.dumps(dict([(k, res[k][i]) for k in keys])))
        total_count += rg.num_rows
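

# Illustrative usage (sketch only; the argparse.Namespace stand-in and the
# "example.parquet" filename are assumptions, not part of this module):
#
#   >>> import argparse
#   >>> opts = argparse.Namespace(col=None, format="json", no_headers=False,
#   ...                           limit=-1)
#   >>> dump("example.parquet", opts)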