__init__.py

import gzip
import json
import logging
import struct
import thrift
import StringIO
from collections import defaultdict
from ttypes import (FileMetaData, CompressionCodec, Encoding,
                    FieldRepetitionType, PageHeader, PageType, Type)
from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport
import encoding
import schema

logger = logging.getLogger("parquet")

try:
    import snappy
except ImportError:
    logger.warn("Couldn't import snappy. Support for snappy compression disabled.")


class ParquetFormatException(Exception):
    pass


def _check_header_magic_bytes(fo):
    """Returns true if the file-like obj has the PAR1 magic bytes at the header"""
    fo.seek(0, 0)
    magic = fo.read(4)
    return magic == 'PAR1'


def _check_footer_magic_bytes(fo):
    """Returns true if the file-like obj has the PAR1 magic bytes at the footer"""
    fo.seek(-4, 2)  # seek to four bytes from the end of the file
    magic = fo.read(4)
    return magic == 'PAR1'


def _get_footer_size(fo):
    """Reads the footer size in bytes, which is serialized as little endian"""
    fo.seek(-8, 2)
    tup = struct.unpack("<i", fo.read(4))
    return tup[0]
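
# For reference, a Parquet file ends with:
#   <FileMetaData (thrift compact)> <4-byte little-endian footer length> 'PAR1'
# so _get_footer_size seeks 8 bytes from the end (4 length bytes plus the
# 4 magic bytes) before reading the length.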


def _read_footer(fo):
    """Reads the footer from the given file object, returning a FileMetaData
    object. This method assumes that the fo references a valid parquet file"""
    footer_size = _get_footer_size(fo)
    logger.debug("Footer size in bytes: %s", footer_size)
    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    fmd = FileMetaData()
    fmd.read(pin)
    return fmd


def _read_page_header(fo):
    """Reads the page_header from the given fo"""
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    ph = PageHeader()
    ph.read(pin)
    return ph


def read_footer(filename):
    """Reads and returns the FileMetaData object for the given file."""
    with open(filename, 'rb') as fo:
        if not _check_header_magic_bytes(fo) or not _check_footer_magic_bytes(fo):
            raise ParquetFormatException(
                "{0} is not a valid parquet file (missing magic bytes)".format(filename))
        return _read_footer(fo)


def dump_metadata(filename):
    footer = read_footer(filename)
    print("File: {0}".format(filename))
    print(" version: {0}".format(footer.version))
    print(" num rows: {0}".format(footer.num_rows))
    print(" k/v metadata: ")
    if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
        for kv in footer.key_value_metadata:
            print("  {0}={1}".format(kv.key, kv.value))
    else:
        print("  (none)")
    print(" schema: ")
    for se in footer.schema:
        # Use "is not None" checks: the 0-valued enums (e.g. Type.BOOLEAN,
        # FieldRepetitionType.REQUIRED) are falsy and would print as None.
        print("  {name} ({type}): length={type_length}, "
              "repetition={repetition_type}, children={num_children}, "
              "converted_type={converted_type}".format(
                  name=se.name,
                  type=Type._VALUES_TO_NAMES[se.type] if se.type is not None else None,
                  type_length=se.type_length,
                  repetition_type=FieldRepetitionType._VALUES_TO_NAMES[se.repetition_type]
                  if se.repetition_type is not None else None,
                  num_children=se.num_children,
                  converted_type=se.converted_type))
    print(" row groups: ")
    for rg in footer.row_groups:
        num_rows = rg.num_rows
        byte_size = rg.total_byte_size
        print("  rows={num_rows}, bytes={bytes}".format(num_rows=num_rows,
                                                        bytes=byte_size))
        print("  chunks:")
        for cg in rg.columns:
            cmd = cg.meta_data
            print("   type={type} file_offset={offset} compression={codec} "
                  "encodings={encodings} path_in_schema={path_in_schema} "
                  "num_values={num_values} uncompressed_bytes={raw_bytes} "
                  "compressed_bytes={compressed_bytes} "
                  "data_page_offset={data_page_offset} "
                  "dictionary_page_offset={dictionary_page_offset}".format(
                      type=cmd.type, offset=cg.file_offset,
                      codec=CompressionCodec._VALUES_TO_NAMES[cmd.codec],
                      encodings=",".join(
                          [Encoding._VALUES_TO_NAMES[s] for s in cmd.encodings]),
                      path_in_schema=cmd.path_in_schema,
                      num_values=cmd.num_values,
                      raw_bytes=cmd.total_uncompressed_size,
                      compressed_bytes=cmd.total_compressed_size,
                      data_page_offset=cmd.data_page_offset,
                      dictionary_page_offset=cmd.dictionary_page_offset))
            with open(filename, 'rb') as fo:
                # Start at whichever page comes first: the dictionary page
                # (if present) or the first data page.
                offset = (cmd.data_page_offset
                          if (cmd.dictionary_page_offset is None
                              or cmd.data_page_offset < cmd.dictionary_page_offset)
                          else cmd.dictionary_page_offset)
                fo.seek(offset, 0)
                values_read = 0
                print("   pages: ")
                while values_read < num_rows:
                    ph = _read_page_header(fo)
                    fo.seek(ph.compressed_page_size, 1)  # seek past current page.
                    daph = ph.data_page_header
                    diph = ph.dictionary_page_header
                    type_ = PageType._VALUES_TO_NAMES[ph.type] if ph.type is not None else None
                    raw_bytes = ph.uncompressed_page_size
                    num_values = None
                    if ph.type == PageType.DATA_PAGE:
                        num_values = daph.num_values
                        values_read += num_values
                    if ph.type == PageType.DICTIONARY_PAGE:
                        pass
                        # num_values = diph.num_values
                    # Named page_encoding (not "encoding") so the encoding
                    # module isn't shadowed.
                    page_encoding = None
                    def_level_encoding = None
                    rep_level_encoding = None
                    if daph:
                        page_encoding = Encoding._VALUES_TO_NAMES[daph.encoding]
                        def_level_encoding = Encoding._VALUES_TO_NAMES[daph.definition_level_encoding]
                        rep_level_encoding = Encoding._VALUES_TO_NAMES[daph.repetition_level_encoding]
                    print("    page header: type={type} uncompressed_size={raw_bytes} "
                          "num_values={num_values} encoding={encoding} "
                          "def_level_encoding={def_level_encoding} "
                          "rep_level_encoding={rep_level_encoding}".format(
                              type=type_, raw_bytes=raw_bytes, num_values=num_values,
                              encoding=page_encoding,
                              def_level_encoding=def_level_encoding,
                              rep_level_encoding=rep_level_encoding))


def _read_page(fo, page_header, column_metadata):
    """Reads the data page from the given file-object using the column
    metadata, decompressing it if necessary."""
    bytes_from_file = fo.read(page_header.compressed_page_size)
    if column_metadata.codec is not None and \
       column_metadata.codec != CompressionCodec.UNCOMPRESSED:
        if column_metadata.codec == CompressionCodec.SNAPPY:
            raw_bytes = snappy.decompress(bytes_from_file)
        elif column_metadata.codec == CompressionCodec.GZIP:
            io_obj = StringIO.StringIO(bytes_from_file)
            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
                raw_bytes = f.read()
        else:
            # Previously this path left raw_bytes unbound; fail explicitly
            # for codecs that aren't handled.
            raise ParquetFormatException(
                "Unsupported compression codec: {0}".format(
                    CompressionCodec._VALUES_TO_NAMES[column_metadata.codec]))
    else:
        raw_bytes = bytes_from_file
    return raw_bytes


def _read_data(fo, fo_encoding, value_count, bit_width):
    """Internal method to read data from the file-object using the given
    encoding. The data could be definition levels, repetition levels, or
    actual values."""
    vals = []
    if fo_encoding == Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")
    return vals
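
# For context: per the parquet-format spec, the RLE/bit-packed hybrid data
# consumed above starts each run with a little-endian varint header whose low
# bit selects the mode (0 = RLE run, 1 = bit-packed groups) and whose
# remaining bits give the run length. The actual decoding is delegated to
# encoding.read_rle_bit_packed_hybrid; this note is descriptive only.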


def read_data_page(fo, schema_helper, page_header, column_metadata, dictionary):
    daph = page_header.data_page_header
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    vals = []
    print("  definition_level_encoding: {0}".format(
        Encoding._VALUES_TO_NAMES[daph.definition_level_encoding]))
    print("  repetition_level_encoding: {0}".format(
        Encoding._VALUES_TO_NAMES[daph.repetition_level_encoding]))
    print("  encoding: {0}".format(Encoding._VALUES_TO_NAMES[daph.encoding]))

    # definition levels are skipped if data is required.
    if not schema_helper.is_required(column_metadata.path_in_schema[-1]):
        max_definition_level = schema_helper.max_definition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_definition_level)  # TODO Where does the -1 come from?
        print(" max def level: {0} bit_width: {1}".format(
            max_definition_level, bit_width))
        if bit_width == 0:
            definition_levels = [0] * daph.num_values
        else:
            definition_levels = _read_data(io_obj, daph.definition_level_encoding,
                                           daph.num_values, bit_width)
        print("  Definition levels: {0}".format(
            ",".join([str(dl) for dl in definition_levels])))

    # repetition levels are skipped if data is at the first level.
    if len(column_metadata.path_in_schema) > 1:
        max_repetition_level = schema_helper.max_repetition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_repetition_level)
        # Pass bit_width through; the original call omitted it, which raised
        # a TypeError since _read_data requires four arguments.
        repetition_levels = _read_data(io_obj, daph.repetition_level_encoding,
                                       daph.num_values, bit_width)

    # TODO Actually use the definition and repetition levels.
    if daph.encoding == Encoding.PLAIN:
        for i in range(daph.num_values):
            vals.append(encoding.read_plain(io_obj, column_metadata.type, None))
        print(" Values: " + ",".join([str(x) for x in vals]))
    elif daph.encoding == Encoding.PLAIN_DICTIONARY:
        # the bit width is stored as a single byte.
        bit_width = struct.unpack("<B", io_obj.read(1))[0]
        print("bit_width: {0}".format(bit_width))
        total_seen = 0
        dict_values_bytes = io_obj.read()
        dict_values_io_obj = StringIO.StringIO(dict_values_bytes)
        # TODO jcrobak -- not sure that this loop is needed?
        while total_seen < daph.num_values:
            values = encoding.read_rle_bit_packed_hybrid(
                dict_values_io_obj, bit_width, len(dict_values_bytes))
            vals += [dictionary[v] for v in values]
            total_seen += len(values)
    else:
        raise ParquetFormatException("Unsupported encoding: " +
                                     Encoding._VALUES_TO_NAMES[daph.encoding])
    return vals


def read_dictionary_page(fo, page_header, column_metadata):
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    dict_items = []
    while io_obj.tell() < len(raw_bytes):
        # TODO - length for fixed byte array
        dict_items.append(encoding.read_plain(io_obj, column_metadata.type, None))
    return dict_items
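
# A dictionary page holds the distinct values of a column chunk, plain
# encoded. Data pages using PLAIN_DICTIONARY then store RLE/bit-packed
# indices into the list returned here (see read_data_page above).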


def dump(filename, max_records=10):
    footer = read_footer(filename)
    schema_helper = schema.SchemaHelper(footer.schema)
    for rg in footer.row_groups:
        res = defaultdict(list)
        row_group_rows = rg.num_rows
        dict_items = []
        for idx, cg in enumerate(rg.columns):
            cmd = cg.meta_data
            with open(filename, 'rb') as fo:
                offset = (cmd.data_page_offset
                          if (cmd.dictionary_page_offset is None
                              or cmd.data_page_offset < cmd.dictionary_page_offset)
                          else cmd.dictionary_page_offset)
                fo.seek(offset, 0)
                values_seen = 0
                print("reading column chunk of type: {0}".format(
                    Type._VALUES_TO_NAMES[cmd.type]))
                while values_seen < row_group_rows:
                    ph = _read_page_header(fo)
                    print("Reading page (type={2}, uncompressed={0} bytes, "
                          "compressed={1} bytes)".format(
                              ph.uncompressed_page_size, ph.compressed_page_size,
                              PageType._VALUES_TO_NAMES[ph.type]))
                    if ph.type == PageType.DATA_PAGE:
                        values = read_data_page(fo, schema_helper, ph, cmd, dict_items)
                        res[".".join(cmd.path_in_schema)] += values
                        # Advance by the values in this page, not cmd.num_values
                        # (the chunk total), so multi-page chunks are read fully.
                        values_seen += ph.data_page_header.num_values
                    elif ph.type == PageType.DICTIONARY_PAGE:
                        print(ph)
                        assert dict_items == []
                        dict_items = read_dictionary_page(fo, ph, cmd)
                        print("Dictionary: " + str(dict_items))
                    else:
                        logger.info("Skipping unknown page type={0}".format(ph.type))
        print("Data for row group: ")
        keys = res.keys()
        print("\t".join(keys))
        for i in range(rg.num_rows):
            print("\t".join(str(res[k][i]) for k in keys))
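
# Minimal usage sketch (not part of the original module): run this file
# directly against a Parquet file to print its metadata and contents. The
# command-line handling below is an assumption for illustration.
if __name__ == "__main__":
    import sys
    logging.basicConfig(level=logging.INFO)
    parquet_file = sys.argv[1]  # e.g. "example.parquet" (hypothetical path)
    dump_metadata(parquet_file)
    dump(parquet_file)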