__init__.py

import gzip
import json
import logging
import struct
import StringIO
import sys
from collections import defaultdict

from ttypes import (FileMetaData, CompressionCodec, Encoding,
                    FieldRepetitionType, PageHeader, PageType, Type)
from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport

import encoding
import schema

logger = logging.getLogger("parquet")

try:
    import snappy
except ImportError:
    logger.warn(
        "Couldn't import snappy. Support for snappy compression disabled.")


class ParquetFormatException(Exception):
    pass


def _check_header_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
    fo.seek(0, 0)
    magic = fo.read(4)
    return magic == 'PAR1'


def _check_footer_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
    fo.seek(-4, 2)  # seek to four bytes from the end of the file
    magic = fo.read(4)
    return magic == 'PAR1'
def _get_footer_size(fo):
    "Reads the footer size in bytes, which is serialized as little endian"
    fo.seek(-8, 2)
    tup = struct.unpack("<i", fo.read(4))
    return tup[0]
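
# Note on the layout assumed by _get_footer_size/_read_footer: a Parquet file
# ends with the Thrift-serialized FileMetaData, followed by a 4-byte
# little-endian footer length and the 4-byte magic 'PAR1'. A minimal sketch of
# reading that 8-byte tail directly (the file name is hypothetical):
#
#   with open("example.parquet", 'rb') as fo:
#       fo.seek(-8, 2)  # the length field starts 8 bytes from EOF
#       footer_len, magic = struct.unpack("<i4s", fo.read(8))
#       assert magic == 'PAR1'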
def _read_footer(fo):
    """Reads the footer from the given file object, returning a FileMetaData
    object. This method assumes that the fo references a valid parquet file"""
    footer_size = _get_footer_size(fo)
    logger.debug("Footer size in bytes: %s", footer_size)
    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    fmd = FileMetaData()
    fmd.read(pin)
    return fmd


def _read_page_header(fo):
    """Reads the page_header from the given fo"""
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    ph = PageHeader()
    ph.read(pin)
    return ph
def read_footer(filename):
    """Reads and returns the FileMetaData object for the given file."""
    with open(filename, 'rb') as fo:
        if not _check_header_magic_bytes(fo) or \
                not _check_footer_magic_bytes(fo):
            raise ParquetFormatException("{0} is not a valid parquet file "
                                         "(missing magic bytes)"
                                         .format(filename))
        return _read_footer(fo)
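
# A minimal usage sketch for read_footer (the path "example.parquet" is
# hypothetical): print the row count and the flat list of schema element
# names taken from the file footer.
#
#   footer = read_footer("example.parquet")
#   print footer.num_rows
#   print [se.name for se in footer.schema]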
def _get_name(type_, value):
    """Returns the name for the given value of the given type_ unless value is
    None, in which case it returns the string "None"."""
    return type_._VALUES_TO_NAMES[value] if value is not None else "None"
def _get_offset(cmd):
    """Returns the offset into the cmd based upon if it's a dictionary page or
    a data page"""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset
def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
    def println(value):
        out.write(value + "\n")

    footer = read_footer(filename)
    println("File Metadata: {0}".format(filename))
    println("  Version: {0}".format(footer.version))
    println("  Num Rows: {0}".format(footer.num_rows))
    println("  k/v metadata: ")
    if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
        for kv in footer.key_value_metadata:
            println("    {0}={1}".format(kv.key, kv.value))
    else:
        println("    (none)")
    println("  schema: ")
    for se in footer.schema:
        println("    {name} ({type}): length={type_length}, "
                "repetition={repetition_type}, "
                "children={num_children}, "
                "converted_type={converted_type}".format(
                    name=se.name,
                    type=(Type._VALUES_TO_NAMES[se.type]
                          if se.type is not None else None),
                    type_length=se.type_length,
                    repetition_type=_get_name(FieldRepetitionType,
                                              se.repetition_type),
                    num_children=se.num_children,
                    converted_type=se.converted_type))
    if show_row_group_metadata:
        println("  row groups: ")
        for rg in footer.row_groups:
            num_rows = rg.num_rows
            bytes = rg.total_byte_size
            println(
                "    rows={num_rows}, bytes={bytes}".format(num_rows=num_rows,
                                                            bytes=bytes))
            println("    chunks:")
            for cg in rg.columns:
                cmd = cg.meta_data
                println("      type={type} file_offset={offset} "
                        "compression={codec} "
                        "encodings={encodings} path_in_schema={path_in_schema} "
                        "num_values={num_values} uncompressed_bytes={raw_bytes} "
                        "compressed_bytes={compressed_bytes} "
                        "data_page_offset={data_page_offset} "
                        "dictionary_page_offset={dictionary_page_offset}".format(
                            type=_get_name(Type, cmd.type),
                            offset=cg.file_offset,
                            codec=_get_name(CompressionCodec, cmd.codec),
                            encodings=",".join(
                                [_get_name(Encoding, s)
                                 for s in cmd.encodings]),
                            path_in_schema=cmd.path_in_schema,
                            num_values=cmd.num_values,
                            raw_bytes=cmd.total_uncompressed_size,
                            compressed_bytes=cmd.total_compressed_size,
                            data_page_offset=cmd.data_page_offset,
                            dictionary_page_offset=cmd.dictionary_page_offset))
                with open(filename, 'rb') as fo:
                    offset = _get_offset(cmd)
                    fo.seek(offset, 0)
                    values_read = 0
                    println("      pages: ")
                    while values_read < num_rows:
                        ph = _read_page_header(fo)
                        # seek past current page.
                        fo.seek(ph.compressed_page_size, 1)
                        daph = ph.data_page_header
                        type_ = _get_name(PageType, ph.type)
                        raw_bytes = ph.uncompressed_page_size
                        num_values = None
                        if ph.type == PageType.DATA_PAGE:
                            num_values = daph.num_values
                            values_read += num_values
                        if ph.type == PageType.DICTIONARY_PAGE:
                            pass
                            # num_values = diph.num_values
                        encoding_type = None
                        def_level_encoding = None
                        rep_level_encoding = None
                        if daph:
                            encoding_type = _get_name(Encoding, daph.encoding)
                            def_level_encoding = _get_name(
                                Encoding, daph.definition_level_encoding)
                            rep_level_encoding = _get_name(
                                Encoding, daph.repetition_level_encoding)
                        println("        page header: type={type} "
                                "uncompressed_size={raw_bytes} "
                                "num_values={num_values} encoding={encoding} "
                                "def_level_encoding={def_level_encoding} "
                                "rep_level_encoding={rep_level_encoding}".format(
                                    type=type_,
                                    raw_bytes=raw_bytes,
                                    num_values=num_values,
                                    encoding=encoding_type,
                                    def_level_encoding=def_level_encoding,
                                    rep_level_encoding=rep_level_encoding))
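
# A minimal usage sketch for dump_metadata (the path is hypothetical). The
# second argument controls whether per-row-group and per-page details are
# printed; ``out`` may be any writable file-like object.
#
#   dump_metadata("example.parquet", show_row_group_metadata=True)
#
#   buf = StringIO.StringIO()
#   dump_metadata("example.parquet", False, out=buf)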
def _read_page(fo, page_header, column_metadata):
    """Internal function to read the data page from the given file-object
    and convert it to raw, uncompressed bytes (if necessary)."""
    bytes_from_file = fo.read(page_header.compressed_page_size)
    codec = column_metadata.codec
    if codec is not None and codec != CompressionCodec.UNCOMPRESSED:
        if column_metadata.codec == CompressionCodec.SNAPPY:
            raw_bytes = snappy.decompress(bytes_from_file)
        elif column_metadata.codec == CompressionCodec.GZIP:
            io_obj = StringIO.StringIO(bytes_from_file)
            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
                raw_bytes = f.read()
        else:
            raise ParquetFormatException(
                "Unsupported Codec: {0}".format(codec))
    else:
        raw_bytes = bytes_from_file
    logger.debug(
        "Read page with compression type {0}. Bytes {1} -> {2}".format(
            _get_name(CompressionCodec, codec),
            page_header.compressed_page_size,
            page_header.uncompressed_page_size))
    assert len(raw_bytes) == page_header.uncompressed_page_size, \
        "found {0} raw bytes (expected {1})".format(
            len(raw_bytes),
            page_header.uncompressed_page_size)
    return raw_bytes
def _read_data(fo, fo_encoding, value_count, bit_width):
    """Internal method to read data from the file-object using the given
    encoding. The data could be definition levels, repetition levels, or
    actual values.
    """
    vals = []
    if fo_encoding == Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")

    return vals
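
# A sketch of how _read_data is used for levels: an OPTIONAL top-level column
# has a max definition level of 1, so its definition levels are read with bit
# width 1 using the RLE/bit-packed hybrid reader from the local ``encoding``
# module. The ``levels_bytes`` buffer and ``num_values`` count here are
# hypothetical.
#
#   levels_io = StringIO.StringIO(levels_bytes)
#   def_levels = _read_data(levels_io, Encoding.RLE, num_values, 1)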
def read_data_page(fo, schema_helper, page_header, column_metadata,
                   dictionary):
    """Reads the datapage from the given file-like object based upon the
    metadata in the schema_helper, page_header, column_metadata, and
    (optional) dictionary. Returns a list of values.
    """
    daph = page_header.data_page_header
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    vals = []

    logger.debug("  definition_level_encoding: %s",
                 _get_name(Encoding, daph.definition_level_encoding))
    logger.debug("  repetition_level_encoding: %s",
                 _get_name(Encoding, daph.repetition_level_encoding))
    logger.debug("  encoding: %s", _get_name(Encoding, daph.encoding))

    # definition levels are skipped if data is required.
    if not schema_helper.is_required(column_metadata.path_in_schema[-1]):
        max_definition_level = schema_helper.max_definition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_definition_level)
        logger.debug("  max def level: %s  bit_width: %s",
                     max_definition_level, bit_width)
        if bit_width == 0:
            definition_levels = [0] * daph.num_values
        else:
            definition_levels = _read_data(io_obj,
                                           daph.definition_level_encoding,
                                           daph.num_values,
                                           bit_width)
        logger.debug("  Definition levels: %s",
                     ",".join([str(dl) for dl in definition_levels]))
    # repetition levels are skipped if data is at the first level.
    if len(column_metadata.path_in_schema) > 1:
        max_repetition_level = schema_helper.max_repetition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_repetition_level)
        repetition_levels = _read_data(io_obj,
                                       daph.repetition_level_encoding,
                                       daph.num_values,
                                       bit_width)
    # TODO Actually use the definition and repetition levels.

    if daph.encoding == Encoding.PLAIN:
        for i in range(daph.num_values):
            vals.append(
                encoding.read_plain(io_obj, column_metadata.type, None))
        logger.debug("  Values: %s", ",".join([str(x) for x in vals]))
    elif daph.encoding == Encoding.PLAIN_DICTIONARY:
        # bit_width is stored as single byte.
        bit_width = struct.unpack("<B", io_obj.read(1))[0]
        logger.debug("bit_width: %d", bit_width)
        total_seen = 0
        dict_values_bytes = io_obj.read()
        dict_values_io_obj = StringIO.StringIO(dict_values_bytes)
        # TODO jcrobak -- not sure that this loop is needed?
        while total_seen < daph.num_values:
            values = encoding.read_rle_bit_packed_hybrid(
                dict_values_io_obj, bit_width, len(dict_values_bytes))
            vals += [dictionary[v] for v in values]
            total_seen += len(values)
    else:
        raise ParquetFormatException("Unsupported encoding: %s",
                                     _get_name(Encoding, daph.encoding))
    return vals
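
# Sketch of the PLAIN_DICTIONARY branch above: the page body holds a one-byte
# bit width followed by RLE/bit-packed hybrid indices into the dictionary read
# from the column chunk's dictionary page. For example, with a dictionary of
# ["red", "green", "blue"] the indices [0, 2, 2, 1] decode to
# ["red", "blue", "blue", "green"]. The page-header names below are
# hypothetical:
#
#   dict_items = read_dictionary_page(fo, dict_page_header, cmd)
#   values = read_data_page(fo, schema_helper, data_page_header, cmd,
#                           dict_items)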
def read_dictionary_page(fo, page_header, column_metadata):
    """Reads a dictionary page and returns the list of values it contains."""
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    dict_items = []
    while io_obj.tell() < len(raw_bytes):
        # TODO - length for fixed byte array
        dict_items.append(
            encoding.read_plain(io_obj, column_metadata.type, None))
    return dict_items
def dump(filename, options, out=sys.stdout):
    def println(value):
        out.write(value + "\n")

    footer = read_footer(filename)
    schema_helper = schema.SchemaHelper(footer.schema)
    total_count = 0
    for rg in footer.row_groups:
        res = defaultdict(list)
        row_group_rows = rg.num_rows
        for idx, cg in enumerate(rg.columns):
            dict_items = []
            cmd = cg.meta_data
            # skip if the list of columns is specified and this isn't in it
            if options.col and not ".".join(cmd.path_in_schema) in options.col:
                continue
            with open(filename, 'rb') as fo:
                offset = _get_offset(cmd)
                fo.seek(offset, 0)
                values_seen = 0
                logger.debug("reading column chunk of type: %s",
                             _get_name(Type, cmd.type))
                while values_seen < row_group_rows:
                    ph = _read_page_header(fo)
                    logger.debug("Reading page (type=%s, "
                                 "uncompressed=%s bytes, "
                                 "compressed=%s bytes)",
                                 _get_name(PageType, ph.type),
                                 ph.uncompressed_page_size,
                                 ph.compressed_page_size)
                    if ph.type == PageType.DATA_PAGE:
                        values = read_data_page(fo, schema_helper, ph, cmd,
                                                dict_items)
                        res[".".join(cmd.path_in_schema)] += values
                        values_seen += cmd.num_values
                    elif ph.type == PageType.DICTIONARY_PAGE:
                        logger.debug(ph)
                        assert dict_items == []
                        dict_items = read_dictionary_page(fo, ph, cmd)
                        logger.debug("Dictionary: %s", str(dict_items))
                    else:
                        logger.warn("Skipping unknown page type={0}".format(
                            _get_name(PageType, ph.type)))
        keys = options.col if options.col else [s.name for s in footer.schema
                                                if s.name in res]
        if options.format == "csv" and not options.no_headers:
            println("\t".join(keys))
        for i in range(rg.num_rows):
            if options.limit != -1 and i + total_count >= options.limit:
                return
            if options.format == "csv":
                println("\t".join(str(res[k][i]) for k in keys))
            elif options.format == "json":
                println(json.dumps(dict([(k, res[k][i]) for k in keys])))
        total_count += rg.num_rows
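
# A minimal usage sketch for dump, assuming "example.parquet" exists. The
# ``options`` object only needs the attributes this function reads (col,
# format, no_headers, limit); argparse.Namespace is used here purely for
# illustration.
#
#   from argparse import Namespace
#   opts = Namespace(col=None, format="csv", no_headers=False, limit=-1)
#   dump("example.parquet", opts)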