import gzip
import json
import logging
import struct
import StringIO
import sys
from collections import defaultdict

from ttypes import (FileMetaData, CompressionCodec, Encoding,
                    FieldRepetitionType, PageHeader, PageType, Type)
from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport

import encoding
import schema

logger = logging.getLogger("parquet")

try:
    import snappy
except ImportError:
    logger.warn(
        "Couldn't import snappy. Support for snappy compression disabled.")


class ParquetFormatException(Exception):
    pass


def _check_header_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
    fo.seek(0, 0)
    magic = fo.read(4)
    return magic == 'PAR1'


def _check_footer_magic_bytes(fo):
    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
    fo.seek(-4, 2)  # seek to four bytes from the end of the file
    magic = fo.read(4)
    return magic == 'PAR1'


def _get_footer_size(fo):
    "Reads the footer size in bytes, which is serialized as little endian"
    fo.seek(-8, 2)
    tup = struct.unpack("<i", fo.read(4))
    return tup[0]


def _read_footer(fo):
    """Reads the footer from the given file object, returning a FileMetaData
    object. This method assumes that fo references a valid parquet file."""
    footer_size = _get_footer_size(fo)
    logger.debug("Footer size in bytes: %s", footer_size)
    fo.seek(-(8 + footer_size), 2)  # seek to the beginning of the footer
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    fmd = FileMetaData()
    fmd.read(pin)
    return fmd


def _read_page_header(fo):
    """Reads the thrift-serialized PageHeader at the current position of fo."""
    tin = TTransport.TFileObjectTransport(fo)
    pin = TCompactProtocol.TCompactProtocol(tin)
    ph = PageHeader()
    ph.read(pin)
    return ph


def read_footer(filename):
    """Reads and returns the FileMetaData object for the given file."""
    with open(filename, 'rb') as fo:
        if not _check_header_magic_bytes(fo) or \
           not _check_footer_magic_bytes(fo):
            raise ParquetFormatException(
                "{0} is not a valid parquet file (missing magic bytes)"
                .format(filename))
        return _read_footer(fo)


def _get_name(type_, value):
    """Returns the name for the given value of the given thrift enum type_,
    or "None" if value is None."""
    return type_._VALUES_TO_NAMES[value] if value is not None else "None"


def _get_offset(cmd):
    """Returns the starting offset of the column chunk: the dictionary page
    offset if there is one, otherwise the first data page offset."""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset


def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
    """Dumps the file, schema and (optionally) row-group and page metadata
    of the given parquet file to out."""
    def println(value):
        out.write(value + "\n")

    footer = read_footer(filename)
    println("File Metadata: {0}".format(filename))
    println("  version: {0}".format(footer.version))
    println("  num rows: {0}".format(footer.num_rows))
    println("  k/v metadata: ")
    if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
        for kv in footer.key_value_metadata:
            println("    {0}={1}".format(kv.key, kv.value))
    else:
        println("    (none)")
    println("  schema: ")
    for se in footer.schema:
        println("    {name} ({type}): length={type_length}, "
                "repetition={repetition_type}, "
                "children={num_children}, "
                "converted_type={converted_type}".format(
                    name=se.name,
                    type=Type._VALUES_TO_NAMES[se.type] if se.type else None,
                    type_length=se.type_length,
                    repetition_type=_get_name(FieldRepetitionType,
                                              se.repetition_type),
                    num_children=se.num_children,
                    converted_type=se.converted_type))
    if show_row_group_metadata:
        println("  row groups: ")
        for rg in footer.row_groups:
            num_rows = rg.num_rows
            bytes = rg.total_byte_size
            println("    rows={num_rows}, bytes={bytes}".format(
                num_rows=num_rows, bytes=bytes))
            println("    chunks:")
            for cg in rg.columns:
                cmd = cg.meta_data
                println("      type={type} file_offset={offset} "
                        "compression={codec} "
                        "encodings={encodings} path_in_schema={path_in_schema} "
                        "num_values={num_values} uncompressed_bytes={raw_bytes} "
                        "compressed_bytes={compressed_bytes} "
                        "data_page_offset={data_page_offset} "
                        "dictionary_page_offset={dictionary_page_offset}".format(
                            type=cmd.type,
                            offset=cg.file_offset,
                            codec=_get_name(CompressionCodec, cmd.codec),
                            encodings=",".join(
                                [_get_name(Encoding, s)
                                 for s in cmd.encodings]),
                            path_in_schema=cmd.path_in_schema,
                            num_values=cmd.num_values,
                            raw_bytes=cmd.total_uncompressed_size,
                            compressed_bytes=cmd.total_compressed_size,
                            data_page_offset=cmd.data_page_offset,
                            dictionary_page_offset=cmd.dictionary_page_offset))
                with open(filename, 'rb') as fo:
                    offset = _get_offset(cmd)
                    fo.seek(offset, 0)
                    values_read = 0
                    println("      pages: ")
                    while values_read < num_rows:
                        ph = _read_page_header(fo)
                        # seek past the current page body.
                        fo.seek(ph.compressed_page_size, 1)
                        daph = ph.data_page_header
                        type_ = _get_name(PageType, ph.type)
                        raw_bytes = ph.uncompressed_page_size
                        num_values = None
                        if ph.type == PageType.DATA_PAGE:
                            num_values = daph.num_values
                            values_read += num_values
                        if ph.type == PageType.DICTIONARY_PAGE:
                            pass
                            # num_values = diph.num_values
                        encoding_type = None
                        def_level_encoding = None
                        rep_level_encoding = None
                        if daph:
                            encoding_type = _get_name(Encoding, daph.encoding)
                            def_level_encoding = _get_name(
                                Encoding, daph.definition_level_encoding)
                            rep_level_encoding = _get_name(
                                Encoding, daph.repetition_level_encoding)
                        println("        page header: type={type} "
                                "uncompressed_size={raw_bytes} "
                                "num_values={num_values} encoding={encoding} "
                                "def_level_encoding={def_level_encoding} "
                                "rep_level_encoding={rep_level_encoding}".format(
                                    type=type_,
                                    raw_bytes=raw_bytes,
                                    num_values=num_values,
                                    encoding=encoding_type,
                                    def_level_encoding=def_level_encoding,
                                    rep_level_encoding=rep_level_encoding))
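
# Usage sketch for the metadata dump above (illustrative only: the module
# name "parquet" matches the logger name, but how you import this file
# depends on how it is packaged, and "example.parquet" is a placeholder
# path, not a file that ships with this module):
#
#     import parquet
#     parquet.dump_metadata("example.parquet", show_row_group_metadata=True)
#
# This prints the file version, row count, key/value metadata and schema;
# with show_row_group_metadata=True it also walks every row group, column
# chunk and page header using the thrift readers defined above.
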
def _read_page(fo, page_header, column_metadata):
    """Internal function to read the data page from the given file-object
    and convert it to raw, uncompressed bytes (if necessary)."""
    bytes_from_file = fo.read(page_header.compressed_page_size)
    codec = column_metadata.codec
    if codec is not None and codec != CompressionCodec.UNCOMPRESSED:
        if column_metadata.codec == CompressionCodec.SNAPPY:
            raw_bytes = snappy.decompress(bytes_from_file)
        elif column_metadata.codec == CompressionCodec.GZIP:
            io_obj = StringIO.StringIO(bytes_from_file)
            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
                raw_bytes = f.read()
        else:
            raise ParquetFormatException(
                "Unsupported Codec: {0}".format(codec))
    else:
        raw_bytes = bytes_from_file
    return raw_bytes


def _read_data(fo, fo_encoding, value_count, bit_width):
    """Internal method to read data from the file-object using the given
    encoding. The data could be definition levels, repetition levels, or
    actual values.
    """
    vals = []
    if fo_encoding == Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")
    return vals


def read_data_page(fo, schema_helper, page_header, column_metadata,
                   dictionary):
    """Reads the data page from the given file-like object based upon the
    metadata in the schema_helper, page_header, column_metadata, and
    (optional) dictionary. Returns a list of values.
    """
    daph = page_header.data_page_header
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    vals = []
    logger.debug("  definition_level_encoding: %s",
                 _get_name(Encoding, daph.definition_level_encoding))
    logger.debug("  repetition_level_encoding: %s",
                 _get_name(Encoding, daph.repetition_level_encoding))
    logger.debug("  encoding: %s", _get_name(Encoding, daph.encoding))

    # definition levels are skipped if data is required.
    if not schema_helper.is_required(column_metadata.path_in_schema[-1]):
        max_definition_level = schema_helper.max_definition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_definition_level)
        logger.debug("  max def level: %s  bit_width: %s",
                     max_definition_level, bit_width)
        if bit_width == 0:
            definition_levels = [0] * daph.num_values
        else:
            definition_levels = _read_data(io_obj,
                                           daph.definition_level_encoding,
                                           daph.num_values,
                                           bit_width)
        logger.debug("  Definition levels: %s",
                     ",".join([str(dl) for dl in definition_levels]))

    # repetition levels are skipped if data is at the first level.
    if len(column_metadata.path_in_schema) > 1:
        max_repetition_level = schema_helper.max_repetition_level(
            column_metadata.path_in_schema)
        bit_width = encoding.width_from_max_int(max_repetition_level)
        repetition_levels = _read_data(io_obj,
                                       daph.repetition_level_encoding,
                                       daph.num_values,
                                       bit_width)

    # TODO Actually use the definition and repetition levels.

    if daph.encoding == Encoding.PLAIN:
        for i in range(daph.num_values):
            vals.append(
                encoding.read_plain(io_obj, column_metadata.type, None))
        logger.debug("  Values: %s", ",".join([str(x) for x in vals]))
    elif daph.encoding == Encoding.PLAIN_DICTIONARY:
        # bit_width is stored as a single byte.
        bit_width = struct.unpack("<B", io_obj.read(1))[0]
        logger.debug("  bit_width: %d", bit_width)
        # the rest of the page is a run of RLE/bit-packed hybrid encoded
        # indexes into the values read from the dictionary page.
        dict_values_bytes = io_obj.read()
        dict_values_io_obj = StringIO.StringIO(dict_values_bytes)
        total_seen = 0
        while total_seen < daph.num_values:
            values = encoding.read_rle_bit_packed_hybrid(
                dict_values_io_obj, bit_width, len(dict_values_bytes))
            if len(values) + total_seen > daph.num_values:
                values = values[0:daph.num_values - total_seen]
            vals += [dictionary[v] for v in values]
            total_seen += len(values)
    else:
        raise ParquetFormatException("Unsupported encoding: {0}".format(
            _get_name(Encoding, daph.encoding)))
    return vals
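
# Worked example of the level decoding above (a sketch under assumed inputs,
# not code from this module): for a column at path "a.b" where both fields
# are optional, schema_helper.max_definition_level(["a", "b"]) is 2, so
# encoding.width_from_max_int(2) picks a 2-bit level width and the levels
# are read with:
#
#     definition_levels = _read_data(io_obj, Encoding.RLE, daph.num_values, 2)
#
# A definition level equal to the maximum (2 here) marks a value that is
# actually present in the page data; anything smaller marks a null at the
# corresponding nesting depth, which is why the TODO above matters for
# correctly reassembling nullable and nested columns.
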
def read_dictionary_page(fo, page_header, column_metadata):
    """Reads a dictionary page and returns the plain-encoded values it
    contains."""
    raw_bytes = _read_page(fo, page_header, column_metadata)
    io_obj = StringIO.StringIO(raw_bytes)
    dict_items = []
    while io_obj.tell() < len(raw_bytes):
        dict_items.append(
            encoding.read_plain(io_obj, column_metadata.type, None))
    return dict_items


def _dump(fo, options, out=sys.stdout):
    """Dumps the rows of the parquet data in fo to out, as csv or json
    depending on options.format, stopping after options.limit rows."""
    def println(value):
        out.write(value + "\n")

    footer = _read_footer(fo)
    schema_helper = schema.SchemaHelper(footer.schema)
    total_count = 0
    for rg in footer.row_groups:
        res = defaultdict(list)
        row_group_rows = rg.num_rows
        for cg in rg.columns:
            dict_items = []
            cmd = cg.meta_data
            offset = _get_offset(cmd)
            fo.seek(offset, 0)
            values_seen = 0
            logger.debug("reading column chunk of type: %s",
                         _get_name(Type, cmd.type))
            while values_seen < row_group_rows:
                ph = _read_page_header(fo)
                logger.debug("Reading page (type=%s, uncompressed=%s bytes, "
                             "compressed=%s bytes)",
                             _get_name(PageType, ph.type),
                             ph.uncompressed_page_size,
                             ph.compressed_page_size)
                if ph.type == PageType.DATA_PAGE:
                    values = read_data_page(fo, schema_helper, ph, cmd,
                                            dict_items)
                    res[".".join(cmd.path_in_schema)] += values
                    values_seen += ph.data_page_header.num_values
                elif ph.type == PageType.DICTIONARY_PAGE:
                    assert dict_items == []
                    dict_items = read_dictionary_page(fo, ph, cmd)
                    logger.debug("Dictionary: %s", str(dict_items))
                else:
                    logger.warn("Skipping unknown page type={0}".format(
                        _get_name(PageType, ph.type)))

        keys = [s.name for s in footer.schema if s.name in res]
        if options.format == "csv":
            println("\t".join(keys))
        for i in range(rg.num_rows):
            if options.limit != -1 and i + total_count >= options.limit:
                return
            if options.format == "csv":
                println("\t".join(str(res[k][i]) for k in keys))
            elif options.format == "json":
                println(json.dumps(dict([(k, res[k][i]) for k in keys])))
        total_count += rg.num_rows


def dump(filename, options, out=sys.stdout):
    """Dumps the rows of the parquet file named filename to out, using the
    format and limit settings from the given options object."""
    with open(filename, 'rb') as fo:
        return _dump(fo, options=options, out=out)
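
# Minimal command-line sketch, added for illustration (an assumption, not
# the project's real CLI entry point; the script and file names below are
# placeholders):
#
#     python this_module.py example.parquet
#
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    if len(sys.argv) != 2:
        sys.stderr.write("usage: {0} <file.parquet>\n".format(sys.argv[0]))
        sys.exit(1)
    # Print file, schema, row-group and page metadata for the given file.
    dump_metadata(sys.argv[1], show_row_group_metadata=True)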