
Merge upstream history from jcrobak/parquet-python as a subtree.

==
Add 'desktop/core/ext-py/parquet-python/' from commit 'b7284a412ca0a053d97f780e690efdcc7955e5d6'

git-subtree-dir: desktop/core/ext-py/parquet-python
git-subtree-mainline: fc2f0085d1d8df277d7f8e128d898cafaf1d4248
git-subtree-split: b7284a412ca0a053d97f780e690efdcc7955e5d6
Matías Javier Rossi, 9 years ago, commit ffcf82bc08

+ 6 - 0
desktop/core/ext-py/parquet-python/.gitignore

@@ -0,0 +1,6 @@
+*.pyc
+.coverage
+cover
+build
+dist
+parquet.egg-info

+ 177 - 0
desktop/core/ext-py/parquet-python/LICENSE

@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS

+ 33 - 0
desktop/core/ext-py/parquet-python/README.md

@@ -0,0 +1,33 @@
+# parquet-python
+
+parquet-python is a pure-python implementation (currently with only read-support) of the [parquet format](https://github.com/Parquet/parquet-format). It comes with a script for reading parquet files and outputting the data to stdout as JSON or TSV (without the overhead of JVM startup). Performance has not yet been optimized, but it's useful for debugging and quick viewing of data in files.
+
+Not all parts of the parquet-format have been implemented or tested yet, e.g. nested data and the deprecated bit-packing encoding -- see Todos below for a full list. With that said, parquet-python is capable of reading all the data files from the [parquet-compatibility](https://github.com/Parquet/parquet-compatibility) project.
+
+
+# requirements
+
+parquet-python has been tested on python 2.7. It depends on `thrift` (0.9) and `python-snappy` (for snappy compressed files).
+
+
+# getting started
+
+parquet-python has not yet been uploaded to PyPI because the code still has a lot of bugs. To get started, clone the project, change into the parquet-python directory, and run `python -m parquet`.
+
+You may need to install the `thrift` and `python-snappy` projects with `easy_install` or `pip`. To install parquet-python system-wide, run `python setup.py install`.
+
+
+# Todos
+
+* Support the deprecated bitpacking
+* Support for bitwidths > 8
+* Fix handling of repetition-levels and definition-levels
+* Tests for nested schemas, null data
+* Support reading of data from HDFS via snakebite and/or webhdfs.
+* Implement writing
+* Performance evaluation and optimization (i.e. how it compares to the C++ and Java implementations)
+
+
+# Contributing
+
+Contributions are made via Pull Requests. Please include tests with your changes and follow [PEP 8](http://www.python.org/dev/peps/pep-0008/).
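
As a quick orientation before the code below, here is a minimal sketch (Python 2, matching the module added in this commit; `example.parquet` is a hypothetical file) of using the reader as a library rather than through `python -m parquet`:

```python
# Hypothetical usage sketch of the public helpers defined in parquet/__init__.py.
import parquet

footer = parquet.read_footer("example.parquet")   # thrift FileMetaData object
print footer.num_rows

# Prints the same per-file and per-row-group metadata as `python -m parquet --metadata`.
parquet.dump_metadata("example.parquet", show_row_group_metadata=True)
```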

+ 392 - 0
desktop/core/ext-py/parquet-python/parquet/__init__.py

@@ -0,0 +1,392 @@
+import gzip
+import json
+import logging
+import struct
+import cStringIO
+import sys
+from collections import defaultdict
+from ttypes import (FileMetaData, CompressionCodec, Encoding,
+                    FieldRepetitionType, PageHeader, PageType, Type)
+from thrift.protocol import TCompactProtocol
+from thrift.transport import TTransport
+import encoding
+import schema
+
+
+logger = logging.getLogger("parquet")
+
+try:
+    import snappy
+except ImportError:
+    logger.warn(
+        "Couldn't import snappy. Support for snappy compression disabled.")
+
+
+class ParquetFormatException(Exception):
+    pass
+
+
+def _check_header_magic_bytes(fo):
+    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
+    fo.seek(0, 0)
+    magic = fo.read(4)
+    return magic == 'PAR1'
+
+
+def _check_footer_magic_bytes(fo):
+    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
+    fo.seek(-4, 2)  # seek to four bytes from the end of the file
+    magic = fo.read(4)
+    return magic == 'PAR1'
+
+
+def _get_footer_size(fo):
+    "Readers the footer size in bytes, which is serialized as little endian"
+    fo.seek(-8, 2)
+    tup = struct.unpack("<i", fo.read(4))
+    return tup[0]
+
+
+def _read_footer(fo):
+    """Reads the footer from the given file object, returning a FileMetaData
+    object. This method assumes that the fo references a valid parquet file"""
+    footer_size = _get_footer_size(fo)
+    logger.debug("Footer size in bytes: %s", footer_size)
+    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
+    tin = TTransport.TFileObjectTransport(fo)
+    pin = TCompactProtocol.TCompactProtocol(tin)
+    fmd = FileMetaData()
+    fmd.read(pin)
+    return fmd
+
+
+def _read_page_header(fo):
+    """Reads the page_header from the given fo"""
+    tin = TTransport.TFileObjectTransport(fo)
+    pin = TCompactProtocol.TCompactProtocol(tin)
+    ph = PageHeader()
+    ph.read(pin)
+    return ph
+
+
+def read_footer(filename):
+    """Reads and returns the FileMetaData object for the given file."""
+    with open(filename, 'rb') as fo:
+        if not _check_header_magic_bytes(fo) or \
+           not _check_footer_magic_bytes(fo):
+            raise ParquetFormatException("{0} is not a valid parquet file "
+                                         "(missing magic bytes)"
+                                         .format(filename))
+        return _read_footer(fo)
+
+
+def _get_name(type_, value):
+    """Returns the name for the given value of the given type_ unless value is
+    None, in which case it returns empty string"""
+    return type_._VALUES_TO_NAMES[value] if value is not None else "None"
+
+
+def _get_offset(cmd):
+    """Returns the offset into the cmd based upon if it's a dictionary page or
+    a data page"""
+    dict_offset = cmd.dictionary_page_offset
+    data_offset = cmd.data_page_offset
+    if dict_offset is None or data_offset < dict_offset:
+        return data_offset
+    return dict_offset
+
+
+def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
+    def println(value):
+        out.write(value + "\n")
+    footer = read_footer(filename)
+    println("File Metadata: {0}".format(filename))
+    println("  Version: {0}".format(footer.version))
+    println("  Num Rows: {0}".format(footer.num_rows))
+    println("  k/v metadata: ")
+    if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
+        for kv in footer.key_value_metadata:
+            println("    {0}={1}".format(kv.key, kv.value))
+    else:
+        println("    (none)")
+    println("  schema: ")
+    for se in footer.schema:
+        println("    {name} ({type}): length={type_length}, "
+                "repetition={repetition_type}, "
+                "children={num_children}, "
+                "converted_type={converted_type}".format(
+                    name=se.name,
+                    type=Type._VALUES_TO_NAMES[se.type] if se.type else None,
+                    type_length=se.type_length,
+                    repetition_type=_get_name(FieldRepetitionType,
+                                              se.repetition_type),
+                    num_children=se.num_children,
+                    converted_type=se.converted_type))
+    if show_row_group_metadata:
+        println("  row groups: ")
+        for rg in footer.row_groups:
+            num_rows = rg.num_rows
+            bytes = rg.total_byte_size
+            println(
+                "  rows={num_rows}, bytes={bytes}".format(num_rows=num_rows,
+                                                          bytes=bytes))
+            println("    chunks:")
+            for cg in rg.columns:
+                cmd = cg.meta_data
+                println("      type={type} file_offset={offset} "
+                        "compression={codec} "
+                        "encodings={encodings} path_in_schema={path_in_schema} "
+                        "num_values={num_values} uncompressed_bytes={raw_bytes} "
+                        "compressed_bytes={compressed_bytes} "
+                        "data_page_offset={data_page_offset} "
+                        "dictionary_page_offset={dictionary_page_offset}".format(
+                            type=_get_name(Type, cmd.type),
+                            offset=cg.file_offset,
+                            codec=_get_name(CompressionCodec, cmd.codec),
+                            encodings=",".join(
+                                [_get_name(
+                                    Encoding, s) for s in cmd.encodings]),
+                            path_in_schema=cmd.path_in_schema,
+                            num_values=cmd.num_values,
+                            raw_bytes=cmd.total_uncompressed_size,
+                            compressed_bytes=cmd.total_compressed_size,
+                            data_page_offset=cmd.data_page_offset,
+                            dictionary_page_offset=cmd.dictionary_page_offset))
+                with open(filename, 'rb') as fo:
+                    offset = _get_offset(cmd)
+                    fo.seek(offset, 0)
+                    values_read = 0
+                    println("      pages: ")
+                    while values_read < num_rows:
+                        ph = _read_page_header(fo)
+                        # seek past current page.
+                        fo.seek(ph.compressed_page_size, 1)
+                        daph = ph.data_page_header
+                        type_ = _get_name(PageType, ph.type)
+                        raw_bytes = ph.uncompressed_page_size
+                        num_values = None
+                        if ph.type == PageType.DATA_PAGE:
+                            num_values = daph.num_values
+                            values_read += num_values
+                        if ph.type == PageType.DICTIONARY_PAGE:
+                            pass
+                            #num_values = diph.num_values
+
+                        encoding_type = None
+                        def_level_encoding = None
+                        rep_level_encoding = None
+                        if daph:
+                            encoding_type = _get_name(Encoding, daph.encoding)
+                            def_level_encoding = _get_name(
+                                Encoding, daph.definition_level_encoding)
+                            rep_level_encoding = _get_name(
+                                Encoding, daph.repetition_level_encoding)
+
+                        println("        page header: type={type} "
+                                "uncompressed_size={raw_bytes} "
+                                "num_values={num_values} encoding={encoding} "
+                                "def_level_encoding={def_level_encoding} "
+                                "rep_level_encoding={rep_level_encoding}".format(
+                                    type=type_,
+                                    raw_bytes=raw_bytes,
+                                    num_values=num_values,
+                                    encoding=encoding_type,
+                                    def_level_encoding=def_level_encoding,
+                                    rep_level_encoding=rep_level_encoding))
+
+
+def _read_page(fo, page_header, column_metadata):
+    """Internal function to read the data page from the given file-object
+    and convert it to raw, uncompressed bytes (if necessary)."""
+    bytes_from_file = fo.read(page_header.compressed_page_size)
+    codec = column_metadata.codec
+    if codec is not None and codec != CompressionCodec.UNCOMPRESSED:
+        if column_metadata.codec == CompressionCodec.SNAPPY:
+            raw_bytes = snappy.decompress(bytes_from_file)
+        elif column_metadata.codec == CompressionCodec.GZIP:
+            io_obj = cStringIO.StringIO(bytes_from_file)
+            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
+                raw_bytes = f.read()
+        else:
+            raise ParquetFormatException(
+                "Unsupported Codec: {0}".format(codec))
+    else:
+        raw_bytes = bytes_from_file
+    logger.debug(
+        "Read page with compression type {0}. Bytes {1} -> {2}".format(
+        _get_name(CompressionCodec, codec),
+        page_header.compressed_page_size,
+        page_header.uncompressed_page_size))
+    assert len(raw_bytes) == page_header.uncompressed_page_size, \
+        "found {0} raw bytes (expected {1})".format(
+            len(raw_bytes),
+            page_header.uncompressed_page_size)
+    return raw_bytes
+
+
+def _read_data(fo, fo_encoding, value_count, bit_width):
+    """Internal method to read data from the file-object using the given
+    encoding. The data could be definition levels, repetition levels, or
+    actual values.
+    """
+    vals = []
+    if fo_encoding == Encoding.RLE:
+        seen = 0
+        while seen < value_count:
+            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
+            if values is None:
+                break  # EOF was reached.
+            vals += values
+            seen += len(values)
+    elif fo_encoding == Encoding.BIT_PACKED:
+        raise NotImplementedError("Bit packing not yet supported")
+
+    return vals
+
+
+def read_data_page(fo, schema_helper, page_header, column_metadata,
+                   dictionary):
+    """Reads the datapage from the given file-like object based upon the
+    metadata in the schema_helper, page_header, column_metadata, and
+    (optional) dictionary. Returns a list of values.
+    """
+    daph = page_header.data_page_header
+    raw_bytes = _read_page(fo, page_header, column_metadata)
+    io_obj = cStringIO.StringIO(raw_bytes)
+    vals = []
+
+    logger.debug("  definition_level_encoding: %s",
+                 _get_name(Encoding, daph.definition_level_encoding))
+    logger.debug("  repetition_level_encoding: %s",
+                 _get_name(Encoding, daph.repetition_level_encoding))
+    logger.debug("  encoding: %s", _get_name(Encoding, daph.encoding))
+
+    # definition levels are skipped if data is required.
+    if not schema_helper.is_required(column_metadata.path_in_schema[-1]):
+        max_definition_level = schema_helper.max_definition_level(
+            column_metadata.path_in_schema)
+        bit_width = encoding.width_from_max_int(max_definition_level)
+        logger.debug("  max def level: %s   bit_width: %s",
+                     max_definition_level, bit_width)
+        if bit_width == 0:
+            definition_levels = [0] * daph.num_values
+        else:
+            definition_levels = _read_data(io_obj,
+                                           daph.definition_level_encoding,
+                                           daph.num_values,
+                                           bit_width)
+
+        logger.debug("  Definition levels: %s", len(definition_levels))
+
+    # repetition levels are skipped if data is at the first level.
+    if len(column_metadata.path_in_schema) > 1:
+        max_repetition_level = schema_helper.max_repetition_level(
+            column_metadata.path_in_schema)
+        bit_width = encoding.width_from_max_int(max_repetition_level)
+        repetition_levels = _read_data(io_obj,
+                                       daph.repetition_level_encoding,
+                                       daph.num_values,
+                                       bit_width)
+
+    # TODO Actually use the definition and repetition levels.
+
+    if daph.encoding == Encoding.PLAIN:
+        for i in range(daph.num_values):
+            vals.append(
+                encoding.read_plain(io_obj, column_metadata.type, None))
+        logger.debug("  Values: %s", len(vals))
+    elif daph.encoding == Encoding.PLAIN_DICTIONARY:
+        # bit_width is stored as single byte.
+        bit_width = struct.unpack("<B", io_obj.read(1))[0]
+        logger.debug("bit_width: %d", bit_width)
+        total_seen = 0
+        dict_values_bytes = io_obj.read()
+        dict_values_io_obj = cStringIO.StringIO(dict_values_bytes)
+        # TODO jcrobak -- not sure that this loop is needed?
+        while total_seen < daph.num_values:
+            values = encoding.read_rle_bit_packed_hybrid(
+                dict_values_io_obj, bit_width, len(dict_values_bytes))
+            if len(values) + total_seen > daph.num_values:
+                values = values[0: daph.num_values - total_seen]
+            vals += [dictionary[v] for v in values]
+            total_seen += len(values)
+    else:
+        raise ParquetFormatException("Unsupported encoding: %s",
+                                     _get_name(Encoding, daph.encoding))
+    return vals
+
+
+def read_dictionary_page(fo, page_header, column_metadata):
+    raw_bytes = _read_page(fo, page_header, column_metadata)
+    io_obj = cStringIO.StringIO(raw_bytes)
+    dict_items = []
+    while io_obj.tell() < len(raw_bytes):
+        # TODO - length for fixed byte array
+        dict_items.append(
+            encoding.read_plain(io_obj, column_metadata.type, None))
+    return dict_items
+
+
+def _dump(fo, options, out=sys.stdout):
+    def println(value):
+        out.write(value + "\n")
+
+    footer = _read_footer(fo)
+    schema_helper = schema.SchemaHelper(footer.schema)
+    total_count = 0
+    for rg in footer.row_groups:
+        res = defaultdict(list)
+        row_group_rows = rg.num_rows
+        for idx, cg in enumerate(rg.columns):
+            dict_items = []
+            cmd = cg.meta_data
+            # skip if the list of columns is specified and this isn't in it
+            if options.col and not ".".join(cmd.path_in_schema) in options.col:
+                continue
+
+            offset = _get_offset(cmd)
+            fo.seek(offset, 0)
+            values_seen = 0
+            logger.debug("reading column chunk of type: %s",
+                         _get_name(Type, cmd.type))
+            while values_seen < row_group_rows:
+                ph = _read_page_header(fo)
+                logger.debug("Reading page (type=%s, "
+                             "uncompressed=%s bytes, "
+                             "compressed=%s bytes)",
+                             _get_name(PageType, ph.type),
+                             ph.uncompressed_page_size,
+                             ph.compressed_page_size)
+
+                if ph.type == PageType.DATA_PAGE:
+                    values = read_data_page(fo, schema_helper, ph, cmd,
+                                            dict_items)
+                    res[".".join(cmd.path_in_schema)] += values
+                    values_seen += ph.data_page_header.num_values
+                elif ph.type == PageType.DICTIONARY_PAGE:
+                    logger.debug(ph)
+                    assert dict_items == []
+                    dict_items = read_dictionary_page(fo, ph, cmd)
+                    logger.debug("Dictionary: %s", str(dict_items))
+                else:
+                    logger.warn("Skipping unknown page type={0}".format(
+                        _get_name(PageType, ph.type)))
+        keys = options.col if options.col else [s.name for s in
+                                                footer.schema if s.name in res]
+        if options.format == 'custom':
+            custom_datatype = out(res, keys)
+            return custom_datatype
+        if options.format == "csv" and not options.no_headers:
+            println("\t".join(keys))
+        for i in range(rg.num_rows):
+            if options.limit != -1 and i + total_count >= options.limit:
+                return
+            if options.format == "csv":
+                println("\t".join(str(res[k][i]) for k in keys))
+            elif options.format == "json":
+                println(json.dumps(dict([(k, res[k][i]) for k in keys])))
+        total_count += rg.num_rows
+
+
+def dump(filename, options, out=sys.stdout):
+    with open(filename, 'rb') as fo:
+        return _dump(fo, options=options, out=out)
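
For reference, the footer-handling functions above rely on the parquet file tail layout: the last 8 bytes of the file are a 4-byte little-endian footer length followed by the 4-byte magic `PAR1`. A small sketch of that layout (the helper name `tail_info` is made up for illustration):

```python
import struct

def tail_info(filename):
    """Return (footer_size, magic) read from the last 8 bytes of a parquet file."""
    with open(filename, 'rb') as fo:
        fo.seek(-8, 2)                                 # 8 bytes before the end of the file
        footer_size = struct.unpack("<i", fo.read(4))[0]
        magic = fo.read(4)                             # expected to be 'PAR1'
    return footer_size, magic
```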

+ 54 - 0
desktop/core/ext-py/parquet-python/parquet/__main__.py

@@ -0,0 +1,54 @@
+import argparse
+import logging
+import sys
+
+
+def setup_logging(options=None):
+    level = logging.DEBUG if options is not None and options.debug \
+        else logging.WARNING
+    console = logging.StreamHandler()
+    console.setLevel(level)
+    formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
+    console.setFormatter(formatter)
+    logging.getLogger('parquet').addHandler(console)
+
+
+def main(argv=None):
+    argv = argv or sys.argv[1:]
+
+    parser = argparse.ArgumentParser('parquet',
+                                     description='Read parquet files')
+    parser.add_argument('--metadata', action='store_true',
+                        help='show metadata on file')
+    parser.add_argument('--row-group-metadata', action='store_true',
+                        help="show per row group metadata")
+    parser.add_argument('--no-data', action='store_true',
+                        help="don't dump any data from the file")
+    parser.add_argument('--limit', action='store', type=int, default=-1,
+                        help='max records to output')
+    parser.add_argument('--col', action='append', type=str,
+                        help='only include this column (can be '
+                             'specified multiple times)')
+    parser.add_argument('--no-headers', action='store_true',
+                        help='skip headers in output (only applies if '
+                             'format=csv)')
+    parser.add_argument('--format', action='store', type=str, default='csv',
+                        help='format for the output data. can be csv or json.')
+    parser.add_argument('--debug', action='store_true',
+                        help='log debug info to stderr')
+    parser.add_argument('file',
+                        help='path to the file to parse')
+
+    args = parser.parse_args(argv)
+
+    setup_logging(args)
+
+    import parquet
+
+    if args.metadata:
+        parquet.dump_metadata(args.file, args.row_group_metadata)
+    if not args.no_data:
+        parquet.dump(args.file, args)
+
+if __name__ == '__main__':
+    main()
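
The entry point can also be driven from Python; a hypothetical equivalent of running `python -m parquet --metadata --limit 10 example.parquet` from the shell (the file name is made up):

```python
from parquet.__main__ import main

# Passes the same argument list that argparse would otherwise receive from the command line.
main(["--metadata", "--limit", "10", "example.parquet"])
```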

+ 20 - 0
desktop/core/ext-py/parquet-python/parquet/bitstring.py

@@ -0,0 +1,20 @@
+
+SINGLE_BIT_MASK = [1 << x for x in range(7, -1, -1)]
+
+
+class BitString(object):
+    """Wraps a byte string and allows reading individual bits by index."""
+
+    def __init__(self, bytes, length=None, offset=None):
+        self.bytes = bytes
+        self.offset = offset if offset is not None else 0
+        self.length = length if length is not None else 8 * len(bytes) - self.offset
+
+    def __getitem__(self, key):
+        if isinstance(key, slice):
+            raise NotImplementedError("slicing a BitString is not supported")
+        if key < 0 or key >= self.length:
+            raise IndexError()
+        # Locate the byte containing the requested bit, then mask that bit out.
+        byte_index, bit_offset = divmod(self.offset + key, 8)
+        return ord(self.bytes[byte_index]) & SINGLE_BIT_MASK[bit_offset]
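
A quick sketch of indexing into `BitString` above (Python 2 byte strings; the bit pattern is made up):

```python
from parquet.bitstring import BitString

bits = BitString("\xa0")   # binary 1010 0000, most significant bit first
print bool(bits[0]), bool(bits[1]), bool(bits[2])   # True False True
```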

+ 11 - 0
desktop/core/ext-py/parquet-python/parquet/constants.py

@@ -0,0 +1,11 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+from ttypes import *
+

+ 219 - 0
desktop/core/ext-py/parquet-python/parquet/encoding.py

@@ -0,0 +1,219 @@
+import array
+import math
+import struct
+import cStringIO
+import logging
+
+from ttypes import Type
+
+logger = logging.getLogger("parquet")
+
+
+def read_plain_boolean(fo):
+    """Reads a boolean using the plain encoding"""
+    raise NotImplementedError("plain boolean decoding is not implemented")
+
+
+def read_plain_int32(fo):
+    """Reads a 32-bit int using the plain encoding"""
+    tup = struct.unpack("<i", fo.read(4))
+    return tup[0]
+
+
+def read_plain_int64(fo):
+    """Reads a 64-bit int using the plain encoding"""
+    tup = struct.unpack("<q", fo.read(8))
+    return tup[0]
+
+
+def read_plain_int96(fo):
+    """Reads a 96-bit int using the plain encoding"""
+    tup = struct.unpack("<qi", fo.read(12))
+    return tup[0] << 32 | tup[1]
+
+
+def read_plain_float(fo):
+    """Reads a 32-bit float using the plain encoding"""
+    tup = struct.unpack("<f", fo.read(4))
+    return tup[0]
+
+
+def read_plain_double(fo):
+    """Reads a 64-bit float (double) using the plain encoding"""
+    tup = struct.unpack("<d", fo.read(8))
+    return tup[0]
+
+
+def read_plain_byte_array(fo):
+    """Reads a byte array using the plain encoding"""
+    length = read_plain_int32(fo)
+    return fo.read(length)
+
+
+def read_plain_byte_array_fixed(fo, fixed_length):
+    """Reads a byte array of the given fixed_length"""
+    return fo.read(fixed_length)
+
+DECODE_PLAIN = {
+    Type.BOOLEAN: read_plain_boolean,
+    Type.INT32: read_plain_int32,
+    Type.INT64: read_plain_int64,
+    Type.INT96: read_plain_int96,
+    Type.FLOAT: read_plain_float,
+    Type.DOUBLE: read_plain_double,
+    Type.BYTE_ARRAY: read_plain_byte_array,
+    Type.FIXED_LEN_BYTE_ARRAY: read_plain_byte_array_fixed
+}
+
+
+def read_plain(fo, type_, type_length):
+    conv = DECODE_PLAIN[type_]
+    if type_ == Type.FIXED_LEN_BYTE_ARRAY:
+        return conv(fo, type_length)
+    return conv(fo)
+
+
+def read_unsigned_var_int(fo):
+    result = 0
+    shift = 0
+    while True:
+        byte = struct.unpack("<B", fo.read(1))[0]
+        result |= ((byte & 0x7F) << shift)
+        if (byte & 0x80) == 0:
+            break
+        shift += 7
+    return result
+
+
+def byte_width(bit_width):
+    "Returns the byte width for the given bit_width"
+    return (bit_width + 7) / 8
+
+
+def read_rle(fo, header, bit_width):
+    """Read a run-length encoded run from the given fo with the given header
+    and bit_width.
+
+    The count is determined from the header and the width is used to grab the
+    value that's repeated. Yields the value repeated count times.
+    """
+    count = header >> 1
+    zero_data = "\x00\x00\x00\x00"
+    data = ""
+    width = byte_width(bit_width)
+    if width >= 1:
+        data += fo.read(1)
+    if width >= 2:
+        data += fo.read(1)
+    if width >= 3:
+        data += fo.read(1)
+    if width == 4:
+        data += fo.read(1)
+    data = data + zero_data[len(data):]
+    value = struct.unpack("<i", data)[0]
+    logger.debug("Read RLE group with value %s of byte-width %s and count %s",
+                 value, width, count)
+    for i in range(count):
+        yield value
+
+
+def width_from_max_int(value):
+    """Converts the value specified to a bit_width."""
+    return int(math.ceil(math.log(value + 1, 2)))
+
+
+def _mask_for_bits(i):
+    """Helper function for read_bitpacked to generage a mask to grab i bits."""
+    return (1 << i) - 1
+
+
+def read_bitpacked(fo, header, width):
+    """Reads a bitpacked run of the rle/bitpack hybrid.
+
+    Supports width >8 (crossing bytes).
+    """
+    num_groups = header >> 1
+    count = num_groups * 8
+    byte_count = (width * count)/8
+    logger.debug("Reading a bit-packed run with: %s groups, count %s, bytes %s",
+        num_groups, count, byte_count)
+    raw_bytes = array.array('B', fo.read(byte_count)).tolist()
+    current_byte = 0
+    b = raw_bytes[current_byte]
+    mask = _mask_for_bits(width)
+    bits_wnd_l = 8
+    bits_wnd_r = 0
+    res = []
+    total = len(raw_bytes) * 8
+    while total >= width:
+        # TODO zero-padding could produce extra zero-values
+        logger.debug("  read bitpacked: width=%s window=(%s %s) b=%s,"
+                     " current_byte=%s",
+                     width, bits_wnd_l, bits_wnd_r, bin(b), current_byte)
+        if bits_wnd_r >= 8:
+            bits_wnd_r -= 8
+            bits_wnd_l -= 8
+            b >>= 8
+        elif bits_wnd_l - bits_wnd_r >= width:
+            res.append((b >> bits_wnd_r) & mask)
+            total -= width
+            bits_wnd_r += width
+            logger.debug("  read bitpackage: added: %s", res[-1])
+        elif current_byte + 1 < len(raw_bytes):
+            current_byte += 1
+            b |= (raw_bytes[current_byte] << bits_wnd_l)
+            bits_wnd_l += 8
+    return res
+
+
+def read_bitpacked_deprecated(fo, byte_count, count, width):
+    raw_bytes = array.array('B', fo.read(byte_count)).tolist()
+
+    mask = _mask_for_bits(width)
+    index = 0
+    res = []
+    word = 0
+    bits_in_word = 0
+    while len(res) < count and index <= len(raw_bytes):
+        logger.debug("index = %d", index)
+        logger.debug("bits in word = %d", bits_in_word)
+        logger.debug("word = %s", bin(word))
+        if bits_in_word >= width:
+            # how many bits over the value is stored
+            offset = (bits_in_word - width)
+            logger.debug("offset = %d", offset)
+
+            # figure out the value
+            value = (word & (mask << offset)) >> offset
+            logger.debug("value = %d (%s)", value, bin(value))
+            res.append(value)
+
+            bits_in_word -= width
+        else:
+            word = (word << 8) | raw_bytes[index]
+            index += 1
+            bits_in_word += 8
+    return res
+
+
+def read_rle_bit_packed_hybrid(fo, width, length=None):
+    """Implemenation of a decoder for the rel/bit-packed hybrid encoding.
+
+    If length is not specified, then a 32-bit int is read first to grab the
+    length of the encoded data.
+    """
+    io_obj = fo
+    if length is None:
+        length = read_plain_int32(fo)
+        raw_bytes = fo.read(length)
+        if raw_bytes == '':
+            return None
+        io_obj = cStringIO.StringIO(raw_bytes)
+    res = []
+    while io_obj.tell() < length:
+        header = read_unsigned_var_int(io_obj)
+        if header & 1 == 0:
+            res += read_rle(io_obj, header, width)
+        else:
+            res += read_bitpacked(io_obj, header, width)
+    return res
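
To make the hybrid layout concrete, here is a small sketch (Python 2, values hand-built for illustration) of decoding a single RLE run with `read_rle_bit_packed_hybrid`: the varint header byte `0x0a` is 10, whose low bit 0 marks an RLE run of count `10 >> 1 = 5`, and with `bit_width=3` the repeated value occupies one byte:

```python
import struct
from cStringIO import StringIO

from parquet import encoding

payload = "\x0a\x04"                                 # header byte + repeated value 4
data = struct.pack("<i", len(payload)) + payload     # 4-byte little-endian length prefix
print encoding.read_rle_bit_packed_hybrid(StringIO(data), 3)   # -> [4, 4, 4, 4, 4]
```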

+ 39 - 0
desktop/core/ext-py/parquet-python/parquet/schema.py

@@ -0,0 +1,39 @@
+"""Utils for working with the parquet thrift models"""
+
+from ttypes import FieldRepetitionType
+
+
+class SchemaHelper(object):
+
+    def __init__(self, schema_elements):
+        self.schema_elements = schema_elements
+        self.schema_elements_by_name = dict(
+            [(se.name, se) for se in schema_elements])
+        assert len(self.schema_elements) == len(self.schema_elements_by_name)
+
+    def schema_element(self, name):
+        """Get the schema element with the given name."""
+        return self.schema_elements_by_name[name]
+
+    def is_required(self, name):
+        """Returns true iff the schema element with the given name is
+        required"""
+        return self.schema_element(name).repetition_type == FieldRepetitionType.REQUIRED
+
+    def max_repetition_level(self, path):
+        """get the max repetition level for the given schema path."""
+        max_level = 0
+        for part in path:
+            se = self.schema_element(part)
+            # Only REPEATED fields contribute to the repetition level.
+            if se.repetition_type == FieldRepetitionType.REPEATED:
+                max_level += 1
+        return max_level
+
+    def max_definition_level(self, path):
+        """get the max definition level for the given schema path."""
+        max_level = 0
+        for part in path:
+            se = self.schema_element(part)
+            if se.repetition_type != FieldRepetitionType.REQUIRED:
+                max_level += 1
+        return max_level
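
A small sketch of `SchemaHelper` on a hand-built two-element schema (names and repetition settings are made up for illustration):

```python
from parquet.ttypes import SchemaElement, FieldRepetitionType, Type
from parquet.schema import SchemaHelper

elements = [
    SchemaElement(name="document", num_children=1,
                  repetition_type=FieldRepetitionType.REQUIRED),
    SchemaElement(name="name", type=Type.BYTE_ARRAY,
                  repetition_type=FieldRepetitionType.OPTIONAL),
]
helper = SchemaHelper(elements)
print helper.is_required("name")             # False
print helper.max_definition_level(["name"])  # 1 (one non-REQUIRED field in the path)
```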

+ 1424 - 0
desktop/core/ext-py/parquet-python/parquet/ttypes.py

@@ -0,0 +1,1424 @@
+#
+# Autogenerated by Thrift Compiler (0.9.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class Type:
+  """
+  Types supported by Parquet.  These types are intended to be used in combination
+  with the encodings to control the on disk storage format.
+  For example INT16 is not included as a type since a good encoding of INT32
+  would handle this.
+  """
+  BOOLEAN = 0
+  INT32 = 1
+  INT64 = 2
+  INT96 = 3
+  FLOAT = 4
+  DOUBLE = 5
+  BYTE_ARRAY = 6
+  FIXED_LEN_BYTE_ARRAY = 7
+
+  _VALUES_TO_NAMES = {
+    0: "BOOLEAN",
+    1: "INT32",
+    2: "INT64",
+    3: "INT96",
+    4: "FLOAT",
+    5: "DOUBLE",
+    6: "BYTE_ARRAY",
+    7: "FIXED_LEN_BYTE_ARRAY",
+  }
+
+  _NAMES_TO_VALUES = {
+    "BOOLEAN": 0,
+    "INT32": 1,
+    "INT64": 2,
+    "INT96": 3,
+    "FLOAT": 4,
+    "DOUBLE": 5,
+    "BYTE_ARRAY": 6,
+    "FIXED_LEN_BYTE_ARRAY": 7,
+  }
+
+class ConvertedType:
+  """
+  Common types used by frameworks(e.g. hive, pig) using parquet.  This helps map
+  between types in those frameworks to the base types in parquet.  This is only
+  metadata and not needed to read or write the data.
+  """
+  UTF8 = 0
+  MAP = 1
+  MAP_KEY_VALUE = 2
+  LIST = 3
+
+  _VALUES_TO_NAMES = {
+    0: "UTF8",
+    1: "MAP",
+    2: "MAP_KEY_VALUE",
+    3: "LIST",
+  }
+
+  _NAMES_TO_VALUES = {
+    "UTF8": 0,
+    "MAP": 1,
+    "MAP_KEY_VALUE": 2,
+    "LIST": 3,
+  }
+
+class FieldRepetitionType:
+  """
+  Representation of Schemas
+  """
+  REQUIRED = 0
+  OPTIONAL = 1
+  REPEATED = 2
+
+  _VALUES_TO_NAMES = {
+    0: "REQUIRED",
+    1: "OPTIONAL",
+    2: "REPEATED",
+  }
+
+  _NAMES_TO_VALUES = {
+    "REQUIRED": 0,
+    "OPTIONAL": 1,
+    "REPEATED": 2,
+  }
+
+class Encoding:
+  """
+  Encodings supported by Parquet.  Not all encodings are valid for all types.  These
+  enums are also used to specify the encoding of definition and repetition levels.
+  See the accompanying doc for the details of the more complicated encodings.
+  """
+  PLAIN = 0
+  GROUP_VAR_INT = 1
+  PLAIN_DICTIONARY = 2
+  RLE = 3
+  BIT_PACKED = 4
+
+  _VALUES_TO_NAMES = {
+    0: "PLAIN",
+    1: "GROUP_VAR_INT",
+    2: "PLAIN_DICTIONARY",
+    3: "RLE",
+    4: "BIT_PACKED",
+  }
+
+  _NAMES_TO_VALUES = {
+    "PLAIN": 0,
+    "GROUP_VAR_INT": 1,
+    "PLAIN_DICTIONARY": 2,
+    "RLE": 3,
+    "BIT_PACKED": 4,
+  }
+
+class CompressionCodec:
+  """
+  Supported compression algorithms.
+  """
+  UNCOMPRESSED = 0
+  SNAPPY = 1
+  GZIP = 2
+  LZO = 3
+
+  _VALUES_TO_NAMES = {
+    0: "UNCOMPRESSED",
+    1: "SNAPPY",
+    2: "GZIP",
+    3: "LZO",
+  }
+
+  _NAMES_TO_VALUES = {
+    "UNCOMPRESSED": 0,
+    "SNAPPY": 1,
+    "GZIP": 2,
+    "LZO": 3,
+  }
+
+class PageType:
+  DATA_PAGE = 0
+  INDEX_PAGE = 1
+  DICTIONARY_PAGE = 2
+
+  _VALUES_TO_NAMES = {
+    0: "DATA_PAGE",
+    1: "INDEX_PAGE",
+    2: "DICTIONARY_PAGE",
+  }
+
+  _NAMES_TO_VALUES = {
+    "DATA_PAGE": 0,
+    "INDEX_PAGE": 1,
+    "DICTIONARY_PAGE": 2,
+  }
+
+
+class SchemaElement:
+  """
+  Represents a element inside a schema definition.
+   - if it is a group (inner node) then type is undefined and num_children is defined
+   - if it is a primitive type (leaf) then type is defined and num_children is undefined
+  the nodes are listed in depth first traversal order.
+
+  Attributes:
+   - type: Data type for this field. Not set if the current element is a non-leaf node
+   - type_length: If type is FIXED_LEN_BYTE_ARRAY, this is the byte length of the vales.
+  Otherwise, if specified, this is the maximum bit length to store any of the values.
+  (e.g. a low cardinality INT col could have this set to 3).  Note that this is
+  in the schema, and therefore fixed for the entire file.
+   - repetition_type: repetition of the field. The root of the schema does not have a repetition_type.
+  All other nodes must have one
+   - name: Name of the field in the schema
+   - num_children: Nested fields.  Since thrift does not support nested fields,
+  the nesting is flattened to a single list by a depth-first traversal.
+  The children count is used to construct the nested relationship.
+  This field is not set when the element is a primitive type
+   - converted_type: When the schema is the result of a conversion from another model
+  Used to record the original type to help with cross conversion.
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'type', None, None, ), # 1
+    (2, TType.I32, 'type_length', None, None, ), # 2
+    (3, TType.I32, 'repetition_type', None, None, ), # 3
+    (4, TType.STRING, 'name', None, None, ), # 4
+    (5, TType.I32, 'num_children', None, None, ), # 5
+    (6, TType.I32, 'converted_type', None, None, ), # 6
+  )
+
+  def __init__(self, type=None, type_length=None, repetition_type=None, name=None, num_children=None, converted_type=None,):
+    self.type = type
+    self.type_length = type_length
+    self.repetition_type = repetition_type
+    self.name = name
+    self.num_children = num_children
+    self.converted_type = converted_type
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.type = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.type_length = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.repetition_type = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.I32:
+          self.num_children = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.I32:
+          self.converted_type = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SchemaElement')
+    if self.type is not None:
+      oprot.writeFieldBegin('type', TType.I32, 1)
+      oprot.writeI32(self.type)
+      oprot.writeFieldEnd()
+    if self.type_length is not None:
+      oprot.writeFieldBegin('type_length', TType.I32, 2)
+      oprot.writeI32(self.type_length)
+      oprot.writeFieldEnd()
+    if self.repetition_type is not None:
+      oprot.writeFieldBegin('repetition_type', TType.I32, 3)
+      oprot.writeI32(self.repetition_type)
+      oprot.writeFieldEnd()
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 4)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.num_children is not None:
+      oprot.writeFieldBegin('num_children', TType.I32, 5)
+      oprot.writeI32(self.num_children)
+      oprot.writeFieldEnd()
+    if self.converted_type is not None:
+      oprot.writeFieldBegin('converted_type', TType.I32, 6)
+      oprot.writeI32(self.converted_type)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.name is None:
+      raise TProtocol.TProtocolException(message='Required field name is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class DataPageHeader:
+  """
+  Data page header
+
+  Attributes:
+   - num_values: Number of values, including NULLs, in this data page. *
+   - encoding: Encoding used for this data page *
+   - definition_level_encoding: Encoding used for definition levels *
+   - repetition_level_encoding: Encoding used for repetition levels *
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'num_values', None, None, ), # 1
+    (2, TType.I32, 'encoding', None, None, ), # 2
+    (3, TType.I32, 'definition_level_encoding', None, None, ), # 3
+    (4, TType.I32, 'repetition_level_encoding', None, None, ), # 4
+  )
+
+  def __init__(self, num_values=None, encoding=None, definition_level_encoding=None, repetition_level_encoding=None,):
+    self.num_values = num_values
+    self.encoding = encoding
+    self.definition_level_encoding = definition_level_encoding
+    self.repetition_level_encoding = repetition_level_encoding
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.num_values = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.encoding = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.definition_level_encoding = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I32:
+          self.repetition_level_encoding = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('DataPageHeader')
+    if self.num_values is not None:
+      oprot.writeFieldBegin('num_values', TType.I32, 1)
+      oprot.writeI32(self.num_values)
+      oprot.writeFieldEnd()
+    if self.encoding is not None:
+      oprot.writeFieldBegin('encoding', TType.I32, 2)
+      oprot.writeI32(self.encoding)
+      oprot.writeFieldEnd()
+    if self.definition_level_encoding is not None:
+      oprot.writeFieldBegin('definition_level_encoding', TType.I32, 3)
+      oprot.writeI32(self.definition_level_encoding)
+      oprot.writeFieldEnd()
+    if self.repetition_level_encoding is not None:
+      oprot.writeFieldBegin('repetition_level_encoding', TType.I32, 4)
+      oprot.writeI32(self.repetition_level_encoding)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.num_values is None:
+      raise TProtocol.TProtocolException(message='Required field num_values is unset!')
+    if self.encoding is None:
+      raise TProtocol.TProtocolException(message='Required field encoding is unset!')
+    if self.definition_level_encoding is None:
+      raise TProtocol.TProtocolException(message='Required field definition_level_encoding is unset!')
+    if self.repetition_level_encoding is None:
+      raise TProtocol.TProtocolException(message='Required field repetition_level_encoding is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class IndexPageHeader:
+
+  thrift_spec = (
+  )
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('IndexPageHeader')
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class DictionaryPageHeader:
+  """
+  TODO: *
+
+  Attributes:
+   - num_values: Number of values in the dictionary *
+   - encoding: Encoding using this dictionary page *
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'num_values', None, None, ), # 1
+    (2, TType.I32, 'encoding', None, None, ), # 2
+  )
+
+  def __init__(self, num_values=None, encoding=None,):
+    self.num_values = num_values
+    self.encoding = encoding
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.num_values = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.encoding = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('DictionaryPageHeader')
+    if self.num_values is not None:
+      oprot.writeFieldBegin('num_values', TType.I32, 1)
+      oprot.writeI32(self.num_values)
+      oprot.writeFieldEnd()
+    if self.encoding is not None:
+      oprot.writeFieldBegin('encoding', TType.I32, 2)
+      oprot.writeI32(self.encoding)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.num_values is None:
+      raise TProtocol.TProtocolException(message='Required field num_values is unset!')
+    if self.encoding is None:
+      raise TProtocol.TProtocolException(message='Required field encoding is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PageHeader:
+  """
+  Attributes:
+   - type: the type of the page: indicates which of the *_header fields is set *
+   - uncompressed_page_size: Uncompressed page size in bytes (not including this header) *
+   - compressed_page_size: Compressed page size in bytes (not including this header) *
+   - crc: 32-bit CRC for the data below. This allows for disabling checksumming in HDFS
+  if only a few pages need to be read
+
+   - data_page_header
+   - index_page_header
+   - dictionary_page_header
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'type', None, None, ), # 1
+    (2, TType.I32, 'uncompressed_page_size', None, None, ), # 2
+    (3, TType.I32, 'compressed_page_size', None, None, ), # 3
+    (4, TType.I32, 'crc', None, None, ), # 4
+    (5, TType.STRUCT, 'data_page_header', (DataPageHeader, DataPageHeader.thrift_spec), None, ), # 5
+    (6, TType.STRUCT, 'index_page_header', (IndexPageHeader, IndexPageHeader.thrift_spec), None, ), # 6
+    (7, TType.STRUCT, 'dictionary_page_header', (DictionaryPageHeader, DictionaryPageHeader.thrift_spec), None, ), # 7
+  )
+
+  def __init__(self, type=None, uncompressed_page_size=None, compressed_page_size=None, crc=None, data_page_header=None, index_page_header=None, dictionary_page_header=None,):
+    self.type = type
+    self.uncompressed_page_size = uncompressed_page_size
+    self.compressed_page_size = compressed_page_size
+    self.crc = crc
+    self.data_page_header = data_page_header
+    self.index_page_header = index_page_header
+    self.dictionary_page_header = dictionary_page_header
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.type = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.uncompressed_page_size = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.compressed_page_size = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I32:
+          self.crc = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRUCT:
+          self.data_page_header = DataPageHeader()
+          self.data_page_header.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRUCT:
+          self.index_page_header = IndexPageHeader()
+          self.index_page_header.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.STRUCT:
+          self.dictionary_page_header = DictionaryPageHeader()
+          self.dictionary_page_header.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PageHeader')
+    if self.type is not None:
+      oprot.writeFieldBegin('type', TType.I32, 1)
+      oprot.writeI32(self.type)
+      oprot.writeFieldEnd()
+    if self.uncompressed_page_size is not None:
+      oprot.writeFieldBegin('uncompressed_page_size', TType.I32, 2)
+      oprot.writeI32(self.uncompressed_page_size)
+      oprot.writeFieldEnd()
+    if self.compressed_page_size is not None:
+      oprot.writeFieldBegin('compressed_page_size', TType.I32, 3)
+      oprot.writeI32(self.compressed_page_size)
+      oprot.writeFieldEnd()
+    if self.crc is not None:
+      oprot.writeFieldBegin('crc', TType.I32, 4)
+      oprot.writeI32(self.crc)
+      oprot.writeFieldEnd()
+    if self.data_page_header is not None:
+      oprot.writeFieldBegin('data_page_header', TType.STRUCT, 5)
+      self.data_page_header.write(oprot)
+      oprot.writeFieldEnd()
+    if self.index_page_header is not None:
+      oprot.writeFieldBegin('index_page_header', TType.STRUCT, 6)
+      self.index_page_header.write(oprot)
+      oprot.writeFieldEnd()
+    if self.dictionary_page_header is not None:
+      oprot.writeFieldBegin('dictionary_page_header', TType.STRUCT, 7)
+      self.dictionary_page_header.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.type is None:
+      raise TProtocol.TProtocolException(message='Required field type is unset!')
+    if self.uncompressed_page_size is None:
+      raise TProtocol.TProtocolException(message='Required field uncompressed_page_size is unset!')
+    if self.compressed_page_size is None:
+      raise TProtocol.TProtocolException(message='Required field compressed_page_size is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class KeyValue:
+  """
+  Wrapper struct to store key values
+
+  Attributes:
+   - key
+   - value
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'key', None, None, ), # 1
+    (2, TType.STRING, 'value', None, None, ), # 2
+  )
+
+  def __init__(self, key=None, value=None,):
+    self.key = key
+    self.value = value
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.key = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.value = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('KeyValue')
+    if self.key is not None:
+      oprot.writeFieldBegin('key', TType.STRING, 1)
+      oprot.writeString(self.key)
+      oprot.writeFieldEnd()
+    if self.value is not None:
+      oprot.writeFieldBegin('value', TType.STRING, 2)
+      oprot.writeString(self.value)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.key is None:
+      raise TProtocol.TProtocolException(message='Required field key is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class SortingColumn:
+  """
+  Wrapper struct to specify sort order
+
+  Attributes:
+   - nulls_first: If true, nulls will come before non-null values; otherwise
+  nulls go at the end. (The column_idx and descending fields declared in the
+  upstream parquet.thrift are not present in this generated struct; only
+  nulls_first survived code generation.)
+  """
+
+  thrift_spec = (
+    None, # 0
+    None, # 1
+    None, # 2
+    (3, TType.BOOL, 'nulls_first', None, None, ), # 3
+  )
+
+  def __init__(self, nulls_first=None,):
+    self.nulls_first = nulls_first
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 3:
+        if ftype == TType.BOOL:
+          self.nulls_first = iprot.readBool();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SortingColumn')
+    if self.nulls_first is not None:
+      oprot.writeFieldBegin('nulls_first', TType.BOOL, 3)
+      oprot.writeBool(self.nulls_first)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.nulls_first is None:
+      raise TProtocol.TProtocolException(message='Required field nulls_first is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class ColumnMetaData:
+  """
+  Description for column metadata
+
+  Attributes:
+   - type: Type of this column *
+   - encodings: Set of all encodings used for this column. The purpose is to validate
+  whether we can decode those pages. *
+   - path_in_schema: Path in schema *
+   - codec: Compression codec *
+   - num_values: Number of values in this column *
+   - total_uncompressed_size: total byte size of all uncompressed pages in this column chunk (including the headers) *
+   - total_compressed_size: total byte size of all compressed pages in this column chunk (including the headers) *
+   - key_value_metadata: Optional key/value metadata *
+   - data_page_offset: Byte offset from beginning of file to first data page *
+   - index_page_offset: Byte offset from beginning of file to root index page *
+   - dictionary_page_offset: Byte offset from the beginning of file to first (only) dictionary page *
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'type', None, None, ), # 1
+    (2, TType.LIST, 'encodings', (TType.I32,None), None, ), # 2
+    (3, TType.LIST, 'path_in_schema', (TType.STRING,None), None, ), # 3
+    (4, TType.I32, 'codec', None, None, ), # 4
+    (5, TType.I64, 'num_values', None, None, ), # 5
+    (6, TType.I64, 'total_uncompressed_size', None, None, ), # 6
+    (7, TType.I64, 'total_compressed_size', None, None, ), # 7
+    (8, TType.LIST, 'key_value_metadata', (TType.STRUCT,(KeyValue, KeyValue.thrift_spec)), None, ), # 8
+    (9, TType.I64, 'data_page_offset', None, None, ), # 9
+    (10, TType.I64, 'index_page_offset', None, None, ), # 10
+    (11, TType.I64, 'dictionary_page_offset', None, None, ), # 11
+  )
+
+  def __init__(self, type=None, encodings=None, path_in_schema=None, codec=None, num_values=None, total_uncompressed_size=None, total_compressed_size=None, key_value_metadata=None, data_page_offset=None, index_page_offset=None, dictionary_page_offset=None,):
+    self.type = type
+    self.encodings = encodings
+    self.path_in_schema = path_in_schema
+    self.codec = codec
+    self.num_values = num_values
+    self.total_uncompressed_size = total_uncompressed_size
+    self.total_compressed_size = total_compressed_size
+    self.key_value_metadata = key_value_metadata
+    self.data_page_offset = data_page_offset
+    self.index_page_offset = index_page_offset
+    self.dictionary_page_offset = dictionary_page_offset
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.type = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.LIST:
+          self.encodings = []
+          (_etype3, _size0) = iprot.readListBegin()
+          for _i4 in xrange(_size0):
+            _elem5 = iprot.readI32();
+            self.encodings.append(_elem5)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.path_in_schema = []
+          (_etype9, _size6) = iprot.readListBegin()
+          for _i10 in xrange(_size6):
+            _elem11 = iprot.readString();
+            self.path_in_schema.append(_elem11)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I32:
+          self.codec = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.I64:
+          self.num_values = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.I64:
+          self.total_uncompressed_size = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.I64:
+          self.total_compressed_size = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.LIST:
+          self.key_value_metadata = []
+          (_etype15, _size12) = iprot.readListBegin()
+          for _i16 in xrange(_size12):
+            _elem17 = KeyValue()
+            _elem17.read(iprot)
+            self.key_value_metadata.append(_elem17)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 9:
+        if ftype == TType.I64:
+          self.data_page_offset = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 10:
+        if ftype == TType.I64:
+          self.index_page_offset = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 11:
+        if ftype == TType.I64:
+          self.dictionary_page_offset = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ColumnMetaData')
+    if self.type is not None:
+      oprot.writeFieldBegin('type', TType.I32, 1)
+      oprot.writeI32(self.type)
+      oprot.writeFieldEnd()
+    if self.encodings is not None:
+      oprot.writeFieldBegin('encodings', TType.LIST, 2)
+      oprot.writeListBegin(TType.I32, len(self.encodings))
+      for iter18 in self.encodings:
+        oprot.writeI32(iter18)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.path_in_schema is not None:
+      oprot.writeFieldBegin('path_in_schema', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.path_in_schema))
+      for iter19 in self.path_in_schema:
+        oprot.writeString(iter19)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.codec is not None:
+      oprot.writeFieldBegin('codec', TType.I32, 4)
+      oprot.writeI32(self.codec)
+      oprot.writeFieldEnd()
+    if self.num_values is not None:
+      oprot.writeFieldBegin('num_values', TType.I64, 5)
+      oprot.writeI64(self.num_values)
+      oprot.writeFieldEnd()
+    if self.total_uncompressed_size is not None:
+      oprot.writeFieldBegin('total_uncompressed_size', TType.I64, 6)
+      oprot.writeI64(self.total_uncompressed_size)
+      oprot.writeFieldEnd()
+    if self.total_compressed_size is not None:
+      oprot.writeFieldBegin('total_compressed_size', TType.I64, 7)
+      oprot.writeI64(self.total_compressed_size)
+      oprot.writeFieldEnd()
+    if self.key_value_metadata is not None:
+      oprot.writeFieldBegin('key_value_metadata', TType.LIST, 8)
+      oprot.writeListBegin(TType.STRUCT, len(self.key_value_metadata))
+      for iter20 in self.key_value_metadata:
+        iter20.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.data_page_offset is not None:
+      oprot.writeFieldBegin('data_page_offset', TType.I64, 9)
+      oprot.writeI64(self.data_page_offset)
+      oprot.writeFieldEnd()
+    if self.index_page_offset is not None:
+      oprot.writeFieldBegin('index_page_offset', TType.I64, 10)
+      oprot.writeI64(self.index_page_offset)
+      oprot.writeFieldEnd()
+    if self.dictionary_page_offset is not None:
+      oprot.writeFieldBegin('dictionary_page_offset', TType.I64, 11)
+      oprot.writeI64(self.dictionary_page_offset)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.type is None:
+      raise TProtocol.TProtocolException(message='Required field type is unset!')
+    if self.encodings is None:
+      raise TProtocol.TProtocolException(message='Required field encodings is unset!')
+    if self.path_in_schema is None:
+      raise TProtocol.TProtocolException(message='Required field path_in_schema is unset!')
+    if self.codec is None:
+      raise TProtocol.TProtocolException(message='Required field codec is unset!')
+    if self.num_values is None:
+      raise TProtocol.TProtocolException(message='Required field num_values is unset!')
+    if self.total_uncompressed_size is None:
+      raise TProtocol.TProtocolException(message='Required field total_uncompressed_size is unset!')
+    if self.total_compressed_size is None:
+      raise TProtocol.TProtocolException(message='Required field total_compressed_size is unset!')
+    if self.data_page_offset is None:
+      raise TProtocol.TProtocolException(message='Required field data_page_offset is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class ColumnChunk:
+  """
+  Attributes:
+   - file_path: File where column data is stored.  If not set, assumed to be same file as
+  metadata.  This path is relative to the current file.
+
+   - file_offset: Byte offset in file_path to the ColumnMetaData *
+   - meta_data: Column metadata for this chunk. This is the same content as what is at
+  file_path/file_offset.  Having it here has it replicated in the file
+  metadata.
+
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'file_path', None, None, ), # 1
+    (2, TType.I64, 'file_offset', None, None, ), # 2
+    (3, TType.STRUCT, 'meta_data', (ColumnMetaData, ColumnMetaData.thrift_spec), None, ), # 3
+  )
+
+  def __init__(self, file_path=None, file_offset=None, meta_data=None,):
+    self.file_path = file_path
+    self.file_offset = file_offset
+    self.meta_data = meta_data
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.file_path = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I64:
+          self.file_offset = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRUCT:
+          self.meta_data = ColumnMetaData()
+          self.meta_data.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ColumnChunk')
+    if self.file_path is not None:
+      oprot.writeFieldBegin('file_path', TType.STRING, 1)
+      oprot.writeString(self.file_path)
+      oprot.writeFieldEnd()
+    if self.file_offset is not None:
+      oprot.writeFieldBegin('file_offset', TType.I64, 2)
+      oprot.writeI64(self.file_offset)
+      oprot.writeFieldEnd()
+    if self.meta_data is not None:
+      oprot.writeFieldBegin('meta_data', TType.STRUCT, 3)
+      self.meta_data.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.file_offset is None:
+      raise TProtocol.TProtocolException(message='Required field file_offset is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class RowGroup:
+  """
+  Attributes:
+   - columns
+   - total_byte_size: Total byte size of all the uncompressed column data in this row group *
+   - num_rows: Number of rows in this row group *
+   - sorting_columns: If set, specifies a sort ordering of the rows in this RowGroup.
+  The sorting columns can be a subset of all the columns.
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'columns', (TType.STRUCT,(ColumnChunk, ColumnChunk.thrift_spec)), None, ), # 1
+    (2, TType.I64, 'total_byte_size', None, None, ), # 2
+    (3, TType.I64, 'num_rows', None, None, ), # 3
+    (4, TType.LIST, 'sorting_columns', (TType.STRUCT,(SortingColumn, SortingColumn.thrift_spec)), None, ), # 4
+  )
+
+  def __init__(self, columns=None, total_byte_size=None, num_rows=None, sorting_columns=None,):
+    self.columns = columns
+    self.total_byte_size = total_byte_size
+    self.num_rows = num_rows
+    self.sorting_columns = sorting_columns
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.columns = []
+          (_etype24, _size21) = iprot.readListBegin()
+          for _i25 in xrange(_size21):
+            _elem26 = ColumnChunk()
+            _elem26.read(iprot)
+            self.columns.append(_elem26)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I64:
+          self.total_byte_size = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.num_rows = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.LIST:
+          self.sorting_columns = []
+          (_etype30, _size27) = iprot.readListBegin()
+          for _i31 in xrange(_size27):
+            _elem32 = SortingColumn()
+            _elem32.read(iprot)
+            self.sorting_columns.append(_elem32)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('RowGroup')
+    if self.columns is not None:
+      oprot.writeFieldBegin('columns', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.columns))
+      for iter33 in self.columns:
+        iter33.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.total_byte_size is not None:
+      oprot.writeFieldBegin('total_byte_size', TType.I64, 2)
+      oprot.writeI64(self.total_byte_size)
+      oprot.writeFieldEnd()
+    if self.num_rows is not None:
+      oprot.writeFieldBegin('num_rows', TType.I64, 3)
+      oprot.writeI64(self.num_rows)
+      oprot.writeFieldEnd()
+    if self.sorting_columns is not None:
+      oprot.writeFieldBegin('sorting_columns', TType.LIST, 4)
+      oprot.writeListBegin(TType.STRUCT, len(self.sorting_columns))
+      for iter34 in self.sorting_columns:
+        iter34.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.columns is None:
+      raise TProtocol.TProtocolException(message='Required field columns is unset!')
+    if self.total_byte_size is None:
+      raise TProtocol.TProtocolException(message='Required field total_byte_size is unset!')
+    if self.num_rows is None:
+      raise TProtocol.TProtocolException(message='Required field num_rows is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class FileMetaData:
+  """
+  Description for file metadata
+
+  Attributes:
+   - version: Version of this file *
+   - schema: Parquet schema for this file.  This schema contains metadata for all the columns.
+  The schema is represented as a tree with a single root.  The nodes of the tree
+  are flattened to a list by doing a depth-first traversal.
+  The column metadata contains the path in the schema for that column which can be
+  used to map columns to nodes in the schema.
+  The first element is the root *
+   - num_rows: Number of rows in this file *
+   - row_groups: Row groups in this file *
+   - key_value_metadata: Optional key/value metadata *
+   - created_by: String for application that wrote this file.  This should be in the format
+  <Application> version <App Version> (build <App Build Hash>).
+  e.g. impala version 1.0 (build 6cf94d29b2b7115df4de2c06e2ab4326d721eb55)
+
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'version', None, None, ), # 1
+    (2, TType.LIST, 'schema', (TType.STRUCT,(SchemaElement, SchemaElement.thrift_spec)), None, ), # 2
+    (3, TType.I64, 'num_rows', None, None, ), # 3
+    (4, TType.LIST, 'row_groups', (TType.STRUCT,(RowGroup, RowGroup.thrift_spec)), None, ), # 4
+    (5, TType.LIST, 'key_value_metadata', (TType.STRUCT,(KeyValue, KeyValue.thrift_spec)), None, ), # 5
+    (6, TType.STRING, 'created_by', None, None, ), # 6
+  )
+
+  def __init__(self, version=None, schema=None, num_rows=None, row_groups=None, key_value_metadata=None, created_by=None,):
+    self.version = version
+    self.schema = schema
+    self.num_rows = num_rows
+    self.row_groups = row_groups
+    self.key_value_metadata = key_value_metadata
+    self.created_by = created_by
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.version = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.LIST:
+          self.schema = []
+          (_etype38, _size35) = iprot.readListBegin()
+          for _i39 in xrange(_size35):
+            _elem40 = SchemaElement()
+            _elem40.read(iprot)
+            self.schema.append(_elem40)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.num_rows = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.LIST:
+          self.row_groups = []
+          (_etype44, _size41) = iprot.readListBegin()
+          for _i45 in xrange(_size41):
+            _elem46 = RowGroup()
+            _elem46.read(iprot)
+            self.row_groups.append(_elem46)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.LIST:
+          self.key_value_metadata = []
+          (_etype50, _size47) = iprot.readListBegin()
+          for _i51 in xrange(_size47):
+            _elem52 = KeyValue()
+            _elem52.read(iprot)
+            self.key_value_metadata.append(_elem52)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRING:
+          self.created_by = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('FileMetaData')
+    if self.version is not None:
+      oprot.writeFieldBegin('version', TType.I32, 1)
+      oprot.writeI32(self.version)
+      oprot.writeFieldEnd()
+    if self.schema is not None:
+      oprot.writeFieldBegin('schema', TType.LIST, 2)
+      oprot.writeListBegin(TType.STRUCT, len(self.schema))
+      for iter53 in self.schema:
+        iter53.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.num_rows is not None:
+      oprot.writeFieldBegin('num_rows', TType.I64, 3)
+      oprot.writeI64(self.num_rows)
+      oprot.writeFieldEnd()
+    if self.row_groups is not None:
+      oprot.writeFieldBegin('row_groups', TType.LIST, 4)
+      oprot.writeListBegin(TType.STRUCT, len(self.row_groups))
+      for iter54 in self.row_groups:
+        iter54.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.key_value_metadata is not None:
+      oprot.writeFieldBegin('key_value_metadata', TType.LIST, 5)
+      oprot.writeListBegin(TType.STRUCT, len(self.key_value_metadata))
+      for iter55 in self.key_value_metadata:
+        iter55.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.created_by is not None:
+      oprot.writeFieldBegin('created_by', TType.STRING, 6)
+      oprot.writeString(self.created_by)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.version is None:
+      raise TProtocol.TProtocolException(message='Required field version is unset!')
+    if self.schema is None:
+      raise TProtocol.TProtocolException(message='Required field schema is unset!')
+    if self.num_rows is None:
+      raise TProtocol.TProtocolException(message='Required field num_rows is unset!')
+    if self.row_groups is None:
+      raise TProtocol.TProtocolException(message='Required field row_groups is unset!')
+    return
+
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
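
The classes above are Thrift-generated containers (parquet/ttypes.py); nothing in this file knows where the serialized FileMetaData lives inside a Parquet file. As orientation only, here is a minimal sketch, not part of this commit, of how a reader typically deserializes it, assuming the standard Parquet footer layout (serialized metadata, then a 4-byte little-endian metadata length, then the magic bytes 'PAR1' at the end of the file) and the compact Thrift protocol that Parquet uses for its metadata:

import struct

from thrift.protocol import TCompactProtocol
from thrift.transport import TTransport

from parquet.ttypes import FileMetaData


def read_file_metadata(filename):
    """Sketch: pull the FileMetaData struct out of a Parquet footer."""
    with open(filename, 'rb') as fo:
        fo.seek(-8, 2)                           # 4-byte length + 4-byte magic at the tail
        footer_size = struct.unpack('<i', fo.read(4))[0]
        assert fo.read(4) == 'PAR1', 'not a parquet file'
        fo.seek(-8 - footer_size, 2)             # start of the serialized metadata
        transport = TTransport.TMemoryBuffer(fo.read(footer_size))
        protocol = TCompactProtocol.TCompactProtocol(transport)
        metadata = FileMetaData()
        metadata.read(protocol)                  # falls through to the generic read path above
        metadata.validate()                      # raises if required fields are unset
        return metadata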

+ 23 - 0
desktop/core/ext-py/parquet-python/setup.py

@@ -0,0 +1,23 @@
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+setup(name='parquet',
+    version='1.0',
+    description='Python support for Parquet file format',
+    author='Joe Crobak',
+    author_email='joecrow@gmail.com',
+    packages=[ 'parquet' ],
+    install_requires=[
+        'thrift',
+    ],
+    extras_require = {
+        'snappy support': ['python-snappy']
+    },
+    entry_points={
+        'console_scripts': [
+            'parquet = parquet.__main__:main',
+        ]
+    },
+)
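
This setup.py registers a `parquet` console script (parquet.__main__:main), requires only thrift, and treats python-snappy as an optional extra for Snappy-compressed files. Once the package is installed (for example with `python setup.py install`), the read API exercised by the tests further down can be used directly; a small sketch, assuming dump_metadata accepts any writable file object as the StringIO usage in TestMetadata suggests:

import sys

import parquet

# The sample path is only an example; any of the test-data files works.
footer = parquet.read_footer("test-data/nation.impala.parquet")
print [s.name for s in footer.schema]          # schema element names, root first
parquet.dump_metadata("test-data/nation.impala.parquet", sys.stdout)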

BIN
desktop/core/ext-py/parquet-python/test-data/gzip-nation.impala.parquet


+ 25 - 0
desktop/core/ext-py/parquet-python/test-data/nation.csv

@@ -0,0 +1,25 @@
+0|ALGERIA|0| haggle. carefully final deposits detect slyly agai
+1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
+2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 
+3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+5|ETHIOPIA|0|ven packages wake quickly. regu
+6|FRANCE|3|refully final requests. regular, ironi
+7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco
+8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun
+9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull
+10|IRAN|4|efully alongside of the slyly final dependencies. 
+11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula
+12|JAPAN|2|ously. final, express gifts cajole a
+13|JORDAN|4|ic deposits are blithely about the carefully regular pa
+14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t
+15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets?
+16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r
+17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun
+18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos
+19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account
+20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely
+21|VIETNAM|2|hely enticingly express accounts. even, final 
+22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint
+23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull
+24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be

BIN
desktop/core/ext-py/parquet-python/test-data/nation.dict.parquet


BIN
desktop/core/ext-py/parquet-python/test-data/nation.impala.parquet


BIN
desktop/core/ext-py/parquet-python/test-data/nation.plain.parquet


BIN
desktop/core/ext-py/parquet-python/test-data/snappy-nation.impala.parquet


+ 127 - 0
desktop/core/ext-py/parquet-python/test/test_encoding.py

@@ -0,0 +1,127 @@
+import array
+import struct
+import StringIO
+import unittest
+
+import parquet.encoding
+from parquet.ttypes import Type
+from nose import SkipTest
+
+
+class TestPlain(unittest.TestCase):
+
+    def test_int32(self):
+        self.assertEquals(
+            999,
+            parquet.encoding.read_plain_int32(
+                StringIO.StringIO(struct.pack("<i", 999))))
+
+    def test_int64(self):
+        self.assertEquals(
+            999,
+            parquet.encoding.read_plain_int64(
+                StringIO.StringIO(struct.pack("<q", 999))))
+
+    def test_int96(self):
+        self.assertEquals(
+            999,
+            parquet.encoding.read_plain_int96(
+                StringIO.StringIO(struct.pack("<qi", 0, 999))))
+
+    def test_float(self):
+        self.assertAlmostEquals(
+            9.99,
+            parquet.encoding.read_plain_float(
+                StringIO.StringIO(struct.pack("<f", 9.99))),
+            2)
+
+    def test_double(self):
+        self.assertEquals(
+            9.99,
+            parquet.encoding.read_plain_double(
+                StringIO.StringIO(struct.pack("<d", 9.99))))
+
+    def test_fixed(self):
+        data = "foobar"
+        fo = StringIO.StringIO(data)
+        self.assertEquals(
+            data[:3],
+            parquet.encoding.read_plain_byte_array_fixed(
+                fo, 3))
+        self.assertEquals(
+            data[3:],
+            parquet.encoding.read_plain_byte_array_fixed(
+                fo, 3))
+
+    def test_fixed_read_plain(self):
+        data = "foobar"
+        fo = StringIO.StringIO(data)
+        self.assertEquals(
+            data[:3],
+            parquet.encoding.read_plain(
+                fo, Type.FIXED_LEN_BYTE_ARRAY, 3))
+
+
+class TestRle(unittest.TestCase):
+
+    def testFourByteValue(self):
+        fo = StringIO.StringIO(struct.pack("<i", 1 << 30))
+        out = parquet.encoding.read_rle(fo, 2 << 1, 30)
+        self.assertEquals([1 << 30] * 2, list(out))
+
+
+class TestVarInt(unittest.TestCase):
+
+    def testSingleByte(self):
+        fo = StringIO.StringIO(struct.pack("<B", 0x7F))
+        out = parquet.encoding.read_unsigned_var_int(fo)
+        self.assertEquals(0x7F, out)
+
+    def testFourByte(self):
+        fo = StringIO.StringIO(struct.pack("<BBBB", 0xFF, 0xFF, 0xFF, 0x7F))
+        out = parquet.encoding.read_unsigned_var_int(fo)
+        self.assertEquals(0x0FFFFFFF, out)
+
+
+class TestBitPacked(unittest.TestCase):
+
+    def testFromExample(self):
+        raw_data_in = [0b10001000, 0b11000110, 0b11111010]
+        encoded_bitstring = array.array('B', raw_data_in).tostring()
+        fo = StringIO.StringIO(encoded_bitstring)
+        count = 3 << 1
+        res = parquet.encoding.read_bitpacked(fo, count, 3)
+        self.assertEquals(range(8), res)
+
+
+class TestBitPackedDeprecated(unittest.TestCase):
+
+    def testFromExample(self):
+        encoded_bitstring = array.array(
+            'B', [0b00000101, 0b00111001, 0b01110111]).tostring()
+        fo = StringIO.StringIO(encoded_bitstring)
+        res = parquet.encoding.read_bitpacked_deprecated(fo, 3, 8, 3)
+        self.assertEquals(range(8), res)
+
+
+class TestWidthFromMaxInt(unittest.TestCase):
+
+    def testWidths(self):
+        self.assertEquals(0, parquet.encoding.width_from_max_int(0))
+        self.assertEquals(1, parquet.encoding.width_from_max_int(1))
+        self.assertEquals(2, parquet.encoding.width_from_max_int(2))
+        self.assertEquals(2, parquet.encoding.width_from_max_int(3))
+        self.assertEquals(3, parquet.encoding.width_from_max_int(4))
+        self.assertEquals(3, parquet.encoding.width_from_max_int(5))
+        self.assertEquals(3, parquet.encoding.width_from_max_int(6))
+        self.assertEquals(3, parquet.encoding.width_from_max_int(7))
+        self.assertEquals(4, parquet.encoding.width_from_max_int(8))
+        self.assertEquals(4, parquet.encoding.width_from_max_int(15))
+        self.assertEquals(5, parquet.encoding.width_from_max_int(16))
+        self.assertEquals(5, parquet.encoding.width_from_max_int(31))
+        self.assertEquals(6, parquet.encoding.width_from_max_int(32))
+        self.assertEquals(6, parquet.encoding.width_from_max_int(63))
+        self.assertEquals(7, parquet.encoding.width_from_max_int(64))
+        self.assertEquals(7, parquet.encoding.width_from_max_int(127))
+        self.assertEquals(8, parquet.encoding.width_from_max_int(128))
+        self.assertEquals(8, parquet.encoding.width_from_max_int(255))
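
The var-int and width expectations above follow the usual rules: unsigned var-ints are ULEB128 (seven data bits per byte, high bit set while more bytes follow), and the bit width of a maximum value is simply its bit length. A reference sketch of the behaviour these tests assert, independent of the actual implementation in parquet/encoding.py:

def decode_unsigned_var_int(fo):
    # ULEB128: accumulate 7 bits per byte until a byte with the high bit clear.
    result, shift = 0, 0
    while True:
        byte = ord(fo.read(1))
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result
        shift += 7


def bit_width(max_int):
    # Matches TestWidthFromMaxInt: 0 -> 0, 1 -> 1, 3 -> 2, 255 -> 8.
    return max_int.bit_length()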

+ 166 - 0
desktop/core/ext-py/parquet-python/test/test_read_support.py

@@ -0,0 +1,166 @@
+import csv
+import json
+import os
+import StringIO
+import tempfile
+import unittest
+
+import parquet
+
+
+class TestFileFormat(unittest.TestCase):
+    def test_header_magic_bytes(self):
+        with tempfile.NamedTemporaryFile() as t:
+            t.write("PAR1_some_bogus_data")
+            t.flush()
+            self.assertTrue(parquet._check_header_magic_bytes(t))
+
+    def test_footer_magic_bytes(self):
+        with tempfile.NamedTemporaryFile() as t:
+            t.write("PAR1_some_bogus_data_PAR1")
+            t.flush()
+            self.assertTrue(parquet._check_footer_magic_bytes(t))
+
+    def test_not_parquet_file(self):
+        with tempfile.NamedTemporaryFile() as t:
+            t.write("blah")
+            t.flush()
+            self.assertFalse(parquet._check_header_magic_bytes(t))
+            self.assertFalse(parquet._check_footer_magic_bytes(t))
+
+
+class TestMetadata(unittest.TestCase):
+
+    f = "test-data/nation.impala.parquet"
+
+    def test_footer_bytes(self):
+        with open(self.f) as fo:
+            self.assertEquals(327, parquet._get_footer_size(fo))
+
+    def test_read_footer(self):
+        footer = parquet.read_footer(self.f)
+        self.assertEquals(
+            set([s.name for s in footer.schema]),
+            set(["schema", "n_regionkey", "n_name", "n_nationkey",
+                 "n_comment"]))
+
+    def test_dump_metadata(self):
+        data = StringIO.StringIO()
+        parquet.dump_metadata(self.f, data)
+
+
+class Options(object):
+
+    def __init__(self, col=None, format='csv', no_headers=True, limit=-1):
+        self.col = col
+        self.format = format
+        self.no_headers = no_headers
+        self.limit = limit
+
+
+class TestReadApi(unittest.TestCase):
+
+    def test_projection(self):
+        pass
+
+    def test_limit(self):
+        pass
+
+class TestCompatibility(object):
+
+    td = "test-data"
+    files = [(os.path.join(td, p), os.path.join(td, "nation.csv")) for p in
+             ["gzip-nation.impala.parquet", "nation.dict.parquet",
+              "nation.impala.parquet", "nation.plain.parquet",
+              "snappy-nation.impala.parquet"]]
+
+    def _test_file_csv(self, parquet_file, csv_file):
+        """ Given the parquet_file and csv_file representation, converts the
+            parquet_file to a csv using the dump utility and then compares the
+            result to the csv_file.
+        """
+        expected_data = []
+        with open(csv_file, 'rb') as f:
+            expected_data = list(csv.reader(f, delimiter='|'))
+
+        actual_raw_data = StringIO.StringIO()
+        parquet.dump(parquet_file, Options(), out=actual_raw_data)
+        actual_raw_data.seek(0, 0)
+        actual_data = list(csv.reader(actual_raw_data, delimiter='\t'))
+
+        assert expected_data == actual_data, "{0} != {1}".format(
+            str(expected_data), str(actual_data))
+
+        actual_raw_data = StringIO.StringIO()
+        parquet.dump(parquet_file, Options(no_headers=False),
+                     out=actual_raw_data)
+        actual_raw_data.seek(0, 0)
+        actual_data = list(csv.reader(actual_raw_data, delimiter='\t'))[1:]
+
+        assert expected_data == actual_data, "{0} != {1}".format(
+            str(expected_data), str(actual_data))
+
+    def _test_file_json(self, parquet_file, csv_file):
+        """ Given the parquet_file and csv_file representation, converts the
+            parquet_file to json using the dump utility and then compares the
+            result to the csv_file using column agnostic ordering.
+        """
+        expected_data = []
+        with open(csv_file, 'rb') as f:
+            expected_data = list(csv.reader(f, delimiter='|'))
+
+        actual_raw_data = StringIO.StringIO()
+        parquet.dump(parquet_file, Options(format='json'),
+                     out=actual_raw_data)
+        actual_raw_data.seek(0, 0)
+        actual_data = [json.loads(x.rstrip()) for x in
+                       actual_raw_data.read().split("\n") if len(x) > 0]
+
+        assert len(expected_data) == len(actual_data)
+        footer = parquet.read_footer(parquet_file)
+        cols = [s.name for s in footer.schema]
+        for expected, actual in zip(expected_data, actual_raw_data):
+            assert len(expected) == len(actual)
+            for i, c in enumerate(cols):
+                if c in actual:
+                    assert expected[i] == actual[c]
+
+    def _test_file_custom(self, parquet_file, csv_file):
+        """ Given the parquet_file and csv_file representation, converts the
+            parquet_file to json using the dump utility and then compares the
+            result to the csv_file using column agnostic ordering.
+        """
+        expected_data = []
+        with open(csv_file, 'rb') as f:
+            expected_data = list(csv.reader(f, delimiter='|'))
+
+        def _custom_datatype(in_dict, keys):
+            '''
+            return rows like the csv outputter
+
+            Could convert to a dataframe like this:
+                import pandas
+                df = pandas.DataFrame(in_dict)
+                return df
+            '''
+            columns = [in_dict[key] for key in keys]
+            rows = zip(*columns)
+            return rows
+
+        actual_data = parquet.dump(parquet_file, Options(format='custom'), out=_custom_datatype)
+
+        assert len(expected_data) == len(actual_data)
+        footer = parquet.read_footer(parquet_file)
+        cols = [s.name for s in footer.schema]
+
+        for expected, actual in zip(expected_data, actual_data):
+            assert len(expected) == len(actual)
+            for i, c in enumerate(cols):
+                if c in actual:
+                    assert expected[i] == actual[c]
+
+    def test_all_files(self):
+        for parquet_file, csv_file in self.files:
+            yield self._test_file_csv, parquet_file, csv_file
+            yield self._test_file_json, parquet_file, csv_file
+            yield self._test_file_custom, parquet_file, csv_file
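
TestCompatibility deliberately does not subclass unittest.TestCase: test_all_files is a nose-style generator that yields (check_function, parquet_file, csv_file) tuples, so nose collects one test per data file and output format. A minimal sketch of driving the same checks by hand, as if appended to this module and run from the repository root so the relative test-data paths resolve:

if __name__ == '__main__':
    suite = TestCompatibility()
    for check, parquet_file, csv_file in suite.test_all_files():
        check(parquet_file, csv_file)            # raises AssertionError on mismatch
        print "ok", check.__name__, parquet_file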