
HUE-5009 [core] Backport parquet-python: Fixes errors reported by flake8 and pylint

Commit https://github.com/jcrobak/parquet-python/commit/8f29bb7a3b0d154800d28a6e812c88e2b96d164e

Jenny Kim, 9 years ago
parent commit e895374

+ 156 - 130
desktop/core/ext-py/parquet-1.1/parquet/__init__.py

@@ -1,3 +1,4 @@
+"""parquet - read parquet files."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -20,100 +21,104 @@ except ImportError:
 import thriftpy
 from thriftpy.protocol.compact import TCompactProtocolFactory
 
-from .converted_types import convert_column
-from .thrift_filetransport import TFileTransport
 from . import encoding
 from . import schema
+from .converted_types import convert_column
+from .thrift_filetransport import TFileTransport
 
 PY3 = sys.version_info > (3,)
 
 if PY3:
     import csv
 else:
-    from backports import csv
+    from backports import csv  # pylint: disable=import-error
 
 THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
-parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift")
+parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift")  # pylint: disable=invalid-name
 
 
+logger = logging.getLogger("parquet")  # pylint: disable=invalid-name
 
-logger = logging.getLogger("parquet")
 
 try:
     import snappy
 except ImportError:
-    logger.warn(
+    logger.info(
         "Couldn't import snappy. Support for snappy compression disabled.")
         "Couldn't import snappy. Support for snappy compression disabled.")
 
 
 
 
 class ParquetFormatException(Exception):
+    """Generic Exception related to unexpected data format when reading parquet file."""
     pass
 
 
-def _check_header_magic_bytes(fo):
-    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
-    fo.seek(0, 0)
-    magic = fo.read(4)
+def _check_header_magic_bytes(file_obj):
+    """Check if the file-like obj has the PAR1 magic bytes at the header."""
+    file_obj.seek(0, 0)
+    magic = file_obj.read(4)
     return magic == b'PAR1'
 
 
-def _check_footer_magic_bytes(fo):
-    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
-    fo.seek(-4, 2)  # seek to four bytes from the end of the file
-    magic = fo.read(4)
+def _check_footer_magic_bytes(file_obj):
+    """Check if the file-like obj has the PAR1 magic bytes at the footer."""
+    file_obj.seek(-4, 2)  # seek to four bytes from the end of the file
+    magic = file_obj.read(4)
     return magic == b'PAR1'
 
 
-def _get_footer_size(fo):
-    "Readers the footer size in bytes, which is serialized as little endian"
-    fo.seek(-8, 2)
-    tup = struct.unpack(b"<i", fo.read(4))
+def _get_footer_size(file_obj):
+    """Read the footer size in bytes, which is serialized as little endian."""
+    file_obj.seek(-8, 2)
+    tup = struct.unpack(b"<i", file_obj.read(4))
     return tup[0]
 
 
-def _read_footer(fo):
-    """Reads the footer from the given file object, returning a FileMetaData
-    object. This method assumes that the fo references a valid parquet file"""
-    footer_size = _get_footer_size(fo)
+def _read_footer(file_obj):
+    """Read the footer from the given file object and returns a FileMetaData object.
+
+    This method assumes that the fo references a valid parquet file.
+    """
+    footer_size = _get_footer_size(file_obj)
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug("Footer size in bytes: %s", footer_size)
-    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
-    tin = TFileTransport(fo)
+    file_obj.seek(-(8 + footer_size), 2)  # seek to beginning of footer
+    tin = TFileTransport(file_obj)
     pin = TCompactProtocolFactory().get_protocol(tin)
     fmd = parquet_thrift.FileMetaData()
     fmd.read(pin)
     return fmd
 
 
-def _read_page_header(fo):
-    """Reads the page_header from the given fo"""
-    tin = TFileTransport(fo)
+def _read_page_header(file_obj):
+    """Read the page_header from the given fo."""
+    tin = TFileTransport(file_obj)
     pin = TCompactProtocolFactory().get_protocol(tin)
-    ph = parquet_thrift.PageHeader()
-    ph.read(pin)
-    return ph
+    page_header = parquet_thrift.PageHeader()
+    page_header.read(pin)
+    return page_header
 
 
 def read_footer(filename):
-    """Reads and returns the FileMetaData object for the given file."""
-    with open(filename, 'rb') as fo:
-        if not _check_header_magic_bytes(fo) or \
-           not _check_footer_magic_bytes(fo):
+    """Read the footer and return the FileMetaData for the specified filename."""
+    with open(filename, 'rb') as file_obj:
+        if not _check_header_magic_bytes(file_obj) or \
+           not _check_footer_magic_bytes(file_obj):
             raise ParquetFormatException("{0} is not a valid parquet file "
                                          "(missing magic bytes)"
                                          "(missing magic bytes)"
                                          .format(filename))
-        return _read_footer(fo)
+        return _read_footer(file_obj)
 
 
 def _get_name(type_, value):
-    """Returns the name for the given value of the given type_ unless value is
-    None, in which case it returns empty string"""
-    return type_._VALUES_TO_NAMES[value] if value is not None else "None"
+    """Return the name for the given value of the given type_.
+
+    The value `None` returns empty string.
+    """
+    return type_._VALUES_TO_NAMES[value] if value is not None else "None"  # pylint: disable=protected-access
 
 
 def _get_offset(cmd):
-    """Returns the offset into the cmd based upon if it's a dictionary page or
-    a data page"""
+    """Return the offset into the cmd based upon if it's a dictionary page or a data page."""
     dict_offset = cmd.dictionary_page_offset
     data_offset = cmd.data_page_offset
     if dict_offset is None or data_offset < dict_offset:
@@ -122,7 +127,12 @@ def _get_offset(cmd):
 
 
 def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
+    """Dump metadata about the parquet object with the given filename.
+
+    Dump human-readable metadata to specified `out`. Optionally dump the row group metadata as well.
+    """
     def println(value):
+        """Write a new line containing `value` to `out`."""
         out.write(value + "\n")
     footer = read_footer(filename)
     println("File Metadata: {0}".format(filename))
@@ -130,34 +140,35 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
     println("  Num Rows: {0}".format(footer.num_rows))
     println("  Num Rows: {0}".format(footer.num_rows))
     println("  k/v metadata: ")
     println("  k/v metadata: ")
     if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
     if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
-        for kv in footer.key_value_metadata:
-            println("    {0}={1}".format(kv.key, kv.value))
+        for item in footer.key_value_metadata:
+            println("    {0}={1}".format(item.key, item.value))
     else:
         println("    (none)")
     println("  schema: ")
-    for se in footer.schema:
+    for element in footer.schema:
         println("    {name} ({type}): length={type_length}, "
         println("    {name} ({type}): length={type_length}, "
                 "repetition={repetition_type}, "
                 "repetition={repetition_type}, "
                 "children={num_children}, "
                 "children={num_children}, "
                 "converted_type={converted_type}".format(
                 "converted_type={converted_type}".format(
-                    name=se.name,
-                    type=parquet_thrift.Type._VALUES_TO_NAMES[se.type] if se.type else None,
-                    type_length=se.type_length,
+                    name=element.name,
+                    type=parquet_thrift.Type._VALUES_TO_NAMES[element.type]  # pylint: disable=protected-access
+                    if element.type else None,
+                    type_length=element.type_length,
                     repetition_type=_get_name(parquet_thrift.FieldRepetitionType,
-                                              se.repetition_type),
-                    num_children=se.num_children,
-                    converted_type=se.converted_type))
+                                              element.repetition_type),
+                    num_children=element.num_children,
+                    converted_type=element.converted_type))
     if show_row_group_metadata:
         println("  row groups: ")
-        for rg in footer.row_groups:
-            num_rows = rg.num_rows
-            bytes = rg.total_byte_size
+        for row_group in footer.row_groups:
+            num_rows = row_group.num_rows
+            size_bytes = row_group.total_byte_size
             println(
                 "  rows={num_rows}, bytes={bytes}".format(num_rows=num_rows,
-                                                          bytes=bytes))
+                                                          bytes=size_bytes))
             println("    chunks:")
             println("    chunks:")
-            for cg in rg.columns:
-                cmd = cg.meta_data
+            for col_group in row_group.columns:
+                cmd = col_group.meta_data
                 println("      type={type} file_offset={offset} "
                 println("      type={type} file_offset={offset} "
                         "compression={codec} "
                         "compression={codec} "
                         "encodings={encodings} path_in_schema={path_in_schema} "
                         "encodings={encodings} path_in_schema={path_in_schema} "
@@ -166,7 +177,7 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                         "data_page_offset={data_page_offset} "
                         "data_page_offset={data_page_offset} "
                         "dictionary_page_offset={dictionary_page_offset}".format(
                         "dictionary_page_offset={dictionary_page_offset}".format(
                             type=_get_name(parquet_thrift.Type, cmd.type),
-                            offset=cg.file_offset,
+                            offset=col_group.file_offset,
                             codec=_get_name(parquet_thrift.CompressionCodec, cmd.codec),
                             encodings=",".join(
                             encodings=",".join(
                                 [_get_name(
@@ -177,25 +188,24 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                             compressed_bytes=cmd.total_compressed_size,
                             data_page_offset=cmd.data_page_offset,
                             dictionary_page_offset=cmd.dictionary_page_offset))
-                with open(filename, 'rb') as fo:
+                with open(filename, 'rb') as file_obj:
                     offset = _get_offset(cmd)
-                    fo.seek(offset, 0)
+                    file_obj.seek(offset, 0)
                     values_read = 0
                     println("      pages: ")
                     while values_read < num_rows:
-                        ph = _read_page_header(fo)
+                        page_header = _read_page_header(file_obj)
                         # seek past current page.
-                        fo.seek(ph.compressed_page_size, 1)
-                        daph = ph.data_page_header
-                        type_ = _get_name(parquet_thrift.PageType, ph.type)
-                        raw_bytes = ph.uncompressed_page_size
+                        file_obj.seek(page_header.compressed_page_size, 1)
+                        daph = page_header.data_page_header
+                        type_ = _get_name(parquet_thrift.PageType, page_header.type)
+                        raw_bytes = page_header.uncompressed_page_size
                         num_values = None
-                        if ph.type == parquet_thrift.PageType.DATA_PAGE:
+                        if page_header.type == parquet_thrift.PageType.DATA_PAGE:
                             num_values = daph.num_values
                             values_read += num_values
-                        if ph.type == parquet_thrift.PageType.DICTIONARY_PAGE:
+                        if page_header.type == parquet_thrift.PageType.DICTIONARY_PAGE:
                             pass
-                            #num_values = diph.num_values
 
                         encoding_type = None
                         def_level_encoding = None
@@ -220,18 +230,17 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                                     rep_level_encoding=rep_level_encoding))
 
 
-def _read_page(fo, page_header, column_metadata):
-    """Internal function to read the data page from the given file-object
-    and convert it to raw, uncompressed bytes (if necessary)."""
-    bytes_from_file = fo.read(page_header.compressed_page_size)
+def _read_page(file_obj, page_header, column_metadata):
+    """Read the data page from the given file-object and convert it to raw, uncompressed bytes (if necessary)."""
+    bytes_from_file = file_obj.read(page_header.compressed_page_size)
     codec = column_metadata.codec
     if codec is not None and codec != parquet_thrift.CompressionCodec.UNCOMPRESSED:
         if column_metadata.codec == parquet_thrift.CompressionCodec.SNAPPY:
             raw_bytes = snappy.decompress(bytes_from_file)
         elif column_metadata.codec == parquet_thrift.CompressionCodec.GZIP:
             io_obj = io.BytesIO(bytes_from_file)
-            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
-                raw_bytes = f.read()
+            with gzip.GzipFile(fileobj=io_obj, mode='rb') as file_data:
+                raw_bytes = file_data.read()
         else:
             raise ParquetFormatException(
                 "Unsupported Codec: {0}".format(codec))
@@ -240,10 +249,10 @@ def _read_page(fo, page_header, column_metadata):
 
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug(
-            "Read page with compression type {0}. Bytes {1} -> {2}".format(
+            "Read page with compression type %s. Bytes %d -> %d",
             _get_name(parquet_thrift.CompressionCodec, codec),
             page_header.compressed_page_size,
-            page_header.uncompressed_page_size))
+            page_header.uncompressed_page_size)
     assert len(raw_bytes) == page_header.uncompressed_page_size, \
         "found {0} raw bytes (expected {1})".format(
             len(raw_bytes),
@@ -251,16 +260,16 @@ def _read_page(fo, page_header, column_metadata):
     return raw_bytes
 
 
-def _read_data(fo, fo_encoding, value_count, bit_width):
-    """Internal method to read data from the file-object using the given
-    encoding. The data could be definition levels, repetition levels, or
-    actual values.
+def _read_data(file_obj, fo_encoding, value_count, bit_width):
+    """Read data from the file-object using the given encoding.
+
+    The data could be definition levels, repetition levels, or actual values.
     """
     """
     vals = []
     vals = []
     if fo_encoding == parquet_thrift.Encoding.RLE:
     if fo_encoding == parquet_thrift.Encoding.RLE:
         seen = 0
         seen = 0
         while seen < value_count:
         while seen < value_count:
-            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
+            values = encoding.read_rle_bit_packed_hybrid(file_obj, bit_width)
             if values is None:
                 break  # EOF was reached.
             vals += values
@@ -271,14 +280,17 @@ def _read_data(fo, fo_encoding, value_count, bit_width):
     return vals
 
 
-def read_data_page(fo, schema_helper, page_header, column_metadata,
+def read_data_page(file_obj, schema_helper, page_header, column_metadata,
                    dictionary):
-    """Reads the datapage from the given file-like object based upon the
-    metadata in the schema_helper, page_header, column_metadata, and
-    (optional) dictionary. Returns a list of values.
+    """Read the data page from the given file-like object based upon the parameters.
+
+    Metadata in the the schema_helper, page_header, column_metadata, and (optional) dictionary
+    are used for parsing data.
+
+    Returns a list of values.
     """
     """
     daph = page_header.data_page_header
     daph = page_header.data_page_header
-    raw_bytes = _read_page(fo, page_header, column_metadata)
+    raw_bytes = _read_page(file_obj, page_header, column_metadata)
     io_obj = io.BytesIO(raw_bytes)
     vals = []
     debug_logging = logger.isEnabledFor(logging.DEBUG)
@@ -315,7 +327,7 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
             logger.debug("  Definition levels: %s", len(definition_levels))
             logger.debug("  Definition levels: %s", len(definition_levels))
 
 
     # repetition levels are skipped if data is at the first level.
     # repetition levels are skipped if data is at the first level.
-    repetition_levels = None
+    repetition_levels = None  # pylint: disable=unused-variable
     if len(column_metadata.path_in_schema) > 1:
         max_repetition_level = schema_helper.max_repetition_level(
             column_metadata.path_in_schema)
@@ -325,13 +337,13 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
                                        daph.num_values,
                                        bit_width)
 
-    # TODO Actually use the repetition levels.
+    # NOTE: The repetition levels aren't yet used.
     if daph.encoding == parquet_thrift.Encoding.PLAIN:
         read_values = \
             encoding.read_plain(io_obj, column_metadata.type, daph.num_values - num_nulls)
         if definition_levels:
-            it = iter(read_values)
-            vals.extend([next(it) if level == max_definition_level else None for level in definition_levels])
+            itr = iter(read_values)
+            vals.extend([next(itr) if level == max_definition_level else None for level in definition_levels])
         else:
             vals.extend(read_values)
         if debug_logging:
@@ -344,7 +356,6 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
         total_seen = 0
         dict_values_bytes = io_obj.read()
         dict_values_io_obj = io.BytesIO(dict_values_bytes)
-        # TODO jcrobak -- not sure that this loop is needed?
         while total_seen < daph.num_values:
             values = encoding.read_rle_bit_packed_hybrid(
                 dict_values_io_obj, bit_width, len(dict_values_bytes))
@@ -354,18 +365,25 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
             total_seen += len(values)
     else:
         raise ParquetFormatException("Unsupported encoding: %s",
-                                     _get_name(Encoding, daph.encoding))
+                                     _get_name(parquet_thrift.Encoding, daph.encoding))
     return vals
 
 
-def read_dictionary_page(fo, page_header, column_metadata):
-    raw_bytes = _read_page(fo, page_header, column_metadata)
+def _read_dictionary_page(file_obj, page_header, column_metadata):
+    """Read a page containing dictionary data.
+
+    Consumes data using the plain encoding and returns an array of values.
+    """
+    raw_bytes = _read_page(file_obj, page_header, column_metadata)
     io_obj = io.BytesIO(raw_bytes)
-    return encoding.read_plain(io_obj, column_metadata.type,
-        page_header.dictionary_page_header.num_values)
+    return encoding.read_plain(
+        io_obj,
+        column_metadata.type,
+        page_header.dictionary_page_header.num_values
+    )
 
 
-def DictReader(fo, columns=None):
+def DictReader(file_obj, columns=None):  # pylint: disable=invalid-name
     """
     """
     Reader for a parquet file object.
     Reader for a parquet file object.
 
 
@@ -378,14 +396,15 @@ def DictReader(fo, columns=None):
     :param columns: the columns to include. If None (default), all columns
                     are included. Nested values are referenced with "." notation
     """
-    footer = _read_footer(fo)
+    footer = _read_footer(file_obj)
     keys = columns if columns else [s.name for s in
                                     footer.schema if s.type]
 
-    for row in reader(fo, columns):
+    for row in reader(file_obj, columns):
         yield OrderedDict(zip(keys, row))
 
-def reader(fo, columns=None):
+
+def reader(file_obj, columns=None):
     """
     """
     Reader for a parquet file object.
     Reader for a parquet file object.
 
 
@@ -396,86 +415,92 @@ def reader(fo, columns=None):
     :param columns: the columns to include. If None (default), all columns
                     are included. Nested values are referenced with "." notation
     """
-    if hasattr(fo, 'mode') and 'b' not in fo.mode:
+    if hasattr(file_obj, 'mode') and 'b' not in file_obj.mode:
         logger.error("parquet.reader requires the fileobj to be opened in binary mode!")
         logger.error("parquet.reader requires the fileobj to be opened in binary mode!")
-    footer = _read_footer(fo)
+    footer = _read_footer(file_obj)
     schema_helper = schema.SchemaHelper(footer.schema)
     keys = columns if columns else [s.name for s in
                                     footer.schema if s.type]
     debug_logging = logger.isEnabledFor(logging.DEBUG)
-    for rg in footer.row_groups:
+    for row_group in footer.row_groups:
         res = defaultdict(list)
-        row_group_rows = rg.num_rows
-        for idx, cg in enumerate(rg.columns):
+        row_group_rows = row_group.num_rows
+        for col_group in row_group.columns:
             dict_items = []
-            cmd = cg.meta_data
+            cmd = col_group.meta_data
             # skip if the list of columns is specified and this isn't in it
             if columns and not ".".join(cmd.path_in_schema) in columns:
                 continue
 
             offset = _get_offset(cmd)
-            fo.seek(offset, 0)
+            file_obj.seek(offset, 0)
             values_seen = 0
             if debug_logging:
                 logger.debug("reading column chunk of type: %s",
                              _get_name(parquet_thrift.Type, cmd.type))
             while values_seen < row_group_rows:
-                ph = _read_page_header(fo)
+                page_header = _read_page_header(file_obj)
                 if debug_logging:
                     logger.debug("Reading page (type=%s, "
                                  "uncompressed=%s bytes, "
                                  "compressed=%s bytes)",
-                                 _get_name(parquet_thrift.PageType, ph.type),
-                                 ph.uncompressed_page_size,
-                                 ph.compressed_page_size)
+                                 _get_name(parquet_thrift.PageType, page_header.type),
+                                 page_header.uncompressed_page_size,
+                                 page_header.compressed_page_size)
 
-                if ph.type == parquet_thrift.PageType.DATA_PAGE:
-                    values = read_data_page(fo, schema_helper, ph, cmd,
+                if page_header.type == parquet_thrift.PageType.DATA_PAGE:
+                    values = read_data_page(file_obj, schema_helper, page_header, cmd,
                                             dict_items)
                     schema_element = schema_helper.schema_element(cmd.path_in_schema[-1])
-                    res[".".join(cmd.path_in_schema)] += convert_column(values,
-                                                                        schema_element) if schema_element.converted_type else values
-                    values_seen += ph.data_page_header.num_values
-                elif ph.type == parquet_thrift.PageType.DICTIONARY_PAGE:
+                    res[".".join(cmd.path_in_schema)] += convert_column(values, schema_element) \
+                        if schema_element.converted_type else values
+                    values_seen += page_header.data_page_header.num_values
+                elif page_header.type == parquet_thrift.PageType.DICTIONARY_PAGE:
                     if debug_logging:
-                        logger.debug(ph)
+                        logger.debug(page_header)
                     assert dict_items == []
-                    dict_items = read_dictionary_page(fo, ph, cmd)
+                    dict_items = _read_dictionary_page(file_obj, page_header, cmd)
                     if debug_logging:
                         logger.debug("Dictionary: %s", str(dict_items))
                 else:
-                    logger.warn("Skipping unknown page type={0}".format(
-                        _get_name(parquet_thrift.PageType, ph.type)))
+                    logger.info("Skipping unknown page type=%s",
+                                _get_name(parquet_thrift.PageType, page_header.type))
 
-        for i in range(rg.num_rows):
+        for i in range(row_group.num_rows):
             yield [res[k][i] for k in keys if res[k]]
 
-class JsonWriter(object):
+
+class JsonWriter(object):  # pylint: disable=too-few-public-methods
+    """Utility for dumping rows as JSON objects."""
+
     def __init__(self, out):
+        """Initialize with output destination."""
         self._out = out
 
     def writerow(self, row):
+        """Write a single row."""
         json_text = json.dumps(row)
-        if type(json_text) is bytes:
+        if isinstance(json_text, bytes):
             json_text = json_text.decode('utf-8')
         self._out.write(json_text)
         self._out.write(u'\n')
 
-def _dump(fo, options, out=sys.stdout):
 
+def _dump(file_obj, options, out=sys.stdout):
+    """Dump to fo with given options."""
     # writer and keys are lazily loaded. We don't know the keys until we have
     # the first item. And we need the keys for the csv writer.
     total_count = 0
     writer = None
     keys = None
-    for row in DictReader(fo, options.col):
+    for row in DictReader(file_obj, options.col):
         if not keys:
             keys = row.keys()
         if not writer:
-            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'',
-                quoting=csv.QUOTE_MINIMAL) if options.format == 'csv' \
-                    else JsonWriter(out) if options.format == 'json' \
-                    else None
+            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'', quoting=csv.QUOTE_MINIMAL) \
+                if options.format == 'csv' \
+                else JsonWriter(out) if options.format == 'json' \
+                else None
         if total_count == 0 and options.format == "csv" and not options.no_headers:
             writer.writeheader()
         if options.limit != -1 and total_count >= options.limit:
@@ -486,5 +511,6 @@ def _dump(fo, options, out=sys.stdout):
 
 
 def dump(filename, options, out=sys.stdout):
-    with open(filename, 'rb') as fo:
-        return _dump(fo, options=options, out=out)
+    """Dump parquet file with given filename using options to `out`."""
+    with open(filename, 'rb') as file_obj:
+        return _dump(file_obj, options=options, out=out)
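
For context, a minimal sketch of how the reader API touched above is typically used. The file name and column list here are hypothetical, and the snippet assumes the vendored package is importable as `parquet`:

    import parquet

    # Rows come back as OrderedDicts keyed by column name; the file must be
    # opened in binary mode, as the reader itself warns.
    with open("example.parquet", "rb") as file_obj:
        for row in parquet.DictReader(file_obj, columns=["id", "name"]):
            print(row)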

+ 3 - 0
desktop/core/ext-py/parquet-1.1/parquet/__main__.py

@@ -1,3 +1,4 @@
+"""parquet - tool for inspecting parquet files."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -10,6 +11,7 @@ import sys
 
 
 def setup_logging(options=None):
+    """Configure logging based on options."""
     level = logging.DEBUG if options is not None and options.debug \
         else logging.WARNING
     console = logging.StreamHandler()
@@ -21,6 +23,7 @@ def setup_logging(options=None):
 
 
 def main(argv=None):
+    """Run parquet utility application."""
     argv = argv or sys.argv[1:]
 
     parser = argparse.ArgumentParser('parquet',

+ 0 - 20
desktop/core/ext-py/parquet-1.1/parquet/bitstring.py

@@ -1,20 +0,0 @@
-
-SINGLE_BIT_MASK =  [1 << x for x in range(7, -1, -1)]
-
-class BitString(object):
-
-	def __init__(self, bytes, length=None, offset=None):
-		self.bytes = bytes
-		self.offset = offset if offset is not None else 0
-		self.length = length if length is not None else 8 * len(data) - self.offset 
-
-
-	def __getitem__(self, key):
-		try:
-			start = key.start
-			stop = key.stop
-		except AttributeError:
-			if key < 0 or key >= length:
-				raise IndexError()
-			byte_index, bit_offset = (divmod(self.offset + key), 8)
-			return self.bytes[byte_index] & SINGLE_BIT_MASK[bit_offset]

+ 24 - 19
desktop/core/ext-py/parquet-1.1/parquet/converted_types.py

@@ -1,7 +1,6 @@
 # -#- coding: utf-8 -#-
 """
-Deal with parquet logical types (aka converted types), higher-order
-things built from primitive types.
+Deal with parquet logical types (aka converted types), higher-order things built from primitive types.
 
 The implementations in this class are pure python for the widest compatibility,
 but they're not necessarily the most performant.
@@ -14,8 +13,8 @@ from __future__ import unicode_literals
 
 import codecs
 import datetime
-import logging
 import json
+import logging
 import os
 import struct
 import sys
@@ -24,11 +23,11 @@ from decimal import Decimal
 import thriftpy
 
 THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
-parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))
+parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))  # pylint: disable=invalid-name
 
-logger = logging.getLogger('parquet')
+logger = logging.getLogger('parquet')  # pylint: disable=invalid-name
 
-bson = None
+bson = None  # pylint: disable=invalid-name
 try:
     import bson
 except ImportError:
@@ -38,20 +37,26 @@ PY3 = sys.version_info.major > 2
 
 # define bytes->int for non 2, 4, 8 byte ints
 if PY3:
-    intbig = lambda x: int.from_bytes(x, 'big', signed=True)
+    def intbig(data):
+        """Convert big ints using python 3's built-in support."""
+        return int.from_bytes(data, 'big', signed=True)
 else:
-    intbig = lambda x: int(codecs.encode(x, 'hex'), 16)
+    def intbig(data):
+        """Convert big ints using a hack of encoding bytes as hex and decoding to int."""
+        return int(codecs.encode(data, 'hex'), 16)
 
 DAYS_TO_MILLIS = 86400000000000
 """Number of millis in a day. Used to convert a Date to a date"""
 
 
-def convert_unsigned(data, fmt):
+def _convert_unsigned(data, fmt):
+    """Convert data from signed to unsigned in bulk."""
     num = len(data)
     return struct.unpack(
-        "{}{}".format(num, fmt.upper()),
+        b"{}{}".format(num, fmt.upper()),
         struct.pack("{}{}".format(num, fmt), *data)
-        )
+    )
+
 
 def convert_column(data, schemae):
     """Convert known types from primitive to rich."""
@@ -66,22 +71,22 @@ def convert_column(data, schemae):
     elif ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
         return [datetime.timedelta(milliseconds=d) for d in data]
     elif ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
-        return [datetime.datetime.utcfromtimestamp(d/1000.0) for d in data]
+        return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
     elif ctype == parquet_thrift.ConvertedType.UTF8:
         return list(codecs.iterdecode(data, "utf-8"))
     elif ctype == parquet_thrift.ConvertedType.UINT_8:
-        return convert_unsigned(data, 'b')
+        return _convert_unsigned(data, 'b')
     elif ctype == parquet_thrift.ConvertedType.UINT_16:
-        return convert_unsigned(data, 'h')
+        return _convert_unsigned(data, 'h')
     elif ctype == parquet_thrift.ConvertedType.UINT_32:
-        return convert_unsigned(data, 'i')
+        return _convert_unsigned(data, 'i')
     elif ctype == parquet_thrift.ConvertedType.UINT_64:
-        return convert_unsigned(data, 'q')
+        return _convert_unsigned(data, 'q')
     elif ctype == parquet_thrift.ConvertedType.JSON:
         return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
     elif ctype == parquet_thrift.ConvertedType.BSON and bson:
         return [bson.BSON(s).decode() for s in data]
     else:
-        logger.warn("Converted type '{}'' not handled".format(
-            parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype]))
-    return data
+        logger.info("Converted type '%s'' not handled",
+                    parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype])  # pylint:disable=protected-access
+    return data
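
As an illustration of the signed-to-unsigned trick used by _convert_unsigned() above, a small sketch with sample values only: packing with the signed format code and unpacking the same buffer with the unsigned code reinterprets the bytes in bulk.

    import struct

    data = [-1, 0, 2]                       # signed values as decoded from the column
    packed = struct.pack("3b", *data)       # pack using the signed format code 'b'
    unsigned = struct.unpack("3B", packed)  # reinterpret as unsigned -> (255, 0, 2)
    assert unsigned == (255, 0, 2)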

+ 59 - 56
desktop/core/ext-py/parquet-1.1/parquet/encoding.py

@@ -1,3 +1,4 @@
+"""encoding.py - methods for reading parquet encoded data blocks."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -5,67 +6,67 @@ from __future__ import unicode_literals
 
 import array
 import io
+import logging
 import math
 import os
 import struct
-import logging
 
 import thriftpy
 
 THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
-parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))
+parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))  # pylint: disable=invalid-name
 
-logger = logging.getLogger("parquet")
+logger = logging.getLogger("parquet")  # pylint: disable=invalid-name
 
 
-def read_plain_boolean(fo, count):
-    """Reads `count` booleans using the plain encoding"""
+def read_plain_boolean(file_obj, count):
+    """Read `count` booleans using the plain encoding."""
     # for bit packed, the count is stored shifted up. But we want to pass in a count,
     # so we shift up.
     # bit width is 1 for a single-bit boolean.
-    return read_bitpacked(fo, count << 1, 1, logger.isEnabledFor(logging.DEBUG))
+    return read_bitpacked(file_obj, count << 1, 1, logger.isEnabledFor(logging.DEBUG))
 
 
-def read_plain_int32(fo, count):
-    """Reads `count` 32-bit ints using the plain encoding"""
+def read_plain_int32(file_obj, count):
+    """Read `count` 32-bit ints using the plain encoding."""
     length = 4 * count
-    data = fo.read(length)
+    data = file_obj.read(length)
     if len(data) != length:
         raise EOFError("Expected {} bytes but got {0} bytes".format(length, len(data)))
     res = struct.unpack(b"<{0}i".format(count), data)
     return res
 
 
-def read_plain_int64(fo, count):
-    """Reads `count` 64-bit ints using the plain encoding"""
-    return struct.unpack(b"<{0}q".format(count), fo.read(8 * count))
+def read_plain_int64(file_obj, count):
+    """Read `count` 64-bit ints using the plain encoding."""
+    return struct.unpack(b"<{0}q".format(count), file_obj.read(8 * count))
 
 
-def read_plain_int96(fo, count):
-    """Reads `count` 96-bit ints using the plain encoding"""
-    items = struct.unpack(b"<qi" * count, fo.read(12) * count)
+def read_plain_int96(file_obj, count):
+    """Read `count` 96-bit ints using the plain encoding."""
+    items = struct.unpack(b"<qi" * count, file_obj.read(12) * count)
     args = [iter(items)] * 2
     return [q << 32 | i for (q, i) in zip(*args)]
 
 
-def read_plain_float(fo, count):
-    """Reads `count` 32-bit floats using the plain encoding"""
-    return struct.unpack(b"<{0}f".format(count), fo.read(4 * count))
+def read_plain_float(file_obj, count):
+    """Read `count` 32-bit floats using the plain encoding."""
+    return struct.unpack(b"<{0}f".format(count), file_obj.read(4 * count))
 
 
-def read_plain_double(fo, count):
-    """Reads `count` 64-bit float (double) using the plain encoding"""
-    return struct.unpack(b"<{0}d".format(count), fo.read(8 * count))
+def read_plain_double(file_obj, count):
+    """Read `count` 64-bit float (double) using the plain encoding."""
+    return struct.unpack(b"<{0}d".format(count), file_obj.read(8 * count))
 
 
-def read_plain_byte_array(fo, count):
-    """Read `count` byte arrays using the plain encoding"""
-    return [fo.read(struct.unpack(b"<i", fo.read(4))[0]) for i in range(count)]
+def read_plain_byte_array(file_obj, count):
+    """Read `count` byte arrays using the plain encoding."""
+    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
 
 
-def read_plain_byte_array_fixed(fo, fixed_length):
-    """Reads a byte array of the given fixed_length"""
-    return fo.read(fixed_length)
+def read_plain_byte_array_fixed(file_obj, fixed_length):
+    """Read a byte array of the given fixed_length."""
+    return file_obj.read(fixed_length)
 
 
 DECODE_PLAIN = {
@@ -80,19 +81,20 @@ DECODE_PLAIN = {
 }
 
 
-def read_plain(fo, type_, count):
-    """Reads `count` items `type` from the fo using the plain encoding."""
+def read_plain(file_obj, type_, count):
+    """Read `count` items `type` from the fo using the plain encoding."""
     if count == 0:
         return []
     conv = DECODE_PLAIN[type_]
-    return conv(fo, count)
+    return conv(file_obj, count)
 
 
-def read_unsigned_var_int(fo):
+def read_unsigned_var_int(file_obj):
+    """Read a value using the unsigned, variable int encoding."""
     result = 0
     shift = 0
     while True:
-        byte = struct.unpack(b"<B", fo.read(1))[0]
+        byte = struct.unpack(b"<B", file_obj.read(1))[0]
         result |= ((byte & 0x7F) << shift)
         if (byte & 0x80) == 0:
             break
@@ -100,7 +102,7 @@ def read_unsigned_var_int(fo):
     return result
 
 
-def read_rle(fo, header, bit_width, debug_logging):
+def read_rle(file_obj, header, bit_width, debug_logging):
     """Read a run-length encoded run from the given fo with the given header
     """Read a run-length encoded run from the given fo with the given header
     and bit_width.
     and bit_width.
 
 
@@ -110,28 +112,28 @@ def read_rle(fo, header, bit_width, debug_logging):
     count = header >> 1
     zero_data = b"\x00\x00\x00\x00"
     width = (bit_width + 7) // 8
-    data = fo.read(width)
+    data = file_obj.read(width)
     data = data + zero_data[len(data):]
     value = struct.unpack(b"<i", data)[0]
     if debug_logging:
         logger.debug("Read RLE group with value %s of byte-width %s and count %s",
                      value, width, count)
-    for i in range(count):
+    for _ in range(count):
         yield value
 
 
 def width_from_max_int(value):
-    """Converts the value specified to a bit_width."""
+    """Convert the value specified to a bit_width."""
     return int(math.ceil(math.log(value + 1, 2)))
 
 
 def _mask_for_bits(i):
-    """Helper function for read_bitpacked to generage a mask to grab i bits."""
+    """Generate a mask to grab `i` bits from an int value."""
     return (1 << i) - 1
 
 
-def read_bitpacked(fo, header, width, debug_logging):
-    """Reads a bitpacked run of the rle/bitpack hybrid.
+def read_bitpacked(file_obj, header, width, debug_logging):
+    """Read a bitpacked run of the rle/bitpack hybrid.
 
     Supports width >8 (crossing bytes).
     """
@@ -140,40 +142,41 @@ def read_bitpacked(fo, header, width, debug_logging):
     byte_count = (width * count) // 8
     if debug_logging:
         logger.debug("Reading a bit-packed run with: %s groups, count %s, bytes %s",
-            num_groups, count, byte_count)
-    raw_bytes = array.array(str('B'), fo.read(byte_count)).tolist()
+                     num_groups, count, byte_count)
+    raw_bytes = array.array(str('B'), file_obj.read(byte_count)).tolist()
     current_byte = 0
-    b = raw_bytes[current_byte]
+    data = raw_bytes[current_byte]
     mask = _mask_for_bits(width)
     bits_wnd_l = 8
     bits_wnd_r = 0
     res = []
-    total = len(raw_bytes)*8;
-    while (total >= width):
-        # TODO zero-padding could produce extra zero-values
+    total = len(raw_bytes) * 8
+    while total >= width:
+        # NOTE zero-padding could produce extra zero-values
         if debug_logging:
             logger.debug("  read bitpacked: width=%s window=(%s %s) b=%s,"
                          " current_byte=%s",
-                         width, bits_wnd_l, bits_wnd_r, bin(b), current_byte)
+                         width, bits_wnd_l, bits_wnd_r, bin(data), current_byte)
         if bits_wnd_r >= 8:
             bits_wnd_r -= 8
             bits_wnd_l -= 8
-            b >>= 8
+            data >>= 8
         elif bits_wnd_l - bits_wnd_r >= width:
-            res.append((b >> bits_wnd_r) & mask)
+            res.append((data >> bits_wnd_r) & mask)
             total -= width
             bits_wnd_r += width
             if debug_logging:
                 logger.debug("  read bitpackage: added: %s", res[-1])
         elif current_byte + 1 < len(raw_bytes):
             current_byte += 1
-            b |= (raw_bytes[current_byte] << bits_wnd_l)
+            data |= (raw_bytes[current_byte] << bits_wnd_l)
             bits_wnd_l += 8
     return res
 
 
-def read_bitpacked_deprecated(fo, byte_count, count, width, debug_logging):
-    raw_bytes = array.array(str('B'), fo.read(byte_count)).tolist()
+def read_bitpacked_deprecated(file_obj, byte_count, count, width, debug_logging):
+    """Read `count` values from `fo` using the deprecated bitpacking encoding."""
+    raw_bytes = array.array(str('B'), file_obj.read(byte_count)).tolist()
 
     mask = _mask_for_bits(width)
     index = 0
@@ -204,17 +207,17 @@ def read_bitpacked_deprecated(fo, byte_count, count, width, debug_logging):
     return res
 
 
-def read_rle_bit_packed_hybrid(fo, width, length=None):
-    """Implemenation of a decoder for the rel/bit-packed hybrid encoding.
+def read_rle_bit_packed_hybrid(file_obj, width, length=None):
+    """Read values from `fo` using the rel/bit-packed hybrid encoding.
 
     If length is not specified, then a 32-bit int is read first to grab the
     length of the encoded data.
     """
     debug_logging = logger.isEnabledFor(logging.DEBUG)
-    io_obj = fo
+    io_obj = file_obj
     if length is None:
-        length = read_plain_int32(fo, 1)[0]
-        raw_bytes = fo.read(length)
+        length = read_plain_int32(file_obj, 1)[0]
+        raw_bytes = file_obj.read(length)
         if raw_bytes == b'':
             return None
         io_obj = io.BytesIO(raw_bytes)
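
To make the width and mask helpers above concrete, a small worked example (values chosen for illustration only): a run whose largest value is 7 needs ceil(log2(7 + 1)) = 3 bits, and the matching bit mask is (1 << 3) - 1.

    import math

    # Mirrors width_from_max_int() and _mask_for_bits() above with a sample value.
    max_value = 7
    bit_width = int(math.ceil(math.log(max_value + 1, 2)))  # -> 3
    mask = (1 << bit_width) - 1                              # -> 0b111 == 7
    assert bit_width == 3 and mask == 7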

+ 12 - 10
desktop/core/ext-py/parquet-1.1/parquet/schema.py

@@ -1,4 +1,4 @@
-"""Utils for working with the parquet thrift models"""
+"""Utils for working with the parquet thrift models."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -10,11 +10,14 @@ import thriftpy
 
 
 THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
-parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))
+parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift"))  # pylint: disable=invalid-name
+
 
 class SchemaHelper(object):
+    """Utility providing convenience methods for schema_elements."""
 
     def __init__(self, schema_elements):
+        """Initialize with the specified schema_elements."""
         self.schema_elements = schema_elements
         self.schema_elements_by_name = dict(
             [(se.name, se) for se in schema_elements])
@@ -25,24 +28,23 @@ class SchemaHelper(object):
         return self.schema_elements_by_name[name]
 
     def is_required(self, name):
-        """Returns true iff the schema element with the given name is
-        required"""
+        """Return true iff the schema element with the given name is required."""
         return self.schema_element(name).repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED
 
     def max_repetition_level(self, path):
-        """get the max repetition level for the given schema path."""
+        """Get the max repetition level for the given schema path."""
         max_level = 0
         max_level = 0
         for part in path:
         for part in path:
-            se = self.schema_element(part)
-            if se.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED:
+            element = self.schema_element(part)
+            if element.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED:
                 max_level += 1
                 max_level += 1
         return max_level
         return max_level
 
 
     def max_definition_level(self, path):
     def max_definition_level(self, path):
-        """get the max definition level for the given schema path."""
+        """Get the max definition level for the given schema path."""
         max_level = 0
         max_level = 0
         for part in path:
         for part in path:
-            se = self.schema_element(part)
-            if se.repetition_type != parquet_thrift.FieldRepetitionType.REQUIRED:
+            element = self.schema_element(part)
+            if element.repetition_type != parquet_thrift.FieldRepetitionType.REQUIRED:
                 max_level += 1
                 max_level += 1
         return max_level
         return max_level
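
A small usage sketch for the SchemaHelper changes above, assuming the patched `parquet` package is importable; the element names are borrowed from the nation test data and the construction is illustrative only.

    from parquet import parquet_thrift, schema

    elements = [
        parquet_thrift.SchemaElement(
            name="n_nationkey",
            repetition_type=parquet_thrift.FieldRepetitionType.REQUIRED),
        parquet_thrift.SchemaElement(
            name="n_comment",
            repetition_type=parquet_thrift.FieldRepetitionType.OPTIONAL),
    ]
    helper = schema.SchemaHelper(elements)
    print(helper.is_required("n_nationkey"))                          # True
    print(helper.max_definition_level(["n_nationkey", "n_comment"]))  # 1, only non-REQUIRED parts count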

+ 7 - 4
desktop/core/ext-py/parquet-1.1/parquet/thrift_filetransport.py

@@ -1,17 +1,20 @@
+"""thrift_filetransport.py - read thrift encoded data from a file object."""
 from __future__ import absolute_import
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import division
 from __future__ import print_function
 from __future__ import print_function
 from __future__ import unicode_literals
 from __future__ import unicode_literals
 
 
-
 from thriftpy.transport import TTransportBase
 from thriftpy.transport import TTransportBase
 
 
-class TFileTransport(TTransportBase):
+
+class TFileTransport(TTransportBase):  # pylint: disable=too-few-public-methods
+    """TTransportBase implementation for decoding data from a file object."""
 
 
     def __init__(self, fo):
     def __init__(self, fo):
-        """fo -- the file object to read from"""
+        """Initialize with `fo`, the file object to read from."""
         self._fo = fo
         self._fo = fo
         self._pos = fo.tell()
         self._pos = fo.tell()
 
 
     def _read(self, sz):
     def _read(self, sz):
-        return self._fo.read(sz)
+        """Read data `sz` bytes."""
+        return self._fo.read(sz)

+ 5 - 2
desktop/core/ext-py/parquet-1.1/setup.py

@@ -1,9 +1,12 @@
+"""setup.py - build script for parquet-python."""
+
 try:
 try:
     from setuptools import setup
     from setuptools import setup
 except ImportError:
 except ImportError:
     from distutils.core import setup
     from distutils.core import setup
 
 
-setup(name='parquet',
+setup(
+    name='parquet',
     version='1.1',
     version='1.1',
     description='Python support for Parquet file format',
     description='Python support for Parquet file format',
     author='Joe Crobak',
     author='Joe Crobak',
@@ -24,7 +27,7 @@ setup(name='parquet',
         'Programming Language :: Python :: Implementation :: CPython',
         'Programming Language :: Python :: Implementation :: CPython',
         'Programming Language :: Python :: Implementation :: PyPy',
         'Programming Language :: Python :: Implementation :: PyPy',
     ],
     ],
-    packages=[ 'parquet' ],
+    packages=['parquet'],
     install_requires=[
     install_requires=[
         'thriftpy>=0.3.6',
         'thriftpy>=0.3.6',
     ],
     ],

+ 197 - 173
desktop/core/ext-py/parquet-1.1/test/test_converted_types.py

@@ -1,189 +1,213 @@
 # -*- coding: UTF-8 -*-
 # -*- coding: UTF-8 -*-
+"""test_converted_types.py - tests for decoding data to their logical data types."""
 from __future__ import absolute_import
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import division
 from __future__ import print_function
 from __future__ import print_function
 from __future__ import unicode_literals
 from __future__ import unicode_literals
 
 
-import array
 import datetime
 import datetime
-import struct
-import io
 import unittest
 import unittest
 from decimal import Decimal
 from decimal import Decimal
 
 
-from parquet.converted_types import convert_column
 from parquet import parquet_thrift as pt
 from parquet import parquet_thrift as pt
-from bson import Binary
+from parquet.converted_types import convert_column
 
 
-class TestDecimal(unittest.TestCase):
 
 
-	def test_int32(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.DECIMAL,
-			scale=10,
-			precision=9
-		)
-
-		self.assertEquals(
-			convert_column([9876543210], schema)[0],
-			Decimal('9.87654321')
-		)
-
-	def test_int64(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.INT64,
-			name="test",
-			converted_type=pt.ConvertedType.DECIMAL,
-			scale=3,
-			precision=13
-		)
-
-		self.assertEquals(
-			convert_column([1099511627776], schema)[0],
-			Decimal('10995116277.76')
-		)
-
-	def test_fixedlength(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.FIXED_LEN_BYTE_ARRAY,
-			type_length=3,
-			name="test",
-			converted_type=pt.ConvertedType.DECIMAL,
-			scale=3,
-			precision=13
-		)
-
-		self.assertEquals(
-			convert_column([b'\x02\x00\x01'], schema)[0],
-			Decimal('1310.73')
-		)
-
-	def test_binary(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.BYTE_ARRAY,
-			name="test",
-			converted_type=pt.ConvertedType.DECIMAL,
-			scale=3,
-			precision=13
-		)
-
-		self.assertEquals(
-			convert_column([b'\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01'], schema)[0],
-			Decimal('94447329657392904273.93')
-		)
+class TestDecimal(unittest.TestCase):
+    """Test the decimal converted type."""
+
+    def test_int32(self):
+        """Test decimal data stored as int32."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.DECIMAL,
+            scale=10,
+            precision=9
+        )
+
+        self.assertEqual(
+            convert_column([9876543210], schema)[0],
+            Decimal('9.87654321')
+        )
+
+    def test_int64(self):
+        """Test decimal data stored as int64."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT64,
+            name="test",
+            converted_type=pt.ConvertedType.DECIMAL,
+            scale=3,
+            precision=13
+        )
+
+        self.assertEqual(
+            convert_column([1099511627776], schema)[0],
+            Decimal('10995116277.76')
+        )
+
+    def test_fixedlength(self):
+        """Test decimal data stored as fixed length bytes."""
+        schema = pt.SchemaElement(
+            type=pt.Type.FIXED_LEN_BYTE_ARRAY,
+            type_length=3,
+            name="test",
+            converted_type=pt.ConvertedType.DECIMAL,
+            scale=3,
+            precision=13
+        )
+
+        self.assertEqual(
+            convert_column([b'\x02\x00\x01'], schema)[0],
+            Decimal('1310.73')
+        )
+
+    def test_binary(self):
+        """Test decimal data stored as bytes."""
+        schema = pt.SchemaElement(
+            type=pt.Type.BYTE_ARRAY,
+            name="test",
+            converted_type=pt.ConvertedType.DECIMAL,
+            scale=3,
+            precision=13
+        )
+
+        self.assertEqual(
+            convert_column([b'\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01'], schema)[0],
+            Decimal('94447329657392904273.93')
+        )
 
 
 
 
 class TestDateTypes(unittest.TestCase):
 class TestDateTypes(unittest.TestCase):
-
-	def test_date(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.DATE,
-		)
-		self.assertEquals(
-			convert_column([731888], schema)[0],
-			datetime.date(2004, 11, 3)
-		)
-
-	def test_time_millis(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.TIME_MILLIS,
-		)
-		self.assertEquals(
-			convert_column([731888], schema)[0],
-			datetime.timedelta(milliseconds=731888)
-		)
-
-	def test_timestamp_millis(self):
-		schema = pt.SchemaElement(
-			type=pt.Type.INT64,
-			name="test",
-			converted_type=pt.ConvertedType.TIMESTAMP_MILLIS,
-		)
-		self.assertEquals(
-			convert_column([1099511625014], schema)[0],
-			datetime.datetime(2004, 11, 3, 19, 53, 45, 14*1000)
-		)
-
-	def test_utf8(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.BYTE_ARRAY,
-			name="test",
-			converted_type=pt.ConvertedType.UTF8
-		)
-		data = b'foo\xf0\x9f\x91\xbe'
-		self.assertEquals(
-			convert_column([data], schema)[0],
-			'foo👾'
-		)
-
-	def test_uint8(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.UINT_8
-		)
-		self.assertEquals(
-			convert_column([-3], schema)[0],
-			253
-		)
-
-	def test_uint16(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.UINT_16
-		)
-		self.assertEquals(
-			convert_column([-3], schema)[0],
-			65533
-		)
-
-	def test_uint32(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.INT32,
-			name="test",
-			converted_type=pt.ConvertedType.UINT_32
-		)
-		self.assertEquals(
-			convert_column([-6884376], schema)[0],
-			4288082920
-		)
-
-	def test_uint64(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.INT64,
-			name="test",
-			converted_type=pt.ConvertedType.UINT_64
-		)
-		self.assertEquals(
-			convert_column([-6884376], schema)[0],
-			18446744073702667240
-		)
-
-	def test_json(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.BYTE_ARRAY,
-			name="test",
-			converted_type=pt.ConvertedType.JSON
-		)
-		self.assertEquals(
-			convert_column([b'{"foo": ["bar", "\\ud83d\\udc7e"]}'], schema)[0],
-			{'foo': ['bar','👾']}
-		)
-
-	def test_bson(self):
-		schema = pt.SchemaElement(
-			type = pt.Type.BYTE_ARRAY,
-			name="test",
-			converted_type=pt.ConvertedType.BSON
-		)
-		self.assertEquals(
-			convert_column([b'&\x00\x00\x00\x04foo\x00\x1c\x00\x00\x00\x020\x00\x04\x00\x00\x00bar\x00\x021\x00\x05\x00\x00\x00\xf0\x9f\x91\xbe\x00\x00\x00'], schema)[0],
-			{'foo': ['bar','👾']}
-		)
+    """Test date types."""
+
+    def test_date(self):
+        """Test int32 encoding a date."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.DATE,
+        )
+        self.assertEqual(
+            convert_column([731888], schema)[0],
+            datetime.date(2004, 11, 3)
+        )
+
+    def test_time_millis(self):
+        """Test int32 encoding a timedelta in millis."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.TIME_MILLIS,
+        )
+        self.assertEqual(
+            convert_column([731888], schema)[0],
+            datetime.timedelta(milliseconds=731888)
+        )
+
+    def test_timestamp_millis(self):
+        """Test int64 encoding a datetime."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT64,
+            name="test",
+            converted_type=pt.ConvertedType.TIMESTAMP_MILLIS,
+        )
+        self.assertEqual(
+            convert_column([1099511625014], schema)[0],
+            datetime.datetime(2004, 11, 3, 19, 53, 45, 14 * 1000)
+        )
+
+
+class TestSBytes(unittest.TestCase):
+    """Test encoding of bytes."""
+
+    def test_utf8(self):
+        """Test bytes representing utf-8 string."""
+        schema = pt.SchemaElement(
+            type=pt.Type.BYTE_ARRAY,
+            name="test",
+            converted_type=pt.ConvertedType.UTF8
+        )
+        data = b'foo\xf0\x9f\x91\xbe'
+        self.assertEqual(
+            convert_column([data], schema)[0],
+            'foo👾'
+        )
+
+    def test_json(self):
+        """Test bytes representing json."""
+        schema = pt.SchemaElement(
+            type=pt.Type.BYTE_ARRAY,
+            name="test",
+            converted_type=pt.ConvertedType.JSON
+        )
+        self.assertEqual(
+            convert_column([b'{"foo": ["bar", "\\ud83d\\udc7e"]}'], schema)[0],
+            {'foo': ['bar', '👾']}
+        )
+
+    def test_bson(self):
+        """Test bytes representing bson."""
+        schema = pt.SchemaElement(
+            type=pt.Type.BYTE_ARRAY,
+            name="test",
+            converted_type=pt.ConvertedType.BSON
+        )
+        self.assertEqual(
+            convert_column(
+                [b'&\x00\x00\x00\x04foo\x00\x1c\x00\x00\x00\x020'
+                 b'\x00\x04\x00\x00\x00bar\x00\x021\x00\x05\x00\x00\x00\xf0\x9f\x91\xbe\x00\x00\x00'], schema)[0],
+            {'foo': ['bar', '👾']}
+        )
+
+
+class TestUnsignedInts(unittest.TestCase):
+    """Test data types stored signed by representing unsigned ints."""
+
+    def test_uint8(self):
+        """Test decoding int32 as uint8."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.UINT_8
+        )
+        self.assertEqual(
+            convert_column([-3], schema)[0],
+            253
+        )
+
+    def test_uint16(self):
+        """Test decoding int32 as uint16."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.UINT_16
+        )
+        self.assertEqual(
+            convert_column([-3], schema)[0],
+            65533
+        )
+
+    def test_uint32(self):
+        """Test decoding int32 as uint32."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT32,
+            name="test",
+            converted_type=pt.ConvertedType.UINT_32
+        )
+        self.assertEqual(
+            convert_column([-6884376], schema)[0],
+            4288082920
+        )
+
+    def test_uint64(self):
+        """Test decoding int64 as uint64."""
+        schema = pt.SchemaElement(
+            type=pt.Type.INT64,
+            name="test",
+            converted_type=pt.ConvertedType.UINT_64
+        )
+        self.assertEqual(
+            convert_column([-6884376], schema)[0],
+            18446744073702667240
+        )
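
The expected values in TestUnsignedInts are simply the stored signed integers reinterpreted in two's complement. A quick sketch that reproduces them; the helper name is made up.

    def as_unsigned(value, bits):
        """Reinterpret a signed integer as an unsigned integer of the given bit width."""
        return value % (1 << bits)

    assert as_unsigned(-3, 8) == 253
    assert as_unsigned(-3, 16) == 65533
    assert as_unsigned(-6884376, 32) == 4288082920
    assert as_unsigned(-6884376, 64) == 18446744073702667240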

+ 54 - 34
desktop/core/ext-py/parquet-1.1/test/test_encoding.py

@@ -1,34 +1,39 @@
+"""test_encoding.py - tests for deserializing parquet data."""
 import array
 import array
-import struct
 import io
 import io
+import struct
 import unittest
 import unittest
 
 
-from parquet import parquet_thrift
 import parquet.encoding
 import parquet.encoding
-from nose import SkipTest
+from parquet import parquet_thrift
 
 
 
 
 class TestPlain(unittest.TestCase):
 class TestPlain(unittest.TestCase):
+    """Test plain encoding."""
 
 
     def test_int32(self):
     def test_int32(self):
-        self.assertEquals(
+        """Test reading bytes containing int32 data."""
+        self.assertEqual(
             999,
             999,
             parquet.encoding.read_plain_int32(
             parquet.encoding.read_plain_int32(
                 io.BytesIO(struct.pack("<i", 999)), 1)[0])
                 io.BytesIO(struct.pack("<i", 999)), 1)[0])
 
 
     def test_int64(self):
     def test_int64(self):
-        self.assertEquals(
+        """Test reading bytes containing int64 data."""
+        self.assertEqual(
             999,
             999,
             parquet.encoding.read_plain_int64(
             parquet.encoding.read_plain_int64(
                 io.BytesIO(struct.pack("<q", 999)), 1)[0])
                 io.BytesIO(struct.pack("<q", 999)), 1)[0])
 
 
     def test_int96(self):
     def test_int96(self):
-        self.assertEquals(
+        """Test reading bytes containing int96 data."""
+        self.assertEqual(
             999,
             999,
             parquet.encoding.read_plain_int96(
             parquet.encoding.read_plain_int96(
                 io.BytesIO(struct.pack("<qi", 0, 999)), 1)[0])
                 io.BytesIO(struct.pack("<qi", 0, 999)), 1)[0])
 
 
     def test_float(self):
     def test_float(self):
+        """Test reading bytes containing float data."""
         self.assertAlmostEquals(
         self.assertAlmostEquals(
             9.99,
             9.99,
             parquet.encoding.read_plain_float(
             parquet.encoding.read_plain_float(
@@ -36,100 +41,115 @@ class TestPlain(unittest.TestCase):
             2)
             2)
 
 
     def test_double(self):
     def test_double(self):
-        self.assertEquals(
+        """Test reading bytes containing double data."""
+        self.assertEqual(
             9.99,
             9.99,
             parquet.encoding.read_plain_double(
             parquet.encoding.read_plain_double(
                 io.BytesIO(struct.pack("<d", 9.99)), 1)[0])
                 io.BytesIO(struct.pack("<d", 9.99)), 1)[0])
 
 
     def test_fixed(self):
     def test_fixed(self):
+        """Test reading bytes containing fixed bytes data."""
         data = b"foobar"
         data = b"foobar"
         fo = io.BytesIO(data)
         fo = io.BytesIO(data)
-        self.assertEquals(
+        self.assertEqual(
             data[:3],
             data[:3],
             parquet.encoding.read_plain_byte_array_fixed(
             parquet.encoding.read_plain_byte_array_fixed(
                 fo, 3))
                 fo, 3))
-        self.assertEquals(
+        self.assertEqual(
             data[3:],
             data[3:],
             parquet.encoding.read_plain_byte_array_fixed(
             parquet.encoding.read_plain_byte_array_fixed(
                 fo, 3))
                 fo, 3))
 
 
     def test_fixed_read_plain(self):
     def test_fixed_read_plain(self):
+        """Test reading bytes containing fixed bytes data."""
         data = b"foobar"
         data = b"foobar"
         fo = io.BytesIO(data)
         fo = io.BytesIO(data)
-        self.assertEquals(
+        self.assertEqual(
             data[:3],
             data[:3],
             parquet.encoding.read_plain(
             parquet.encoding.read_plain(
                 fo, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, 3))
                 fo, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, 3))
 
 
     def test_boolean(self):
     def test_boolean(self):
+        """Test reading bytes containing boolean data."""
         data = 0b1101
         data = 0b1101
         fo = io.BytesIO(struct.pack("<i", data))
         fo = io.BytesIO(struct.pack("<i", data))
-        self.assertEquals(
+        self.assertEqual(
             [True, False, True, True],
             [True, False, True, True],
             parquet.encoding.read_plain_boolean(fo, 1)[:4]
             parquet.encoding.read_plain_boolean(fo, 1)[:4]
         )
         )
 
 
 
 
 class TestRle(unittest.TestCase):
 class TestRle(unittest.TestCase):
+    """Test reading run-length encoded data."""
 
 
     def testFourByteValue(self):
     def testFourByteValue(self):
+        """Test reading a run with a single four-byte value."""
         fo = io.BytesIO(struct.pack("<i", 1 << 30))
         fo = io.BytesIO(struct.pack("<i", 1 << 30))
         out = parquet.encoding.read_rle(fo, 2 << 1, 30, True)
         out = parquet.encoding.read_rle(fo, 2 << 1, 30, True)
-        self.assertEquals([1 << 30] * 2, list(out))
+        self.assertEqual([1 << 30] * 2, list(out))
 
 
 
 
 class TestVarInt(unittest.TestCase):
 class TestVarInt(unittest.TestCase):
+    """Test reading variable-int encoded values."""
 
 
     def testSingleByte(self):
     def testSingleByte(self):
+        """Test reading a single byte value."""
         fo = io.BytesIO(struct.pack("<B", 0x7F))
         fo = io.BytesIO(struct.pack("<B", 0x7F))
         out = parquet.encoding.read_unsigned_var_int(fo)
         out = parquet.encoding.read_unsigned_var_int(fo)
-        self.assertEquals(0x7F, out)
+        self.assertEqual(0x7F, out)
 
 
     def testFourByte(self):
     def testFourByte(self):
+        """Test reading a four byte value."""
         fo = io.BytesIO(struct.pack("<BBBB", 0xFF, 0xFF, 0xFF, 0x7F))
         fo = io.BytesIO(struct.pack("<BBBB", 0xFF, 0xFF, 0xFF, 0x7F))
         out = parquet.encoding.read_unsigned_var_int(fo)
         out = parquet.encoding.read_unsigned_var_int(fo)
-        self.assertEquals(0x0FFFFFFF, out)
+        self.assertEqual(0x0FFFFFFF, out)
 
 
 
 
 class TestBitPacked(unittest.TestCase):
 class TestBitPacked(unittest.TestCase):
+    """Test reading bit-packed encoded data."""
 
 
     def testFromExample(self):
     def testFromExample(self):
+        """Test a simple example."""
         raw_data_in = [0b10001000, 0b11000110, 0b11111010]
         raw_data_in = [0b10001000, 0b11000110, 0b11111010]
         encoded_bitstring = array.array('B', raw_data_in).tostring()
         encoded_bitstring = array.array('B', raw_data_in).tostring()
         fo = io.BytesIO(encoded_bitstring)
         fo = io.BytesIO(encoded_bitstring)
         count = 3 << 1
         count = 3 << 1
         res = parquet.encoding.read_bitpacked(fo, count, 3, True)
         res = parquet.encoding.read_bitpacked(fo, count, 3, True)
-        self.assertEquals(list(range(8)), res)
+        self.assertEqual(list(range(8)), res)
 
 
 
 
 class TestBitPackedDeprecated(unittest.TestCase):
 class TestBitPackedDeprecated(unittest.TestCase):
+    """Test reading the deprecated bit-packed encoded data."""
 
 
     def testFromExample(self):
     def testFromExample(self):
+        """Test a simple example."""
         encoded_bitstring = array.array(
         encoded_bitstring = array.array(
             'B', [0b00000101, 0b00111001, 0b01110111]).tostring()
             'B', [0b00000101, 0b00111001, 0b01110111]).tostring()
         fo = io.BytesIO(encoded_bitstring)
         fo = io.BytesIO(encoded_bitstring)
         res = parquet.encoding.read_bitpacked_deprecated(fo, 3, 8, 3, True)
         res = parquet.encoding.read_bitpacked_deprecated(fo, 3, 8, 3, True)
-        self.assertEquals(list(range(8)), res)
+        self.assertEqual(list(range(8)), res)
 
 
 
 
 class TestWidthFromMaxInt(unittest.TestCase):
 class TestWidthFromMaxInt(unittest.TestCase):
+    """Test determining the max width for an int."""
 
 
     def testWidths(self):
     def testWidths(self):
-        self.assertEquals(0, parquet.encoding.width_from_max_int(0))
-        self.assertEquals(1, parquet.encoding.width_from_max_int(1))
-        self.assertEquals(2, parquet.encoding.width_from_max_int(2))
-        self.assertEquals(2, parquet.encoding.width_from_max_int(3))
-        self.assertEquals(3, parquet.encoding.width_from_max_int(4))
-        self.assertEquals(3, parquet.encoding.width_from_max_int(5))
-        self.assertEquals(3, parquet.encoding.width_from_max_int(6))
-        self.assertEquals(3, parquet.encoding.width_from_max_int(7))
-        self.assertEquals(4, parquet.encoding.width_from_max_int(8))
-        self.assertEquals(4, parquet.encoding.width_from_max_int(15))
-        self.assertEquals(5, parquet.encoding.width_from_max_int(16))
-        self.assertEquals(5, parquet.encoding.width_from_max_int(31))
-        self.assertEquals(6, parquet.encoding.width_from_max_int(32))
-        self.assertEquals(6, parquet.encoding.width_from_max_int(63))
-        self.assertEquals(7, parquet.encoding.width_from_max_int(64))
-        self.assertEquals(7, parquet.encoding.width_from_max_int(127))
-        self.assertEquals(8, parquet.encoding.width_from_max_int(128))
-        self.assertEquals(8, parquet.encoding.width_from_max_int(255))
+        """Test all possible widths for a single byte."""
+        self.assertEqual(0, parquet.encoding.width_from_max_int(0))
+        self.assertEqual(1, parquet.encoding.width_from_max_int(1))
+        self.assertEqual(2, parquet.encoding.width_from_max_int(2))
+        self.assertEqual(2, parquet.encoding.width_from_max_int(3))
+        self.assertEqual(3, parquet.encoding.width_from_max_int(4))
+        self.assertEqual(3, parquet.encoding.width_from_max_int(5))
+        self.assertEqual(3, parquet.encoding.width_from_max_int(6))
+        self.assertEqual(3, parquet.encoding.width_from_max_int(7))
+        self.assertEqual(4, parquet.encoding.width_from_max_int(8))
+        self.assertEqual(4, parquet.encoding.width_from_max_int(15))
+        self.assertEqual(5, parquet.encoding.width_from_max_int(16))
+        self.assertEqual(5, parquet.encoding.width_from_max_int(31))
+        self.assertEqual(6, parquet.encoding.width_from_max_int(32))
+        self.assertEqual(6, parquet.encoding.width_from_max_int(63))
+        self.assertEqual(7, parquet.encoding.width_from_max_int(64))
+        self.assertEqual(7, parquet.encoding.width_from_max_int(127))
+        self.assertEqual(8, parquet.encoding.width_from_max_int(128))
+        self.assertEqual(8, parquet.encoding.width_from_max_int(255))

+ 39 - 19
desktop/core/ext-py/parquet-1.1/test/test_read_support.py

@@ -1,3 +1,5 @@
+"""test_read_support.py - unit and integration tests for reading parquet data."""
+
 from __future__ import absolute_import
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import division
 from __future__ import print_function
 from __future__ import print_function
@@ -26,20 +28,26 @@ CSV_FILE = os.path.join(TEST_DATA, "nation.csv")
 TAB_DELIM = u'\t'
 TAB_DELIM = u'\t'
 PIPE_DELIM = u'|'
 PIPE_DELIM = u'|'
 
 
+
 class TestFileFormat(unittest.TestCase):
 class TestFileFormat(unittest.TestCase):
+    """Test various file-level decoding functions."""
+
     def test_header_magic_bytes(self):
     def test_header_magic_bytes(self):
+        """Test reading the header magic bytes."""
         with tempfile.NamedTemporaryFile() as t:
         with tempfile.NamedTemporaryFile() as t:
             t.write(b"PAR1_some_bogus_data")
             t.write(b"PAR1_some_bogus_data")
             t.flush()
             t.flush()
             self.assertTrue(parquet._check_header_magic_bytes(t))
             self.assertTrue(parquet._check_header_magic_bytes(t))
 
 
     def test_footer_magic_bytes(self):
     def test_footer_magic_bytes(self):
+        """Test reading the footer magic bytes."""
         with tempfile.NamedTemporaryFile() as t:
         with tempfile.NamedTemporaryFile() as t:
             t.write(b"PAR1_some_bogus_data_PAR1")
             t.write(b"PAR1_some_bogus_data_PAR1")
             t.flush()
             t.flush()
             self.assertTrue(parquet._check_footer_magic_bytes(t))
             self.assertTrue(parquet._check_footer_magic_bytes(t))
 
 
     def test_not_parquet_file(self):
     def test_not_parquet_file(self):
+        """Test reading a non-parquet file."""
         with tempfile.NamedTemporaryFile() as t:
         with tempfile.NamedTemporaryFile() as t:
             t.write(b"blah")
             t.write(b"blah")
             t.flush()
             t.flush()
@@ -48,12 +56,15 @@ class TestFileFormat(unittest.TestCase):
 
 
 
 
 class TestMetadata(unittest.TestCase):
 class TestMetadata(unittest.TestCase):
+    """Test various metadata reading functions."""
 
 
     def test_footer_bytes(self):
     def test_footer_bytes(self):
+        """Test reading the footer size value."""
         with io.open(TEST_FILE, 'rb') as fo:
         with io.open(TEST_FILE, 'rb') as fo:
             self.assertEquals(327, parquet._get_footer_size(fo))
             self.assertEquals(327, parquet._get_footer_size(fo))
 
 
     def test_read_footer(self):
     def test_read_footer(self):
+        """Test reading the footer."""
         footer = parquet.read_footer(TEST_FILE)
         footer = parquet.read_footer(TEST_FILE)
         self.assertEquals(
         self.assertEquals(
             set([s.name for s in footer.schema]),
             set([s.name for s in footer.schema]),
@@ -61,12 +72,16 @@ class TestMetadata(unittest.TestCase):
                  "n_comment"]))
                  "n_comment"]))
 
 
     def test_dump_metadata(self):
     def test_dump_metadata(self):
+        """Test dumping metadata."""
         data = io.StringIO()
         data = io.StringIO()
         parquet.dump_metadata(TEST_FILE, data)
         parquet.dump_metadata(TEST_FILE, data)
 
 
+
 class Options(object):
 class Options(object):
+    """Fake Options (a la `__main__.py`)."""
 
 
     def __init__(self, col=None, format='csv', no_headers=True, limit=-1):
     def __init__(self, col=None, format='csv', no_headers=True, limit=-1):
+        """Create a fake options."""
         self.col = col
         self.col = col
         self.format = format
         self.format = format
         self.no_headers = no_headers
         self.no_headers = no_headers
@@ -74,12 +89,10 @@ class Options(object):
 
 
 
 
 class TestReadApi(unittest.TestCase):
 class TestReadApi(unittest.TestCase):
-
-    def test_projection(self):
-        pass
+    """Test the read apis."""
 
 
     def test_limit(self):
     def test_limit(self):
-        """Test the limit option"""
+        """Test the limit option."""
         limit = 2
         limit = 2
         expected_data = []
         expected_data = []
         with io.open(CSV_FILE, 'r', encoding="utf-8") as fo:
         with io.open(CSV_FILE, 'r', encoding="utf-8") as fo:
@@ -94,6 +107,7 @@ class TestReadApi(unittest.TestCase):
 
 
 
 
 class TestCompatibility(object):
 class TestCompatibility(object):
+    """Integration tests for compatibility with reference parquet files."""
 
 
     tc = unittest.TestCase('__init__')
     tc = unittest.TestCase('__init__')
     files = [(os.path.join(TEST_DATA, p), os.path.join(TEST_DATA, "nation.csv")) for p in
     files = [(os.path.join(TEST_DATA, p), os.path.join(TEST_DATA, "nation.csv")) for p in
@@ -102,9 +116,10 @@ class TestCompatibility(object):
               "snappy-nation.impala.parquet"]]
               "snappy-nation.impala.parquet"]]
 
 
     def _test_file_csv(self, parquet_file, csv_file):
     def _test_file_csv(self, parquet_file, csv_file):
-        """ Given the parquet_file and csv_file representation, converts the
-            parquet_file to a csv using the dump utility and then compares the
-            result to the csv_file.
+        """Test the dump function by outputting to a csv file.
+
+        Given the parquet_file and csv_file representation, converts the parquet_file to a csv
+        using the dump utility and then compares the result to the csv_file.
         """
         """
         expected_data = []
         expected_data = []
         with io.open(csv_file, 'r', encoding="utf-8") as f:
         with io.open(csv_file, 'r', encoding="utf-8") as f:
@@ -115,8 +130,6 @@ class TestCompatibility(object):
         actual_raw_data.seek(0, 0)
         actual_raw_data.seek(0, 0)
         actual_data = list(csv.reader(actual_raw_data, delimiter=TAB_DELIM))
         actual_data = list(csv.reader(actual_raw_data, delimiter=TAB_DELIM))
 
 
-        #assert expected_data == actual_data, "{0} != {1}".format(
-        #    str(expected_data), str(actual_data))
         self.tc.assertListEqual(expected_data, actual_data)
         self.tc.assertListEqual(expected_data, actual_data)
 
 
         actual_raw_data = io.StringIO()
         actual_raw_data = io.StringIO()
@@ -126,13 +139,12 @@ class TestCompatibility(object):
         actual_data = list(csv.reader(actual_raw_data, delimiter=TAB_DELIM))[1:]
         actual_data = list(csv.reader(actual_raw_data, delimiter=TAB_DELIM))[1:]
 
 
         self.tc.assertListEqual(expected_data, actual_data)
         self.tc.assertListEqual(expected_data, actual_data)
-        #assert expected_data == actual_data, "{0} != {1}".format(
-        #    str(expected_data), str(actual_data))
 
 
     def _test_file_json(self, parquet_file, csv_file):
     def _test_file_json(self, parquet_file, csv_file):
-        """ Given the parquet_file and csv_file representation, converts the
-            parquet_file to json using the dump utility and then compares the
-            result to the csv_file using column agnostic ordering.
+        """Test the dump function by outputting to a json file.
+
+        Given the parquet_file and csv_file representation, converts the parquet_file to json using
+        the dump utility and then compares the result to the csv_file using column agnostic ordering.
         """
         """
         expected_data = []
         expected_data = []
         with io.open(csv_file, 'r', encoding='utf-8') as f:
         with io.open(csv_file, 'r', encoding='utf-8') as f:
@@ -155,9 +167,10 @@ class TestCompatibility(object):
                     assert expected[i] == actual[c]
                     assert expected[i] == actual[c]
 
 
     def _test_file_custom(self, parquet_file, csv_file):
     def _test_file_custom(self, parquet_file, csv_file):
-        """ Given the parquet_file and csv_file representation, converts the
-            parquet_file to json using the dump utility and then compares the
-            result to the csv_file using column agnostic ordering.
+        """Test the DictReader function against csv data.
+
+        Given the parquet_file and csv_file representation, reads the parquet file using DictReader
+        and then compares the result to the csv_file using column agnostic ordering.
         """
         """
         expected_data = []
         expected_data = []
         with io.open(csv_file, 'r', encoding="utf-8") as f:
         with io.open(csv_file, 'r', encoding="utf-8") as f:
@@ -174,21 +187,28 @@ class TestCompatibility(object):
         for expected, actual in zip(expected_data, actual_data):
         for expected, actual in zip(expected_data, actual_data):
             self.tc.assertEquals(len(expected), len(actual))
             self.tc.assertEquals(len(expected), len(actual))
             for i, c in enumerate([c for c in cols if c in actual]):
             for i, c in enumerate([c for c in cols if c in actual]):
-                self.tc.assertEquals(expected[i],
+                self.tc.assertEquals(
+                    expected[i],
                     actual[c].decode('utf-8') if type(actual[c]) is bytes \
                     actual[c].decode('utf-8') if type(actual[c]) is bytes \
                     # this makes '0' = 0, since csv reads all strings.
                     # this makes '0' = 0, since csv reads all strings.
                     else str(actual[c]))
                     else str(actual[c]))
 
 
     def test_all_files(self):
     def test_all_files(self):
+        """Test all files using the three above test functions.
+
+        This function generates additional tests.
+        """
         for parquet_file, csv_file in self.files:
         for parquet_file, csv_file in self.files:
             yield self._test_file_csv, parquet_file, csv_file
             yield self._test_file_csv, parquet_file, csv_file
             yield self._test_file_json, parquet_file, csv_file
             yield self._test_file_json, parquet_file, csv_file
             yield self._test_file_custom, parquet_file, csv_file
             yield self._test_file_custom, parquet_file, csv_file
 
 
 
 
-class TestDefinitionlevel(unittest.TestCase):
+class TestDefinitionLevel(unittest.TestCase):
+    """Test the DefinitionLevel handling."""
 
 
     def test_null_int(self):
     def test_null_int(self):
+        """Test reading a file that contains null records."""
         with open(os.path.join(TEST_DATA, "test-null.parquet"), "rb") as parquet_fo:
         with open(os.path.join(TEST_DATA, "test-null.parquet"), "rb") as parquet_fo:
             actual_data = list(parquet.DictReader(parquet_fo))
             actual_data = list(parquet.DictReader(parquet_fo))
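
A minimal sketch of the DictReader API exercised by this test; the file path is a placeholder for any readable parquet file.

    import parquet

    with open("test-null.parquet", "rb") as parquet_fo:
        for row in parquet.DictReader(parquet_fo):
            print(row)  # each row is a dict keyed by column name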
 
 

+ 29 - 2
desktop/core/ext-py/parquet-1.1/tox.ini

@@ -1,8 +1,35 @@
 [tox]
 [tox]
-envlist = py27, py34, py35, pypy
+envlist = py27, py34, py35, pypy, flake8, pylint
 [testenv]
 [testenv]
 deps=
 deps=
    nose
    nose
    python-snappy
    python-snappy
    pymongo
    pymongo
-commands=nosetests
+commands=nosetests
+
+# Linters
+[testenv:flake8]
+basepython = python3
+skip_install = true
+deps =
+    flake8
+    flake8-docstrings>=0.2.7
+    flake8-import-order>=0.9
+commands =
+    flake8 parquet/ test/ setup.py
+
+
+# flake8 config
+[flake8]
+max-line-length = 120
+ignore = F841
+exclude = build
+
+
+[testenv:pylint]
+basepython = python3
+deps =
+    pyflakes
+    pylint
+commands =
+    pylint parquet/ setup.py
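
With these environments added, running plain `tox` exercises the full envlist, while `tox -e flake8` or `tox -e pylint` runs only the corresponding linter.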