@@ -1,3 +1,4 @@
+"""parquet - read parquet files."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -20,100 +21,104 @@ except ImportError:

 import thriftpy
 from thriftpy.protocol.compact import TCompactProtocolFactory

-from .converted_types import convert_column
-from .thrift_filetransport import TFileTransport
 from . import encoding
 from . import schema
+from .converted_types import convert_column
+from .thrift_filetransport import TFileTransport

 PY3 = sys.version_info > (3,)

 if PY3:
     import csv
 else:
-    from backports import csv
+    from backports import csv  # pylint: disable=import-error

 THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
-parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift")
+parquet_thrift = thriftpy.load(THRIFT_FILE, module_name="parquet_thrift")  # pylint: disable=invalid-name

-logger = logging.getLogger("parquet")
+logger = logging.getLogger("parquet")  # pylint: disable=invalid-name

 try:
     import snappy
 except ImportError:
-    logger.warn(
+    logger.info(
         "Couldn't import snappy. Support for snappy compression disabled.")


 class ParquetFormatException(Exception):
+    """Generic Exception related to unexpected data format when reading parquet file."""
     pass


-def _check_header_magic_bytes(fo):
-    "Returns true if the file-like obj has the PAR1 magic bytes at the header"
-    fo.seek(0, 0)
-    magic = fo.read(4)
+def _check_header_magic_bytes(file_obj):
+    """Check if the file-like obj has the PAR1 magic bytes at the header."""
+    file_obj.seek(0, 0)
+    magic = file_obj.read(4)
     return magic == b'PAR1'


-def _check_footer_magic_bytes(fo):
-    "Returns true if the file-like obj has the PAR1 magic bytes at the footer"
-    fo.seek(-4, 2)  # seek to four bytes from the end of the file
-    magic = fo.read(4)
+def _check_footer_magic_bytes(file_obj):
+    """Check if the file-like obj has the PAR1 magic bytes at the footer."""
+    file_obj.seek(-4, 2)  # seek to four bytes from the end of the file
+    magic = file_obj.read(4)
     return magic == b'PAR1'


-def _get_footer_size(fo):
-    "Readers the footer size in bytes, which is serialized as little endian"
-    fo.seek(-8, 2)
-    tup = struct.unpack(b"<i", fo.read(4))
+def _get_footer_size(file_obj):
+    """Read the footer size in bytes, which is serialized as little endian."""
+    file_obj.seek(-8, 2)
+    tup = struct.unpack(b"<i", file_obj.read(4))
     return tup[0]


-def _read_footer(fo):
-    """Reads the footer from the given file object, returning a FileMetaData
-    object. This method assumes that the fo references a valid parquet file"""
-    footer_size = _get_footer_size(fo)
+def _read_footer(file_obj):
+    """Read the footer from the given file object and return a FileMetaData object.
+
+    This method assumes that the file_obj references a valid parquet file.
+    """
+    footer_size = _get_footer_size(file_obj)
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug("Footer size in bytes: %s", footer_size)
-    fo.seek(-(8 + footer_size), 2)  # seek to beginning of footer
-    tin = TFileTransport(fo)
+    file_obj.seek(-(8 + footer_size), 2)  # seek to beginning of footer
+    tin = TFileTransport(file_obj)
     pin = TCompactProtocolFactory().get_protocol(tin)
     fmd = parquet_thrift.FileMetaData()
     fmd.read(pin)
     return fmd


-def _read_page_header(fo):
-    """Reads the page_header from the given fo"""
-    tin = TFileTransport(fo)
+def _read_page_header(file_obj):
+    """Read the page_header from the given file_obj."""
+    tin = TFileTransport(file_obj)
     pin = TCompactProtocolFactory().get_protocol(tin)
-    ph = parquet_thrift.PageHeader()
-    ph.read(pin)
-    return ph
+    page_header = parquet_thrift.PageHeader()
+    page_header.read(pin)
+    return page_header


 def read_footer(filename):
-    """Reads and returns the FileMetaData object for the given file."""
-    with open(filename, 'rb') as fo:
-        if not _check_header_magic_bytes(fo) or \
-                not _check_footer_magic_bytes(fo):
+    """Read the footer and return the FileMetaData for the specified filename."""
+    with open(filename, 'rb') as file_obj:
+        if not _check_header_magic_bytes(file_obj) or \
+                not _check_footer_magic_bytes(file_obj):
             raise ParquetFormatException("{0} is not a valid parquet file "
                                          "(missing magic bytes)"
                                          .format(filename))
-        return _read_footer(fo)
+        return _read_footer(file_obj)


 def _get_name(type_, value):
-    """Returns the name for the given value of the given type_ unless value is
-    None, in which case it returns empty string"""
-    return type_._VALUES_TO_NAMES[value] if value is not None else "None"
+    """Return the name for the given value of the given type_.
+
+    The value `None` returns empty string.
+    """
+    return type_._VALUES_TO_NAMES[value] if value is not None else "None"  # pylint: disable=protected-access


 def _get_offset(cmd):
-    """Returns the offset into the cmd based upon if it's a dictionary page or
-    a data page"""
+    """Return the offset into the cmd based upon if it's a dictionary page or a data page."""
     dict_offset = cmd.dictionary_page_offset
     data_offset = cmd.data_page_offset
     if dict_offset is None or data_offset < dict_offset:
@@ -122,7 +127,12 @@ def _get_offset(cmd):


 def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
+    """Dump metadata about the parquet object with the given filename.
+
+    Dump human-readable metadata to specified `out`. Optionally dump the row group metadata as well.
+    """
     def println(value):
+        """Write a new line containing `value` to `out`."""
         out.write(value + "\n")
     footer = read_footer(filename)
     println("File Metadata: {0}".format(filename))
@@ -130,34 +140,35 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
     println("  Num Rows: {0}".format(footer.num_rows))
     println("  k/v metadata: ")
     if footer.key_value_metadata and len(footer.key_value_metadata) > 0:
-        for kv in footer.key_value_metadata:
-            println("    {0}={1}".format(kv.key, kv.value))
+        for item in footer.key_value_metadata:
+            println("    {0}={1}".format(item.key, item.value))
     else:
         println("    (none)")
     println("  schema: ")
-    for se in footer.schema:
+    for element in footer.schema:
         println("    {name} ({type}): length={type_length}, "
                 "repetition={repetition_type}, "
                 "children={num_children}, "
                 "converted_type={converted_type}".format(
-                    name=se.name,
-                    type=parquet_thrift.Type._VALUES_TO_NAMES[se.type] if se.type else None,
-                    type_length=se.type_length,
+                    name=element.name,
+                    type=parquet_thrift.Type._VALUES_TO_NAMES[element.type]  # pylint: disable=protected-access
+                    if element.type else None,
+                    type_length=element.type_length,
                     repetition_type=_get_name(parquet_thrift.FieldRepetitionType,
-                                              se.repetition_type),
-                    num_children=se.num_children,
-                    converted_type=se.converted_type))
+                                              element.repetition_type),
+                    num_children=element.num_children,
+                    converted_type=element.converted_type))
     if show_row_group_metadata:
         println("  row groups: ")
-        for rg in footer.row_groups:
-            num_rows = rg.num_rows
-            bytes = rg.total_byte_size
+        for row_group in footer.row_groups:
+            num_rows = row_group.num_rows
+            size_bytes = row_group.total_byte_size
             println(
                 "  rows={num_rows}, bytes={bytes}".format(num_rows=num_rows,
-                                                          bytes=bytes))
+                                                          bytes=size_bytes))
             println("    chunks:")
-            for cg in rg.columns:
-                cmd = cg.meta_data
+            for col_group in row_group.columns:
+                cmd = col_group.meta_data
                 println("      type={type} file_offset={offset} "
                         "compression={codec} "
                         "encodings={encodings} path_in_schema={path_in_schema} "
@@ -166,7 +177,7 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                         "data_page_offset={data_page_offset} "
                         "dictionary_page_offset={dictionary_page_offset}".format(
                             type=_get_name(parquet_thrift.Type, cmd.type),
-                            offset=cg.file_offset,
+                            offset=col_group.file_offset,
                             codec=_get_name(parquet_thrift.CompressionCodec, cmd.codec),
                             encodings=",".join(
                                 [_get_name(
@@ -177,25 +188,24 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                             compressed_bytes=cmd.total_compressed_size,
                             data_page_offset=cmd.data_page_offset,
                             dictionary_page_offset=cmd.dictionary_page_offset))

-            with open(filename, 'rb') as fo:
+            with open(filename, 'rb') as file_obj:
                 offset = _get_offset(cmd)
-                fo.seek(offset, 0)
+                file_obj.seek(offset, 0)
                 values_read = 0
                 println("      pages: ")
                 while values_read < num_rows:
-                    ph = _read_page_header(fo)
+                    page_header = _read_page_header(file_obj)
                     # seek past current page.
-                    fo.seek(ph.compressed_page_size, 1)
-                    daph = ph.data_page_header
-                    type_ = _get_name(parquet_thrift.PageType, ph.type)
-                    raw_bytes = ph.uncompressed_page_size
+                    file_obj.seek(page_header.compressed_page_size, 1)
+                    daph = page_header.data_page_header
+                    type_ = _get_name(parquet_thrift.PageType, page_header.type)
+                    raw_bytes = page_header.uncompressed_page_size
                     num_values = None
-                    if ph.type == parquet_thrift.PageType.DATA_PAGE:
+                    if page_header.type == parquet_thrift.PageType.DATA_PAGE:
                         num_values = daph.num_values
                         values_read += num_values
-                    if ph.type == parquet_thrift.PageType.DICTIONARY_PAGE:
+                    if page_header.type == parquet_thrift.PageType.DICTIONARY_PAGE:
                         pass
-                        #num_values = diph.num_values
                     encoding_type = None
                     def_level_encoding = None
@@ -220,18 +230,17 @@ def dump_metadata(filename, show_row_group_metadata, out=sys.stdout):
                             rep_level_encoding=rep_level_encoding))


-def _read_page(fo, page_header, column_metadata):
-    """Internal function to read the data page from the given file-object
-    and convert it to raw, uncompressed bytes (if necessary)."""
-    bytes_from_file = fo.read(page_header.compressed_page_size)
+def _read_page(file_obj, page_header, column_metadata):
+    """Read the data page from the given file-object and convert it to raw, uncompressed bytes (if necessary)."""
+    bytes_from_file = file_obj.read(page_header.compressed_page_size)
     codec = column_metadata.codec
     if codec is not None and codec != parquet_thrift.CompressionCodec.UNCOMPRESSED:
         if column_metadata.codec == parquet_thrift.CompressionCodec.SNAPPY:
             raw_bytes = snappy.decompress(bytes_from_file)
         elif column_metadata.codec == parquet_thrift.CompressionCodec.GZIP:
             io_obj = io.BytesIO(bytes_from_file)
-            with gzip.GzipFile(fileobj=io_obj, mode='rb') as f:
-                raw_bytes = f.read()
+            with gzip.GzipFile(fileobj=io_obj, mode='rb') as file_data:
+                raw_bytes = file_data.read()
         else:
             raise ParquetFormatException(
                 "Unsupported Codec: {0}".format(codec))
@@ -240,10 +249,10 @@ def _read_page(fo, page_header, column_metadata):

     if logger.isEnabledFor(logging.DEBUG):
         logger.debug(
-            "Read page with compression type {0}. Bytes {1} -> {2}".format(
+            "Read page with compression type %s. Bytes %d -> %d",
             _get_name(parquet_thrift.CompressionCodec, codec),
             page_header.compressed_page_size,
-            page_header.uncompressed_page_size))
+            page_header.uncompressed_page_size)
     assert len(raw_bytes) == page_header.uncompressed_page_size, \
         "found {0} raw bytes (expected {1})".format(
             len(raw_bytes),
@@ -251,16 +260,16 @@ def _read_page(fo, page_header, column_metadata):
     return raw_bytes


-def _read_data(fo, fo_encoding, value_count, bit_width):
-    """Internal method to read data from the file-object using the given
-    encoding. The data could be definition levels, repetition levels, or
-    actual values.
+def _read_data(file_obj, fo_encoding, value_count, bit_width):
+    """Read data from the file-object using the given encoding.
+
+    The data could be definition levels, repetition levels, or actual values.
     """
     vals = []
     if fo_encoding == parquet_thrift.Encoding.RLE:
         seen = 0
         while seen < value_count:
-            values = encoding.read_rle_bit_packed_hybrid(fo, bit_width)
+            values = encoding.read_rle_bit_packed_hybrid(file_obj, bit_width)
             if values is None:
                 break  # EOF was reached.
             vals += values
@@ -271,14 +280,17 @@ def _read_data(fo, fo_encoding, value_count, bit_width):
     return vals


-def read_data_page(fo, schema_helper, page_header, column_metadata,
+def read_data_page(file_obj, schema_helper, page_header, column_metadata,
                    dictionary):
-    """Reads the datapage from the given file-like object based upon the
-    metadata in the schema_helper, page_header, column_metadata, and
-    (optional) dictionary. Returns a list of values.
+    """Read the data page from the given file-like object based upon the parameters.
+
+    Metadata in the schema_helper, page_header, column_metadata, and (optional) dictionary
+    are used for parsing data.
+
+    Returns a list of values.
     """
     daph = page_header.data_page_header
-    raw_bytes = _read_page(fo, page_header, column_metadata)
+    raw_bytes = _read_page(file_obj, page_header, column_metadata)
     io_obj = io.BytesIO(raw_bytes)
     vals = []
     debug_logging = logger.isEnabledFor(logging.DEBUG)
@@ -315,7 +327,7 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
            logger.debug("  Definition levels: %s", len(definition_levels))

    # repetition levels are skipped if data is at the first level.
-    repetition_levels = None
+    repetition_levels = None  # pylint: disable=unused-variable
    if len(column_metadata.path_in_schema) > 1:
        max_repetition_level = schema_helper.max_repetition_level(
            column_metadata.path_in_schema)
@@ -325,13 +337,13 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
                                       daph.num_values,
                                       bit_width)

-    # TODO Actually use the repetition levels.
+    # NOTE: The repetition levels aren't yet used.
    if daph.encoding == parquet_thrift.Encoding.PLAIN:
        read_values = \
            encoding.read_plain(io_obj, column_metadata.type, daph.num_values - num_nulls)
        if definition_levels:
-            it = iter(read_values)
-            vals.extend([next(it) if level == max_definition_level else None for level in definition_levels])
+            itr = iter(read_values)
+            vals.extend([next(itr) if level == max_definition_level else None for level in definition_levels])
        else:
            vals.extend(read_values)
        if debug_logging:
@@ -344,7 +356,6 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
        total_seen = 0
        dict_values_bytes = io_obj.read()
        dict_values_io_obj = io.BytesIO(dict_values_bytes)
-        # TODO jcrobak -- not sure that this loop is needed?
        while total_seen < daph.num_values:
            values = encoding.read_rle_bit_packed_hybrid(
                dict_values_io_obj, bit_width, len(dict_values_bytes))
@@ -354,18 +365,25 @@ def read_data_page(fo, schema_helper, page_header, column_metadata,
            total_seen += len(values)
    else:
        raise ParquetFormatException("Unsupported encoding: %s",
-                                     _get_name(Encoding, daph.encoding))
+                                     _get_name(parquet_thrift.Encoding, daph.encoding))
    return vals


-def read_dictionary_page(fo, page_header, column_metadata):
-    raw_bytes = _read_page(fo, page_header, column_metadata)
+def _read_dictionary_page(file_obj, page_header, column_metadata):
+    """Read a page containing dictionary data.
+
+    Consumes data using the plain encoding and returns an array of values.
+    """
+    raw_bytes = _read_page(file_obj, page_header, column_metadata)
    io_obj = io.BytesIO(raw_bytes)
-    return encoding.read_plain(io_obj, column_metadata.type,
-                               page_header.dictionary_page_header.num_values)
+    return encoding.read_plain(
+        io_obj,
+        column_metadata.type,
+        page_header.dictionary_page_header.num_values
+    )


-def DictReader(fo, columns=None):
+def DictReader(file_obj, columns=None):  # pylint: disable=invalid-name
    """
    Reader for a parquet file object.

@@ -378,14 +396,15 @@ def DictReader(fo, columns=None):
    :param columns: the columns to include. If None (default), all columns
        are included. Nested values are referenced with "." notation
    """
-    footer = _read_footer(fo)
+    footer = _read_footer(file_obj)
    keys = columns if columns else [s.name for s in
                                    footer.schema if s.type]

-    for row in reader(fo, columns):
+    for row in reader(file_obj, columns):
        yield OrderedDict(zip(keys, row))


-def reader(fo, columns=None):
+
+def reader(file_obj, columns=None):
    """
    Reader for a parquet file object.

@@ -396,86 +415,92 @@ def reader(fo, columns=None):
    :param columns: the columns to include. If None (default), all columns
        are included. Nested values are referenced with "." notation
    """
-    if hasattr(fo, 'mode') and 'b' not in fo.mode:
+    if hasattr(file_obj, 'mode') and 'b' not in file_obj.mode:
        logger.error("parquet.reader requires the fileobj to be opened in binary mode!")
-    footer = _read_footer(fo)
+    footer = _read_footer(file_obj)
    schema_helper = schema.SchemaHelper(footer.schema)
    keys = columns if columns else [s.name for s in
                                    footer.schema if s.type]
    debug_logging = logger.isEnabledFor(logging.DEBUG)
-    for rg in footer.row_groups:
+    for row_group in footer.row_groups:
        res = defaultdict(list)
-        row_group_rows = rg.num_rows
-        for idx, cg in enumerate(rg.columns):
+        row_group_rows = row_group.num_rows
+        for col_group in row_group.columns:
            dict_items = []
-            cmd = cg.meta_data
+            cmd = col_group.meta_data
            # skip if the list of columns is specified and this isn't in it
            if columns and not ".".join(cmd.path_in_schema) in columns:
                continue

            offset = _get_offset(cmd)
-            fo.seek(offset, 0)
+            file_obj.seek(offset, 0)
            values_seen = 0
            if debug_logging:
                logger.debug("reading column chunk of type: %s",
                             _get_name(parquet_thrift.Type, cmd.type))
            while values_seen < row_group_rows:
-                ph = _read_page_header(fo)
+                page_header = _read_page_header(file_obj)
                if debug_logging:
                    logger.debug("Reading page (type=%s, "
                                 "uncompressed=%s bytes, "
                                 "compressed=%s bytes)",
-                                 _get_name(parquet_thrift.PageType, ph.type),
-                                 ph.uncompressed_page_size,
-                                 ph.compressed_page_size)
+                                 _get_name(parquet_thrift.PageType, page_header.type),
+                                 page_header.uncompressed_page_size,
+                                 page_header.compressed_page_size)

-                if ph.type == parquet_thrift.PageType.DATA_PAGE:
-                    values = read_data_page(fo, schema_helper, ph, cmd,
+                if page_header.type == parquet_thrift.PageType.DATA_PAGE:
+                    values = read_data_page(file_obj, schema_helper, page_header, cmd,
                                            dict_items)
                    schema_element = schema_helper.schema_element(cmd.path_in_schema[-1])
-                    res[".".join(cmd.path_in_schema)] += convert_column(values,
-                                                                        schema_element) if schema_element.converted_type else values
-                    values_seen += ph.data_page_header.num_values
-                elif ph.type == parquet_thrift.PageType.DICTIONARY_PAGE:
+                    res[".".join(cmd.path_in_schema)] += convert_column(values, schema_element) \
+                        if schema_element.converted_type else values
+                    values_seen += page_header.data_page_header.num_values
+                elif page_header.type == parquet_thrift.PageType.DICTIONARY_PAGE:
                    if debug_logging:
-                        logger.debug(ph)
+                        logger.debug(page_header)
                    assert dict_items == []
-                    dict_items = read_dictionary_page(fo, ph, cmd)
+                    dict_items = _read_dictionary_page(file_obj, page_header, cmd)
                    if debug_logging:
                        logger.debug("Dictionary: %s", str(dict_items))
                else:
-                    logger.warn("Skipping unknown page type={0}".format(
-                        _get_name(parquet_thrift.PageType, ph.type)))
+                    logger.info("Skipping unknown page type=%s",
+                                _get_name(parquet_thrift.PageType, page_header.type))

-        for i in range(rg.num_rows):
+        for i in range(row_group.num_rows):
            yield [res[k][i] for k in keys if res[k]]


-class JsonWriter(object):
+
+class JsonWriter(object):  # pylint: disable=too-few-public-methods
+    """Utility for dumping rows as JSON objects."""
+
    def __init__(self, out):
+        """Initialize with output destination."""
        self._out = out

    def writerow(self, row):
+        """Write a single row."""
        json_text = json.dumps(row)
-        if type(json_text) is bytes:
+        if isinstance(json_text, bytes):
            json_text = json_text.decode('utf-8')
        self._out.write(json_text)
        self._out.write(u'\n')


-def _dump(fo, options, out=sys.stdout):
+def _dump(file_obj, options, out=sys.stdout):
+    """Dump the contents of file_obj to out, using the given options."""
    # writer and keys are lazily loaded. We don't know the keys until we have
    # the first item. And we need the keys for the csv writer.
    total_count = 0
    writer = None
    keys = None
-    for row in DictReader(fo, options.col):
+    for row in DictReader(file_obj, options.col):
        if not keys:
            keys = row.keys()
        if not writer:
-            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'',
-                                    quoting=csv.QUOTE_MINIMAL) if options.format == 'csv' \
-                else JsonWriter(out) if options.format == 'json' \
-                else None
+            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'', quoting=csv.QUOTE_MINIMAL) \
+                if options.format == 'csv' \
+                else JsonWriter(out) if options.format == 'json' \
+                else None
        if total_count == 0 and options.format == "csv" and not options.no_headers:
            writer.writeheader()
        if options.limit != -1 and total_count >= options.limit:
@@ -486,5 +511,6 @@ def _dump(fo, options, out=sys.stdout):


 def dump(filename, options, out=sys.stdout):
-    with open(filename, 'rb') as fo:
-        return _dump(fo, options=options, out=out)
+    """Dump parquet file with given filename using options to `out`."""
+    with open(filename, 'rb') as file_obj:
+        return _dump(file_obj, options=options, out=out)
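
A minimal usage sketch of the reader API touched by this patch; the file name `example.parquet` and the column names are hypothetical, and the file object must be opened in binary mode as `reader` requires:

    import parquet

    # Hypothetical input file; DictReader yields one OrderedDict per row, keyed by column name.
    with open("example.parquet", "rb") as file_obj:
        for row in parquet.DictReader(file_obj, columns=["id", "name"]):
            print(row)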