Browse Source

[core] upgrade avro library

Abraham Elmahrek 11 years ago
parent
commit
5ff543d

+ 1 - 1
desktop/core/ext-py/avro-1.5.0/PKG-INFO → desktop/core/ext-py/avro-1.7.6/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: avro
-Version: 1.5.0
+Version: 1.7.6
 Summary: Avro is a serialization and RPC framework.
 Home-page: http://hadoop.apache.org/avro
 Author: Apache Avro

+ 262 - 0
desktop/core/ext-py/avro-1.7.6/scripts/avro

@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Command line utlity for reading and writing Avro files."""
+
+from avro.io import DatumReader, DatumWriter
+from avro.datafile import DataFileReader, DataFileWriter
+import avro.schema
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+import csv
+from sys import stdout, stdin
+from itertools import ifilter, imap
+from functools import partial
+from os.path import splitext
+
+class AvroError(Exception):
+    pass
+
+def print_json(row):
+    print(json.dumps(row))
+
+def print_json_pretty(row):
+    print(json.dumps(row, indent=4))
+
+_write_row = csv.writer(stdout).writerow
+_encoding = stdout.encoding or "UTF-8"
+def _encode(v, encoding=_encoding):
+    if not isinstance(v, basestring):
+        return v
+    return v.encode(encoding)
+
+def print_csv(row):
+    # We sort the keys so the fields will be in the same place
+    # FIXME: Do we want to do it in schema order?
+    _write_row([_encode(row[key]) for key in sorted(row)])
+
+def select_printer(format):
+    return {
+        "json" : print_json,
+        "json-pretty" : print_json_pretty,
+        "csv" : print_csv
+    }[format]
+
+def record_match(expr, record):
+    return eval(expr, None, {"r" : record})
+
+def parse_fields(fields):
+    fields = fields or ''
+    if not fields.strip():
+        return None
+
+    return [field.strip() for field in fields.split(',') if field.strip()]
+
+def field_selector(fields):
+    fields = set(fields)
+    def keys_filter(obj):
+        return dict((k, obj[k]) for k in (set(obj) & fields))
+    return keys_filter
+
+def print_avro(avro, opts):
+    if opts.header and (opts.format != "csv"):
+        raise AvroError("--header applies only to CSV format")
+
+    # Apply filter first
+    if opts.filter:
+        avro = ifilter(partial(record_match, opts.filter), avro)
+
+    for i in xrange(opts.skip):
+        try:
+            next(avro)
+        except StopIteration:
+            return
+
+    fields = parse_fields(opts.fields)
+    if fields:
+        avro = imap(field_selector(fields), avro)
+
+    printer = select_printer(opts.format)
+    for i, record in enumerate(avro):
+        if i == 0 and opts.header:
+            _write_row(sorted(record.keys()))
+        if i >= opts.count:
+            break
+        printer(record)
+
+def print_schema(avro):
+    schema = avro.meta["avro.schema"]
+    # Pretty print
+    print(json.dumps(json.loads(schema), indent=4))
+
+def cat(opts, args):
+    if not args:
+        raise AvroError("No files to show")
+
+    for filename in args:
+        try:
+            fo = open(filename, "rb")
+        except (OSError, IOError), e:
+            raise AvroError("Can't open %s - %s" % (filename, e))
+
+        avro = DataFileReader(fo, DatumReader())
+
+        if opts.print_schema:
+            print_schema(avro)
+            continue
+
+        print_avro(avro, opts)
+
+def _open(filename, mode):
+    if filename == "-":
+        return {
+            "rb" : stdin,
+            "wb" : stdout
+        }[mode]
+
+    return open(filename, mode)
+
+def iter_json(info, _):
+    return imap(json.loads, info)
+
+_CONVERTERS = {
+    "int" : int,
+    "long" : long,
+    "float" : float,
+    "double" : float,
+    "string" : str,
+    "bytes" : str,
+    "boolean" : bool,
+    "null" : lambda _: None,
+}
+
+def convert(value, field):
+    type = field.type.type
+    if type == "union":
+        return convert_union(value, field)
+
+    return _CONVERTERS[type](value)
+
+def convert_union(value, field):
+    # Try each branch of the union until a converter accepts the value
+    for name in [s.name for s in field.type.schemas]:
+        try:
+            return _CONVERTERS[name](value)
+        except (KeyError, ValueError):
+            continue
+
+def iter_csv(info, schema):
+    header = [field.name for field in schema.fields]
+    for row in csv.reader(info):
+        values = [convert(v, f) for v, f in zip(row, schema.fields)]
+        yield dict(zip(header, values))
+
+def guess_input_type(files):
+    if not files:
+        return None
+
+    ext = splitext(files[0])[1].lower()
+    if ext in (".json", ".js"):
+        return "json"
+    elif ext in (".csv",):
+        return "csv"
+
+    return None
+
+def write(opts, files):
+    if not opts.schema:
+        raise AvroError("No schema specified")
+
+    input_type = opts.input_type or guess_input_type(files)
+    if not input_type:
+        raise AvroError("Can't guess input file type (not .json or .csv)")
+
+    try:
+        schema = avro.schema.parse(open(opts.schema, "rb").read())
+        out = _open(opts.output, "wb")
+    except (IOError, OSError), e:
+        raise AvroError("Can't open file - %s" % e)
+
+    writer = DataFileWriter(out, DatumWriter(), schema)
+
+    iter_records = {"json" : iter_json, "csv" : iter_csv}[input_type]
+    for filename in (files or ["-"]):
+        info = _open(filename, "rb")
+        for record in iter_records(info, schema):
+            writer.append(record)
+
+    writer.close()
+
+def main(argv=None):
+    import sys
+    from optparse import OptionParser, OptionGroup
+
+    argv = argv or sys.argv
+
+    parser = OptionParser(description="Display/write for Avro files",
+                      version="1.7.6",
+                      usage="usage: %prog cat|write [options] FILE [FILE...]")
+    # cat options
+
+    cat_options = OptionGroup(parser, "cat options")
+    cat_options.add_option("-n", "--count", default=float("Infinity"),
+                    help="number of records to print", type=int)
+    cat_options.add_option("-s", "--skip", help="number of records to skip",
+                           type=int, default=0)
+    cat_options.add_option("-f", "--format", help="record format",
+                           default="json",
+                           choices=["json", "csv", "json-pretty"])
+    cat_options.add_option("--header", help="print CSV header", default=False,
+                   action="store_true")
+    cat_options.add_option("--filter", help="filter records (e.g. r['age']>1)",
+                    default=None)
+    cat_options.add_option("--print-schema", help="print schema",
+                      action="store_true", default=False)
+    cat_options.add_option('--fields', default=None,
+                help='fields to show, comma separated (show all by default)')
+    parser.add_option_group(cat_options)
+
+    # write options
+    write_options = OptionGroup(parser, "write options")
+    write_options.add_option("--schema", help="schema file (required)")
+    write_options.add_option("--input-type",
+                             help="input file(s) type (json or csv)",
+                             choices=["json", "csv"], default=None)
+    write_options.add_option("-o", "--output", help="output file", default="-")
+    parser.add_option_group(write_options)
+
+    opts, args = parser.parse_args(argv[1:])
+    if len(args) < 1:
+        parser.error("You much specify `cat` or `write`")  # Will exit
+
+    command = args.pop(0)
+    try:
+        if command == "cat":
+            cat(opts, args)
+        elif command == "write":
+            write(opts, args)
+        else:
+            raise AvroError("Unknown command - %s" % command)
+    except AvroError, e:
+        parser.error("%s" % e) # Will exit
+    except Exception, e:
+        raise SystemExit("panic: %s" % e)
+
+if __name__ == "__main__":
+    main()
+
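A quick way to see what the new script does end to end is to drive it from Python the same way test_script.py (added below) does. A minimal sketch, assuming the script sits at scripts/avro and that weather.avro is some pre-existing data file (both paths are illustrative):

    # Python 2 sketch: shell out to the new CLI, as the test suite does.
    from subprocess import check_output

    # One JSON document per record
    print check_output(["scripts/avro", "cat", "weather.avro"])

    # Project two fields and filter with a Python expression bound to `r`
    print check_output(["scripts/avro", "cat", "weather.avro",
                        "--fields", "station,temp",
                        "--filter", "r['temp'] > 0"])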

+ 5 - 0
desktop/core/ext-py/avro-1.7.6/setup.cfg

@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+

+ 11 - 2
desktop/core/ext-py/avro-1.5.0/setup.py → desktop/core/ext-py/avro-1.7.6/setup.py

@@ -19,16 +19,22 @@ try:
   from setuptools import setup
 except ImportError:
   from distutils.core import setup
+from sys import version_info
+
+install_requires = []
+if version_info[:2] <= (2, 5):
+    install_requires.append('simplejson >= 2.0.9')
 
 setup(
   name = 'avro',
-  version = '1.5.0',
+  version = '1.7.6',
   packages = ['avro',],
   package_dir = {'avro': 'src/avro'},
+  scripts = ["./scripts/avro"],
 
   # Project uses simplejson, so ensure that it gets installed or upgraded
   # on the target machine
-  install_requires = ['simplejson >= 2.0.9'],
+  install_requires = install_requires,
 
   # metadata for upload to PyPI
   author = 'Apache Avro',
@@ -37,4 +43,7 @@ setup(
   license = 'Apache License 2.0',
   keywords = 'avro serialization rpc',
   url = 'http://hadoop.apache.org/avro',
+  extras_require = {
+    'snappy': ['python-snappy'],
+  },
 )
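Two packaging changes worth noting: simplejson is now required only on interpreters that lack the stdlib json module, and Snappy support becomes an opt-in extra. The import-guard pattern that pairs with this, as used by the script and datafile.py elsewhere in this same diff:

    # Optional-dependency probes matching what setup.py now encodes.
    try:
        import json                   # stdlib from Python 2.6 on
    except ImportError:
        import simplejson as json     # install_requires covers Python <= 2.5

    try:
        import snappy                 # present after installing the 'snappy' extra
        has_snappy = True
    except ImportError:
        has_snappy = False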

+ 0 - 0
desktop/core/ext-py/avro-1.5.0/src/avro/__init__.py → desktop/core/ext-py/avro-1.7.6/src/avro/__init__.py


+ 51 - 6
desktop/core/ext-py/avro-1.5.0/src/avro/datafile.py → desktop/core/ext-py/avro-1.7.6/src/avro/datafile.py

@@ -23,7 +23,11 @@ except ImportError:
   from StringIO import StringIO
 from avro import schema
 from avro import io
-
+try:
+  import snappy
+  has_snappy = True
+except ImportError:
+  has_snappy = False
 #
 # Constants
 #
@@ -32,7 +36,7 @@ VERSION = 1
 MAGIC = 'Obj' + chr(VERSION)
 MAGIC_SIZE = len(MAGIC)
 SYNC_SIZE = 16
-SYNC_INTERVAL = 1000 * SYNC_SIZE # TODO(hammer): make configurable
+SYNC_INTERVAL = 4000 * SYNC_SIZE # TODO(hammer): make configurable
 META_SCHEMA = schema.parse("""\
 {"type": "record", "name": "org.apache.avro.file.Header",
  "fields" : [
@@ -41,6 +45,8 @@ META_SCHEMA = schema.parse("""\
    {"name": "sync", "type": {"type": "fixed", "name": "sync", "size": %d}}]}
 """ % (MAGIC_SIZE, SYNC_SIZE))
 VALID_CODECS = ['null', 'deflate']
+if has_snappy:
+    VALID_CODECS.append('snappy')
 VALID_ENCODINGS = ['binary'] # not used yet
 
 CODEC_KEY = "avro.codec"
@@ -80,6 +86,7 @@ class DataFileWriter(object):
     self._buffer_encoder = io.BinaryEncoder(self._buffer_writer)
     self._block_count = 0
     self._meta = {}
+    self._header_written = False
 
     if writers_schema is not None:
       if codec not in VALID_CODECS:
@@ -88,7 +95,6 @@ class DataFileWriter(object):
       self.set_meta('avro.codec', codec)
       self.set_meta('avro.schema', str(writers_schema))
       self.datum_writer.writers_schema = writers_schema
-      self._write_header()
     else:
       # open writer for reading to collect metadata
       dfr = DataFileReader(writer, io.DatumReader())
@@ -105,6 +111,7 @@ class DataFileWriter(object):
 
       # seek to the end of the file and prepare for writing
       writer.seek(0, 2)
+      self._header_written = True
 
   # read-only properties
   writer = property(lambda self: self._writer)
@@ -115,6 +122,14 @@ class DataFileWriter(object):
   sync_marker = property(lambda self: self._sync_marker)
   meta = property(lambda self: self._meta)
 
+  def __enter__(self):
+    return self
+
+  def __exit__(self, type, value, traceback):
+    # Perform a close if there's no exception
+    if type is None:
+      self.close()
+
   # read/write properties
   def set_block_count(self, new_val):
     self._block_count = new_val
@@ -131,9 +146,13 @@ class DataFileWriter(object):
               'meta': self.meta,
               'sync': self.sync_marker}
     self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
+    self._header_written = True
 
   # TODO(hammer): make a schema for blocks and use datum_writer
   def _write_block(self):
+    if not self._header_written:
+      self._write_header()
+
     if self.block_count > 0:
       # write number of items in block
       self.encoder.write_long(self.block_count)
@@ -142,19 +161,28 @@ class DataFileWriter(object):
       uncompressed_data = self.buffer_writer.getvalue()
       if self.get_meta(CODEC_KEY) == 'null':
         compressed_data = uncompressed_data
+        compressed_data_length = len(compressed_data)
       elif self.get_meta(CODEC_KEY) == 'deflate':
         # The first two characters and last character are zlib
         # wrappers around deflate data.
         compressed_data = zlib.compress(uncompressed_data)[2:-1]
+        compressed_data_length = len(compressed_data)
+      elif self.get_meta(CODEC_KEY) == 'snappy':
+        compressed_data = snappy.compress(uncompressed_data)
+        compressed_data_length = len(compressed_data) + 4 # crc32
       else:
         fail_msg = '"%s" codec is not supported.' % self.get_meta(CODEC_KEY)
         raise DataFileException(fail_msg)
 
       # Write length of block
-      self.encoder.write_long(len(compressed_data))
+      self.encoder.write_long(compressed_data_length)
 
       # Write block
       self.writer.write(compressed_data)
+      
+      # Write CRC32 checksum for Snappy
+      if self.get_meta(CODEC_KEY) == 'snappy':
+        self.encoder.write_crc32(uncompressed_data)
 
       # write sync marker
       self.writer.write(self.sync_marker)
@@ -217,7 +245,15 @@ class DataFileReader(object):
     # get ready to read
     self._block_count = 0
     self.datum_reader.writers_schema = schema.parse(self.get_meta(SCHEMA_KEY))
-  
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, type, value, traceback):
+    # Perform a close if there's no exception
+    if type is None:
+      self.close()
+
   def __iter__(self):
     return self
 
@@ -280,7 +316,7 @@ class DataFileReader(object):
       # Skip a long; we don't need to use the length.
       self.raw_decoder.skip_long()
       self._datum_decoder = self._raw_decoder
-    else:
+    elif self.codec == 'deflate':
       # Compressed data is stored as (length, data), which
       # corresponds to how the "bytes" type is encoded.
       data = self.raw_decoder.read_bytes()
@@ -288,6 +324,15 @@ class DataFileReader(object):
       # "raw" (no zlib headers) decompression.  See zlib.h.
       uncompressed = zlib.decompress(data, -15)
       self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
+    elif self.codec == 'snappy':
+      # Compressed data includes a 4-byte CRC32 checksum
+      length = self.raw_decoder.read_long()
+      data = self.raw_decoder.read(length - 4)
+      uncompressed = snappy.decompress(data)
+      self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
+      self.raw_decoder.check_crc32(uncompressed)
+    else:
+      raise DataFileException("Unknown codec: %r" % self.codec)
 
   def _skip_sync(self):
     """

+ 13 - 0
desktop/core/ext-py/avro-1.5.0/src/avro/io.py → desktop/core/ext-py/avro-1.7.6/src/avro/io.py

@@ -39,6 +39,7 @@ uses the following mapping:
 import struct
 from avro import schema
 import sys
+from binascii import crc32
 
 try:
 	import json
@@ -71,6 +72,7 @@ STRUCT_INT = struct_class('!I')     # big-endian unsigned int
 STRUCT_LONG = struct_class('!Q')    # big-endian unsigned long long
 STRUCT_FLOAT = struct_class('!f')   # big-endian float
 STRUCT_DOUBLE = struct_class('!d')  # big-endian double
+STRUCT_CRC32 = struct_class('>I')   # big-endian unsigned int
 
 #
 # Exceptions
@@ -230,6 +232,11 @@ class BinaryDecoder(object):
     """
     return unicode(self.read_bytes(), "utf-8")
 
+  def check_crc32(self, bytes):
+    checksum = STRUCT_CRC32.unpack(self.read(4))[0]
+    if crc32(bytes) & 0xffffffff != checksum:
+      raise schema.AvroException("Checksum failure")
+
   def skip_null(self):
     pass
 
@@ -349,6 +356,12 @@ class BinaryEncoder(object):
     datum = datum.encode("utf-8")
     self.write_bytes(datum)
 
+  def write_crc32(self, bytes):
+    """
+    A 4-byte, big-endian CRC32 checksum
+    """
+    self.write(STRUCT_CRC32.pack(crc32(bytes) & 0xffffffff))
+
 #
 # DatumReader/Writer
 #
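These two hooks are what frame a Snappy block: the writer appends a big-endian CRC32 of the uncompressed bytes, and the reader verifies it after decompressing. A self-contained sketch of just the pair, using StringIO in place of a file:

    from StringIO import StringIO
    from avro import io

    payload = "uncompressed block bytes"

    buf = StringIO()
    io.BinaryEncoder(buf).write_crc32(payload)     # writes 4 bytes, '>I' packed

    decoder = io.BinaryDecoder(StringIO(buf.getvalue()))
    decoder.check_crc32(payload)                   # raises AvroException on mismatch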

+ 4 - 3
desktop/core/ext-py/avro-1.5.0/src/avro/ipc.py → desktop/core/ext-py/avro-1.7.6/src/avro/ipc.py

@@ -465,7 +465,8 @@ class HTTPTransceiver(object):
   A simple HTTP-based transceiver implementation.
   Useful for clients but not for servers
   """
-  def __init__(self, host, port):
+  def __init__(self, host, port, req_resource='/'):
+    self.req_resource = req_resource
     self.conn = httplib.HTTPConnection(host, port)
     self.conn.connect()
 
@@ -477,6 +478,7 @@ class HTTPTransceiver(object):
   def set_conn(self, new_conn):
     self._conn = new_conn
   conn = property(lambda self: self._conn, set_conn)
+  req_resource = '/'
 
   def transceive(self, request):
     self.write_framed_message(request)
@@ -492,14 +494,13 @@ class HTTPTransceiver(object):
 
   def write_framed_message(self, message):
     req_method = 'POST'
-    req_resource = '/'
     req_headers = {'Content-Type': 'avro/binary'}
 
     req_body_buffer = FramedWriter(StringIO())
     req_body_buffer.write_framed_message(message)
     req_body = req_body_buffer.writer.getvalue()
 
-    self.conn.request(req_method, req_resource, req_body, req_headers)
+    self.conn.request(req_method, self.req_resource, req_body, req_headers)
 
   def close(self):
     self.conn.close()
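The new req_resource argument lets a client POST somewhere other than '/'. A sketch, assuming an Avro-over-HTTP responder is actually listening at the placeholder host, port, and path, and that hello.avpr is a protocol file you have locally (the message name and payload are likewise placeholders for whatever that protocol declares):

    from avro import ipc, protocol

    # __init__ opens the HTTP connection immediately, so the host must be up
    client = ipc.HTTPTransceiver("localhost", 8080, req_resource="/service/article")

    proto = protocol.parse(open("hello.avpr").read())
    requestor = ipc.Requestor(proto, client)
    result = requestor.request("hello", {"greeting": {"message": "hi"}})
    client.close()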

+ 5 - 3
desktop/core/ext-py/avro-1.5.0/src/avro/protocol.py → desktop/core/ext-py/avro-1.7.6/src/avro/protocol.py

@@ -123,7 +123,7 @@ class Protocol(object):
   def to_json(self):
     to_dump = {}
     to_dump['protocol'] = self.name
-    names = schema.Names()
+    names = schema.Names(default_namespace=self.namespace)
     if self.namespace: 
       to_dump['namespace'] = self.namespace
     if self.types:
@@ -186,9 +186,11 @@ class Message(object):
     self.props[key] = value  
 
   def __str__(self):
-    return json.dumps(self.to_json(schema.Names()))
+    return json.dumps(self.to_json())
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = schema.Names()
     to_dump = {}
     to_dump['request'] = self.request.to_json(names)
     to_dump['response'] = self.response.to_json(names)
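The upshot: to_json now defaults its Names argument, so str() works on a Message without callers threading a Names through, and Protocol.to_json seeds Names with the protocol's own namespace so redundant inner namespaces get pruned (exercised by test_inner_namespace_not_rendered below). A short sketch, with hello.avpr again standing in for a real protocol file:

    from avro import protocol

    proto = protocol.parse(open("hello.avpr").read())
    print proto.to_json()                    # inner types drop a namespace equal to the default
    print str(proto.messages.values()[0])    # works now; no explicit Names() required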

+ 125 - 48
desktop/core/ext-py/avro-1.5.0/src/avro/schema.py → desktop/core/ext-py/avro-1.7.6/src/avro/schema.py

@@ -68,7 +68,7 @@ VALID_TYPES = PRIMITIVE_TYPES + NAMED_TYPES + (
   'error_union'
 )
 
-RESERVED_PROPS = (
+SCHEMA_RESERVED_PROPS = (
   'type',
   'name',
   'namespace',
@@ -77,6 +77,15 @@ RESERVED_PROPS = (
   'size',       # Fixed
   'symbols',    # Enum
   'values',     # Map
+  'doc',
+)
+
+FIELD_RESERVED_PROPS = (
+  'default',
+  'name',
+  'doc',
+  'order',
+  'type',
 )
 
 VALID_FIELD_SORT_ORDERS = (
@@ -101,7 +110,7 @@ class SchemaParseException(AvroException):
 
 class Schema(object):
   """Base class for all Schema classes."""
-  def __init__(self, type):
+  def __init__(self, type, other_props=None):
     # Ensure valid ctor args
     if not isinstance(type, basestring):
       fail_msg = 'Schema type must be a string.'
@@ -113,22 +122,26 @@ class Schema(object):
     # add members
     if not hasattr(self, '_props'): self._props = {}
     self.set_prop('type', type)
+    self.type = type
+    self._props.update(other_props or {})
 
   # Read-only properties dict. Printing schemas
   # creates JSON properties directly from this dict. 
   props = property(lambda self: self._props)
-  type = property(lambda self: self.get_prop('type'))
+
+  # Read-only property dict. Non-reserved properties
+  other_props = property(lambda self: get_other_props(self._props, SCHEMA_RESERVED_PROPS),
+                         doc="dictionary of non-reserved properties")
 
   # utility functions to manipulate properties dict
   def get_prop(self, key):
-    return self.props.get(key)
+    return self._props.get(key)
 
   def set_prop(self, key, value):
-    self.props[key] = value
+    self._props[key] = value
 
   def __str__(self):
-    names = Names()
-    return json.dumps(self.to_json(names))
+    return json.dumps(self.to_json())
 
   def to_json(self, names):
     """
@@ -221,7 +234,24 @@ class Names(object):
       if not self.names.has_key(test):
           return None
       return self.names[test]
-      
+  
+  def prune_namespace(self, properties):
+    """given a properties, return properties with namespace removed if
+    it matches the own default namespace"""
+    if self.default_namespace is None:
+      # I have no default -- no change
+      return properties
+    if 'namespace' not in properties:
+      # he has no namespace - no change
+      return properties
+    if properties['namespace'] != self.default_namespace:
+      # we're different - leave his stuff alone
+      return properties
+    # we each have a namespace and it's redundant. delete his.
+    prunable = properties.copy()
+    del(prunable['namespace'])
+    return prunable
+
   def add_name(self, name_attr, space_attr, new_schema):
     """
     Add a new schema object to the name set.
@@ -245,7 +275,7 @@ class Names(object):
 
 class NamedSchema(Schema):
   """Named Schemas specified in NAMED_TYPES."""
-  def __init__(self, type, name, namespace=None, names=None):
+  def __init__(self, type, name, namespace=None, names=None, other_props=None):
     # Ensure valid ctor args
     if not name:
       fail_msg = 'Named Schemas must have a non-empty name.'
@@ -258,7 +288,7 @@ class NamedSchema(Schema):
       raise SchemaParseException(fail_msg)
 
     # Call parent ctor
-    Schema.__init__(self, type)
+    Schema.__init__(self, type, other_props)
 
     # Add class members
     new_name = names.add_name(name, namespace, self)
@@ -283,7 +313,8 @@ class NamedSchema(Schema):
   fullname = property(lambda self: self._fullname)
 
 class Field(object):
-  def __init__(self, type, name, has_default, default=None, order=None, names=None):
+  def __init__(self, type, name, has_default, default=None,
+               order=None, names=None, doc=None, other_props=None):
     # Ensure valid ctor args
     if not name:
       fail_msg = 'Fields must have a non-empty name.'
@@ -298,6 +329,7 @@ class Field(object):
     # add members
     self._props = {}
     self._has_default = has_default
+    self._props.update(other_props or {})
 
     if (isinstance(type, basestring) and names is not None
         and names.has_name(type, None)):
@@ -310,25 +342,36 @@ class Field(object):
         raise SchemaParseException(fail_msg)
     self.set_prop('type', type_schema)
     self.set_prop('name', name)
+    self.type = type_schema
+    self.name = name
     # TODO(hammer): check to ensure default is valid
     if has_default: self.set_prop('default', default)
     if order is not None: self.set_prop('order', order)
+    if doc is not None: self.set_prop('doc', doc)
 
   # read-only properties
-  type = property(lambda self: self.get_prop('type'))
-  name = property(lambda self: self.get_prop('name'))
   default = property(lambda self: self.get_prop('default'))
   has_default = property(lambda self: self._has_default)
   order = property(lambda self: self.get_prop('order'))
+  doc = property(lambda self: self.get_prop('doc'))
   props = property(lambda self: self._props)
 
-  # utility functions to manipulate properties dict
+  # Read-only property dict. Non-reserved properties
+  other_props = property(lambda self: get_other_props(self._props, FIELD_RESERVED_PROPS),
+                         doc="dictionary of non-reserved properties")
+
+  # utility functions to manipulate properties dict
   def get_prop(self, key):
-    return self.props.get(key)
+    return self._props.get(key)
   def set_prop(self, key, value):
-    self.props[key] = value
+    self._props[key] = value
 
-  def to_json(self, names):
+  def __str__(self):
+    return json.dumps(self.to_json())
+
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     to_dump = self.props.copy()
     to_dump['type'] = self.type.to_json(names)
     return to_dump
@@ -352,7 +395,7 @@ class PrimitiveSchema(Schema):
 
     self.fullname = type
 
-  def to_json(self, names):
+  def to_json(self, names=None):
     if len(self.props) == 1:
       return self.fullname
     else:
@@ -366,14 +409,14 @@ class PrimitiveSchema(Schema):
 #
 
 class FixedSchema(NamedSchema):
-  def __init__(self, name, namespace, size, names=None):
+  def __init__(self, name, namespace, size, names=None, other_props=None):
     # Ensure valid ctor args
     if not isinstance(size, int):
       fail_msg = 'Fixed Schema requires a valid integer for size property.'
       raise AvroException(fail_msg)
 
     # Call parent ctor
-    NamedSchema.__init__(self, 'fixed', name, namespace, names)
+    NamedSchema.__init__(self, 'fixed', name, namespace, names, other_props)
 
     # Add class members
     self.set_prop('size', size)
@@ -381,18 +424,20 @@ class FixedSchema(NamedSchema):
   # read-only properties
   size = property(lambda self: self.get_prop('size'))
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     if self.fullname in names.names:
       return self.name_ref(names)
     else:
       names.names[self.fullname] = self
-      return self.props
+      return names.prune_namespace(self.props)
 
   def __eq__(self, that):
     return self.props == that.props
 
 class EnumSchema(NamedSchema):
-  def __init__(self, name, namespace, symbols, names=None):
+  def __init__(self, name, namespace, symbols, names=None, doc=None, other_props=None):
     # Ensure valid ctor args
     if not isinstance(symbols, list):
       fail_msg = 'Enum Schema requires a JSON array for the symbols property.'
@@ -405,20 +450,24 @@ class EnumSchema(NamedSchema):
       raise AvroException(fail_msg)
 
     # Call parent ctor
-    NamedSchema.__init__(self, 'enum', name, namespace, names)
+    NamedSchema.__init__(self, 'enum', name, namespace, names, other_props)
 
     # Add class members
     self.set_prop('symbols', symbols)
+    if doc is not None: self.set_prop('doc', doc)
 
   # read-only properties
   symbols = property(lambda self: self.get_prop('symbols'))
+  doc = property(lambda self: self.get_prop('doc'))
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     if self.fullname in names.names:
       return self.name_ref(names)
     else:
       names.names[self.fullname] = self
-      return self.props
+      return names.prune_namespace(self.props)
 
   def __eq__(self, that):
     return self.props == that.props
@@ -428,9 +477,9 @@ class EnumSchema(NamedSchema):
 #
 
 class ArraySchema(Schema):
-  def __init__(self, items, names=None):
+  def __init__(self, items, names=None, other_props=None):
     # Call parent ctor
-    Schema.__init__(self, 'array')
+    Schema.__init__(self, 'array', other_props)
     # Add class members
 
     if isinstance(items, basestring) and names.has_name(items, None):
@@ -447,7 +496,9 @@ class ArraySchema(Schema):
   # read-only properties
   items = property(lambda self: self.get_prop('items'))
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     to_dump = self.props.copy()
     item_schema = self.get_prop('items')
     to_dump['items'] = item_schema.to_json(names)
@@ -458,9 +509,9 @@ class ArraySchema(Schema):
     return to_cmp == json.loads(str(that))
 
 class MapSchema(Schema):
-  def __init__(self, values, names=None):
+  def __init__(self, values, names=None, other_props=None):
     # Call parent ctor
-    Schema.__init__(self, 'map')
+    Schema.__init__(self, 'map', other_props)
 
     # Add class members
     if isinstance(values, basestring) and names.has_name(values, None):
@@ -477,7 +528,9 @@ class MapSchema(Schema):
   # read-only properties
   values = property(lambda self: self.get_prop('values'))
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     to_dump = self.props.copy()
     to_dump['values'] = self.get_prop('values').to_json(names)
     return to_dump
@@ -522,7 +575,9 @@ class UnionSchema(Schema):
   # read-only properties
   schemas = property(lambda self: self._schemas)
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     to_dump = []
     for schema in self.schemas:
       to_dump.append(schema.to_json(names))
@@ -537,7 +592,9 @@ class ErrorUnionSchema(UnionSchema):
     # Prepend "string" to handle system errors
     UnionSchema.__init__(self, ['string'] + schemas, names)
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     to_dump = []
     for schema in self.schemas:
       # Don't print the system error schema
@@ -564,7 +621,10 @@ class RecordSchema(NamedSchema):
           default = field.get('default')
 
         order = field.get('order')
-        new_field = Field(type, name, has_default, default, order, names)
+        doc = field.get('doc')
+        other_props = get_other_props(field, FIELD_RESERVED_PROPS)
+        new_field = Field(type, name, has_default, default, order, names, doc,
+                         other_props)
         # make sure field name has not been used yet
         if new_field.name in field_names:
           fail_msg = 'Field name %s already in use.' % new_field.name
@@ -575,7 +635,8 @@ class RecordSchema(NamedSchema):
       field_objects.append(new_field)
     return field_objects
 
-  def __init__(self, name, namespace, fields, names=None, schema_type='record'):
+  def __init__(self, name, namespace, fields, names=None, schema_type='record',
+               doc=None, other_props=None):
     # Ensure valid ctor args
     if fields is None:
       fail_msg = 'Record schema requires a non-empty fields property.'
@@ -586,9 +647,10 @@ class RecordSchema(NamedSchema):
 
     # Call parent ctor (adds own name to namespace, too)
     if schema_type == 'request':
-      Schema.__init__(self, schema_type)
+      Schema.__init__(self, schema_type, other_props)
     else:
-      NamedSchema.__init__(self, schema_type, name, namespace, names)
+      NamedSchema.__init__(self, schema_type, name, namespace, names,
+                           other_props)
 
     if schema_type == 'record': 
       old_default = names.default_namespace
@@ -598,12 +660,14 @@ class RecordSchema(NamedSchema):
     # Add class members
     field_objects = RecordSchema.make_field_objects(fields, names)
     self.set_prop('fields', field_objects)
-    
+    if doc is not None: self.set_prop('doc', doc)
+
     if schema_type == 'record':
       names.default_namespace = old_default
 
   # read-only properties
   fields = property(lambda self: self.get_prop('fields'))
+  doc = property(lambda self: self.get_prop('doc'))
 
   @property
   def fields_dict(self):
@@ -612,7 +676,9 @@ class RecordSchema(NamedSchema):
       fields_dict[field.name] = field
     return fields_dict
 
-  def to_json(self, names):
+  def to_json(self, names=None):
+    if names is None:
+      names = Names()
     # Request records don't have names
     if self.type == 'request':
       return [ f.to_json(names) for f in self.fields ]
@@ -622,7 +688,7 @@ class RecordSchema(NamedSchema):
     else:
       names.names[self.fullname] = self
 
-    to_dump = self.props.copy()
+    to_dump = names.prune_namespace(self.props.copy())
     to_dump['fields'] = [ f.to_json(names) for f in self.fields ]
     return to_dump
 
@@ -633,8 +699,16 @@ class RecordSchema(NamedSchema):
 #
 # Module Methods
 #
+def get_other_props(all_props, reserved_props):
+  """
+  Retrieve the non-reserved properties from a dictionary of properties
+  @args reserved_props: The set of reserved properties to exclude
+  """
+  if hasattr(all_props, 'items') and callable(all_props.items):
+    return dict([(k, v) for (k, v) in all_props.items()
+                 if k not in reserved_props])
+
 
-# TODO(hammer): handle non-reserved properties
 def make_avsc_object(json_data, names=None):
   """
   Build Avro Schema from data parsed out of JSON string.
@@ -647,29 +721,32 @@ def make_avsc_object(json_data, names=None):
   # JSON object (non-union)
   if hasattr(json_data, 'get') and callable(json_data.get):
     type = json_data.get('type')
+    other_props = get_other_props(json_data, SCHEMA_RESERVED_PROPS)
     if type in PRIMITIVE_TYPES:
       return PrimitiveSchema(type)
     elif type in NAMED_TYPES:
       name = json_data.get('name')
-      namespace = json_data.get('namespace')
+      namespace = json_data.get('namespace', names.default_namespace)
       if type == 'fixed':
         size = json_data.get('size')
-        return FixedSchema(name, namespace, size, names)
+        return FixedSchema(name, namespace, size, names, other_props)
       elif type == 'enum':
         symbols = json_data.get('symbols')
-        return EnumSchema(name, namespace, symbols, names)
+        doc = json_data.get('doc')
+        return EnumSchema(name, namespace, symbols, names, doc, other_props)
       elif type in ['record', 'error']:
         fields = json_data.get('fields')
-        return RecordSchema(name, namespace, fields, names, type)
+        doc = json_data.get('doc')
+        return RecordSchema(name, namespace, fields, names, type, doc, other_props)
       else:
         raise SchemaParseException('Unknown Named Type: %s' % type)
     elif type in VALID_TYPES:
       if type == 'array':
         items = json_data.get('items')
-        return ArraySchema(items, names)
+        return ArraySchema(items, names, other_props)
       elif type == 'map':
         values = json_data.get('values')
-        return MapSchema(values, names)
+        return MapSchema(values, names, other_props)
       elif type == 'error_union':
         declared_errors = json_data.get('declared_errors')
         return ErrorUnionSchema(declared_errors, names)
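The practical effect of the schema.py changes: 'doc' survives parsing, and non-reserved attributes are retained and exposed through other_props instead of being dropped. A minimal sketch:

    from avro import schema

    s = schema.parse('''{
        "type": "record", "name": "Rec", "doc": "kept now", "cp_int": 1,
        "fields": [{"name": "f", "type": "string",
                    "doc": "field doc", "cp_null": null}]}''')

    print s.doc                     # 'kept now'
    print s.other_props             # {'cp_int': 1}
    print s.fields[0].doc           # 'field doc'
    print s.fields[0].other_props   # {'cp_null': None}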

+ 0 - 0
desktop/core/ext-py/avro-1.5.0/src/avro/tool.py → desktop/core/ext-py/avro-1.7.6/src/avro/tool.py


+ 0 - 0
desktop/core/ext-py/avro-1.5.0/src/avro/txipc.py → desktop/core/ext-py/avro-1.7.6/src/avro/txipc.py


+ 53 - 0
desktop/core/ext-py/avro-1.5.0/test/test_datafile.py → desktop/core/ext-py/avro-1.7.6/test/test_datafile.py

@@ -52,6 +52,11 @@ SCHEMAS_TO_VALIDATE = (
 
 FILENAME = 'test_datafile.out'
 CODECS_TO_VALIDATE = ('null', 'deflate')
+try:
+  import snappy
+  CODECS_TO_VALIDATE += ('snappy',)
+except ImportError:
+  print 'Snappy not present, will skip testing it.'
 
 # TODO(hammer): clean up written files with ant, not os.remove
 class TestDataFile(unittest.TestCase):
@@ -145,5 +150,53 @@ class TestDataFile(unittest.TestCase):
     os.remove(FILENAME)
     self.assertEquals(correct, len(CODECS_TO_VALIDATE)*len(SCHEMAS_TO_VALIDATE))
 
+  def test_context_manager(self):
+    # Context manager was introduced as a first class
+    # member only in Python 2.6 and above.
+    import sys
+    if sys.version_info < (2,6):
+      print 'Skipping context manager tests on this Python version.'
+      return
+    # Test the writer with a 'with' statement.
+    writer = open(FILENAME, 'wb')
+    datum_writer = io.DatumWriter()
+    sample_schema, sample_datum = SCHEMAS_TO_VALIDATE[1]
+    schema_object = schema.parse(sample_schema)
+    with datafile.DataFileWriter(writer, datum_writer, schema_object) as dfw:
+      dfw.append(sample_datum)
+    self.assertTrue(writer.closed)
+
+    # Test the reader with a 'with' statement.
+    datums = []
+    reader = open(FILENAME, 'rb')
+    datum_reader = io.DatumReader()
+    with datafile.DataFileReader(reader, datum_reader) as dfr:
+      for datum in dfr:
+        datums.append(datum)
+    self.assertTrue(reader.closed)
+
+  def test_metadata(self):
+    # Test the writer with a 'with' statement.
+    writer = open(FILENAME, 'wb')
+    datum_writer = io.DatumWriter()
+    sample_schema, sample_datum = SCHEMAS_TO_VALIDATE[1]
+    schema_object = schema.parse(sample_schema)
+    with datafile.DataFileWriter(writer, datum_writer, schema_object) as dfw:
+      dfw.set_meta('test.string', 'foo')
+      dfw.set_meta('test.number', '1')
+      dfw.append(sample_datum)
+    self.assertTrue(writer.closed)
+
+    # Test the reader with a 'with' statement.
+    datums = []
+    reader = open(FILENAME, 'rb')
+    datum_reader = io.DatumReader()
+    with datafile.DataFileReader(reader, datum_reader) as dfr:
+      self.assertEquals('foo', dfr.get_meta('test.string'))
+      self.assertEquals('1', dfr.get_meta('test.number'))
+      for datum in dfr:
+        datums.append(datum)
+    self.assertTrue(reader.closed)
+
 if __name__ == '__main__':
   unittest.main()

+ 2 - 2
desktop/core/ext-py/avro-1.5.0/test/test_datafile_interop.py → desktop/core/ext-py/avro-1.7.6/test/test_datafile_interop.py

@@ -24,12 +24,12 @@ class TestDataFileInterop(unittest.TestCase):
     print 'TEST INTEROP'
     print '============'
     print ''
-    for f in os.listdir('/home/cutting/src/avro/release-1.5.0-rc2/lang/py/../../build/interop/data'):
+    for f in os.listdir('/home/cutting/src/avro/release-1.7.6-rc0/lang/py/../../build/interop/data'):
       print 'READING %s' % f
       print ''
 
       # read data in binary from file
-      reader = open(os.path.join('/home/cutting/src/avro/release-1.5.0-rc2/lang/py/../../build/interop/data', f), 'rb')
+      reader = open(os.path.join('/home/cutting/src/avro/release-1.7.6-rc0/lang/py/../../build/interop/data', f), 'rb')
       datum_reader = io.DatumReader()
       dfr = datafile.DataFileReader(reader, datum_reader)
       for datum in dfr:

+ 0 - 0
desktop/core/ext-py/avro-1.5.0/test/test_io.py → desktop/core/ext-py/avro-1.7.6/test/test_io.py


+ 8 - 1
desktop/core/ext-py/avro-1.5.0/test/test_ipc.py → desktop/core/ext-py/avro-1.7.6/test/test_ipc.py

@@ -21,11 +21,18 @@ import unittest
 
 # This test does import this code, to make sure it at least passes
 # compilation.
-import avro.ipc
+from avro import ipc
 
 class TestIPC(unittest.TestCase):
   def test_placeholder(self):
     pass
 
+  def test_server_with_path(self):
+    client_with_custom_path = ipc.HTTPTransceiver('dummyserver.net', 80, '/service/article')
+    self.assertEqual('/service/article', client_with_custom_path.req_resource)
+
+    client_with_default_path = ipc.HTTPTransceiver('dummyserver.net', 80)
+    self.assertEqual('/', client_with_default_path.req_resource)
+
 if __name__ == '__main__':
   unittest.main()

+ 21 - 4
desktop/core/ext-py/avro-1.5.0/test/test_protocol.py → desktop/core/ext-py/avro-1.7.6/test/test_protocol.py

@@ -38,9 +38,7 @@ class ExampleProtocol(object):
 #
 # Example Protocols
 #
-
-EXAMPLES = [
-  ExampleProtocol("""\
+HELLO_WORLD = ExampleProtocol("""\
 {
   "namespace": "com.acme",
   "protocol": "HelloWorld",
@@ -60,7 +58,9 @@ EXAMPLES = [
     }
   }
 }
-    """, True),
+    """, True)
+EXAMPLES = [
+  HELLO_WORLD,
   ExampleProtocol("""\
 {"namespace": "org.apache.avro.test",
  "protocol": "Simple",
@@ -364,6 +364,23 @@ class TestProtocol(unittest.TestCase):
       (num_correct, len(EXAMPLES))
     self.assertEqual(num_correct, len(EXAMPLES), fail_msg)
 
+  def test_inner_namespace_set(self):
+    print ''
+    print 'TEST INNER NAMESPACE'
+    print '==================='
+    print ''
+    proto = protocol.parse(HELLO_WORLD.protocol_string)
+    self.assertEqual(proto.namespace, "com.acme")
+    greeting_type = proto.types_dict['Greeting']
+    self.assertEqual(greeting_type.namespace, 'com.acme')
+
+  def test_inner_namespace_not_rendered(self):
+    proto = protocol.parse(HELLO_WORLD.protocol_string)
+    self.assertEqual('com.acme.Greeting', proto.types[0].fullname)
+    self.assertEqual('Greeting', proto.types[0].name)
+    # but there shouldn't be 'namespace' rendered to json on the inner type
+    self.assertFalse('namespace' in proto.to_json()['types'][0])
+
   def test_valid_cast_to_string_after_parse(self):
     """
     Test that the string generated by an Avro Protocol object

+ 81 - 0
desktop/core/ext-py/avro-1.5.0/test/test_schema.py → desktop/core/ext-py/avro-1.7.6/test/test_schema.py

@@ -254,6 +254,41 @@ RECORD_EXAMPLES = [
     """, False),
 ]
 
+DOC_EXAMPLES = [
+  ExampleSchema("""\
+    {"type": "record",
+     "name": "TestDoc",
+     "doc":  "Doc string",
+     "fields": [{"name": "name", "type": "string", 
+                 "doc" : "Doc String"}]}
+    """, True),
+  ExampleSchema("""\
+    {"type": "enum", "name": "Test", "symbols": ["A", "B"],
+     "doc": "Doc String"}
+    """, True),
+]
+
+OTHER_PROP_EXAMPLES = [
+  ExampleSchema("""\
+    {"type": "record",
+     "name": "TestRecord",
+     "cp_string": "string",
+     "cp_int": 1,
+     "cp_array": [ 1, 2, 3, 4],
+     "fields": [ {"name": "f1", "type": "string", "cp_object": {"a":1,"b":2} },
+                 {"name": "f2", "type": "long", "cp_null": null} ]}
+    """, True),
+  ExampleSchema("""\
+     {"type": "map", "values": "long", "cp_boolean": true}
+    """, True),
+  ExampleSchema("""\
+    {"type": "enum",
+     "name": "TestEnum",
+     "symbols": [ "one", "two", "three" ],
+     "cp_float" : 1.0 }
+    """,True),
+]
+
 EXAMPLES = PRIMITIVE_EXAMPLES
 EXAMPLES += FIXED_EXAMPLES
 EXAMPLES += ENUM_EXAMPLES
@@ -261,6 +296,7 @@ EXAMPLES += ARRAY_EXAMPLES
 EXAMPLES += MAP_EXAMPLES
 EXAMPLES += UNION_EXAMPLES
 EXAMPLES += RECORD_EXAMPLES
+EXAMPLES += DOC_EXAMPLES
 
 VALID_EXAMPLES = [e for e in EXAMPLES if e.valid]
 
@@ -390,5 +426,50 @@ class TestSchema(unittest.TestCase):
     fullname = schema.Name('a', 'o.a.a', 'o.a.h').fullname
     self.assertEqual(fullname, 'o.a.a.a')
 
+  def test_doc_attributes(self):
+    print_test_name('TEST DOC ATTRIBUTES')
+    correct = 0
+    for example in DOC_EXAMPLES:
+      original_schema = schema.parse(example.schema_string)
+      if original_schema.doc is not None:
+        correct += 1
+      if original_schema.type == 'record':
+        for f in original_schema.fields:
+          if f.doc is None:
+            self.fail("Failed to preserve 'doc' in fields: " + example.schema_string)
+    self.assertEqual(correct,len(DOC_EXAMPLES))
+
+  def test_other_attributes(self):
+    print_test_name('TEST OTHER ATTRIBUTES')
+    correct = 0
+    props = {}
+    for example in OTHER_PROP_EXAMPLES:
+      original_schema = schema.parse(example.schema_string)
+      round_trip_schema = schema.parse(str(original_schema))
+      self.assertEqual(original_schema.other_props,round_trip_schema.other_props)
+      if original_schema.type == "record":
+        field_props = 0
+        for f in original_schema.fields:
+          if f.other_props:
+            props.update(f.other_props)
+            field_props += 1
+        self.assertEqual(field_props,len(original_schema.fields))
+      if original_schema.other_props:
+        props.update(original_schema.other_props)
+        correct += 1
+    for k in props:
+      v = props[k]
+      if k == "cp_boolean":
+        self.assertEqual(type(v), bool)
+      elif k == "cp_int":
+        self.assertEqual(type(v), int)
+      elif k == "cp_object":
+        self.assertEqual(type(v), dict)
+      elif k == "cp_float":
+        self.assertEqual(type(v), float)
+      elif k == "cp_array":
+        self.assertEqual(type(v), list)
+    self.assertEqual(correct,len(OTHER_PROP_EXAMPLES))
+
 if __name__ == '__main__':
   unittest.main()

+ 256 - 0
desktop/core/ext-py/avro-1.7.6/test/test_script.py

@@ -0,0 +1,256 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+# http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+import csv
+from cStringIO import StringIO
+try:
+    import json
+except ImportError:
+    import simplejson as json
+from tempfile import NamedTemporaryFile
+import avro.schema
+from avro.io import DatumWriter
+from avro.datafile import DataFileWriter
+from os.path import dirname, join, isfile
+from os import remove
+from operator import itemgetter
+
+NUM_RECORDS = 7
+
+try:
+    from subprocess import check_output
+except ImportError:
+    from subprocess import Popen, PIPE
+
+    def check_output(args):
+        pipe = Popen(args, stdout=PIPE)
+        if pipe.wait() != 0:
+            raise ValueError
+        return pipe.stdout.read()
+
+try:
+    from subprocess import check_call
+except ImportError:
+    def check_call(args, **kw):
+        pipe = Popen(args, **kw)
+        assert pipe.wait() == 0
+
+SCHEMA = '''
+{
+    "namespace": "test.avro",
+        "name": "LooneyTunes",
+        "type": "record",
+        "fields": [
+            {"name": "first", "type": "string"},
+            {"name": "last", "type": "string"},
+            {"name": "type", "type": "string"}
+        ]
+}
+'''
+
+LOONIES = (
+    ("daffy", "duck", "duck"),
+    ("bugs", "bunny", "bunny"),
+    ("tweety", "", "bird"),
+    ("road", "runner", "bird"),
+    ("wile", "e", "coyote"),
+    ("pepe", "le pew", "skunk"),
+    ("foghorn", "leghorn", "rooster"),
+)
+
+def looney_records():
+    for f, l, t in LOONIES:
+        yield {"first": f, "last" : l, "type" : t}
+
+SCRIPT = join(dirname(__file__), "..", "scripts", "avro")
+
+_JSON_PRETTY = '''{
+    "type": "duck", 
+    "last": "duck", 
+    "first": "daffy"
+}'''
+
+def gen_avro(filename):
+    schema = avro.schema.parse(SCHEMA)
+    fo = open(filename, "wb")
+    writer = DataFileWriter(fo, DatumWriter(), schema)
+    for record in looney_records():
+        writer.append(record)
+    writer.close()
+    fo.close()
+
+def tempfile():
+    return NamedTemporaryFile(delete=False).name
+
+class TestCat(unittest.TestCase):
+    def setUp(self):
+        self.avro_file = tempfile()
+        gen_avro(self.avro_file)
+
+    def tearDown(self):
+        if isfile(self.avro_file):
+            remove(self.avro_file)
+
+    def _run(self, *args, **kw):
+        out = check_output([SCRIPT, "cat", self.avro_file] + list(args))
+        if kw.get("raw"):
+            return out
+        else:
+            return out.splitlines()
+
+    def test_print(self):
+        assert len(self._run()) == NUM_RECORDS
+
+    def test_filter(self):
+        assert len(self._run("--filter", "r['type']=='bird'")) == 2
+
+    def test_skip(self):
+        skip = 3
+        assert len(self._run("--skip", str(skip))) == NUM_RECORDS - skip
+
+    def test_csv(self):
+        reader = csv.reader(StringIO(self._run("-f", "csv", raw=True)))
+        assert len(list(reader)) == NUM_RECORDS
+
+    def test_csv_header(self):
+        io = StringIO(self._run("-f", "csv", "--header", raw=True))
+        reader = csv.DictReader(io)
+        r = {"type": "duck", "last": "duck", "first": "daffy"}
+        assert next(reader) == r
+
+    def test_print_schema(self):
+        out = self._run("--print-schema", raw=True)
+        assert json.loads(out)["namespace"] == "test.avro"
+
+    def test_help(self):
+        # Just see we have these
+        self._run("-h")
+        self._run("--help")
+
+    def test_json_pretty(self):
+        out = self._run("--format", "json-pretty", "-n", "1", raw=1)
+        assert out.strip() == _JSON_PRETTY.strip()
+
+    def test_version(self):
+        check_output([SCRIPT, "cat", "--version"])
+
+    def test_files(self):
+        out = self._run(self.avro_file)
+        assert len(out) == 2 * NUM_RECORDS
+
+    def test_fields(self):
+        # One field selection (no comma)
+        out = self._run('--fields', 'last')
+        assert json.loads(out[0]) == {'last': 'duck'}
+
+        # Field selection (with comma and space)
+        out = self._run('--fields', 'first, last')
+        assert json.loads(out[0]) == {'first': 'daffy', 'last': 'duck'}
+
+        # Empty fields should get all
+        out = self._run('--fields', '')
+        assert json.loads(out[0]) == \
+                {'first': 'daffy', 'last': 'duck', 'type': 'duck'}
+
+        # Non existing fields are ignored
+        out = self._run('--fields', 'first,last,age')
+        assert json.loads(out[0]) == {'first': 'daffy', 'last': 'duck'}
+
+class TestWrite(unittest.TestCase):
+    def setUp(self):
+        self.json_file = tempfile() + ".json"
+        fo = open(self.json_file, "w")
+        for record in looney_records():
+            json.dump(record, fo)
+            fo.write("\n")
+        fo.close()
+
+        self.csv_file = tempfile() + ".csv"
+        fo = open(self.csv_file, "w")
+        write = csv.writer(fo).writerow
+        get = itemgetter("first", "last", "type")
+        for record in looney_records():
+            write(get(record))
+        fo.close()
+
+        self.schema_file = tempfile()
+        fo = open(self.schema_file, "w")
+        fo.write(SCHEMA)
+        fo.close()
+
+    def tearDown(self):
+        for filename in (self.csv_file, self.json_file, self.schema_file):
+            try:
+                remove(filename)
+            except OSError:
+                continue
+
+    def _run(self, *args, **kw):
+        args = [SCRIPT, "write", "--schema", self.schema_file] + list(args)
+        check_call(args, **kw)
+
+    def load_avro(self, filename):
+        out = check_output([SCRIPT, "cat", filename])
+        return map(json.loads, out.splitlines())
+
+    def test_version(self):
+        check_call([SCRIPT, "write", "--version"])
+
+    def format_check(self, format, filename):
+        tmp = tempfile()
+        fo = open(tmp, "wb")
+        self._run(filename, "-f", format, stdout=fo)
+        fo.close()
+
+        records = self.load_avro(tmp)
+        assert len(records) == NUM_RECORDS
+        assert records[0]["first"] == "daffy"
+
+        remove(tmp)
+
+    def test_write_json(self):
+        self.format_check("json", self.json_file)
+
+    def test_write_csv(self):
+        self.format_check("csv", self.csv_file)
+
+    def test_outfile(self):
+        tmp = tempfile()
+        remove(tmp)
+        self._run(self.json_file, "-o", tmp)
+
+        assert len(self.load_avro(tmp)) == NUM_RECORDS
+        remove(tmp)
+
+    def test_multi_file(self):
+        tmp = tempfile()
+        fo = open(tmp, "wb")
+        self._run(self.json_file, self.json_file, stdout=fo)
+        fo.close()
+
+        assert len(self.load_avro(tmp)) == 2 * NUM_RECORDS
+        remove(tmp)
+
+    def test_stdin(self):
+        tmp = tempfile()
+
+        info = open(self.json_file, "rb")
+        fo = open(tmp, "wb")
+        self._run("--input-type", "json", stdin=info, stdout=fo)
+        fo.close()
+
+        assert len(self.load_avro(tmp)) == NUM_RECORDS
+        remove(tmp)