HUE-577 [fb] Snappy compression support

Snappy doesn't support streaming decompression, so use at your own risk.
Decompressing a file larger than the memory allocated to the Hue server
could bog the server down completely.
abec 13 years ago
commit 3c02468
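
A note on the risk called out in the commit message: python-snappy exposes only one-shot compress/decompress calls here, so the entire compressed input and the entire decompressed output must sit in memory at once. Below is a minimal sketch of the guarded read pattern this commit introduces, assuming the python-snappy module and a hypothetical local path (the real code applies the same cap to HDFS files via fs.stats):

    import os
    import snappy

    MAX_SNAPPY_DECOMPRESSION_SIZE = 1024 * 1024 * 25  # same default as the new config key

    def read_snappy_file(path):
        # snappy.decompress() buffers everything: refuse oversized input up
        # front, otherwise one large file could exhaust the server's memory.
        if os.path.getsize(path) > MAX_SNAPPY_DECOMPRESSION_SIZE:
            raise ValueError('File is larger than the max snappy decompression size')
        f = open(path, 'rb')
        try:
            return snappy.decompress(f.read())  # one-shot; no streaming API used
        finally:
            f.close()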

+ 28 - 0
apps/filebrowser/src/filebrowser/conf.py

@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from django.utils.translation import ugettext_lazy as _
+
+from desktop.lib.conf import Config
+
+
+MAX_SNAPPY_DECOMPRESSION_SIZE = Config(
+  key="max_snappy_decompression_size",
+  help=_("Max snappy decompression size in bytes."),
+  private=True,
+  default=1024*1024*25,
+  type=int)
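
For reference, a short sketch of how this Config entry is consumed elsewhere in the change: views.py reads the limit with .get(), and the tests temporarily shrink it with set_for_testing(), which returns a restore callback (both calls appear in the diffs below):

    from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE

    # Effective limit in bytes (defaults to 25 MB as configured above).
    limit = MAX_SNAPPY_DECOMPRESSION_SIZE.get()

    # Tests override the limit and must restore it afterwards:
    finish = MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1)
    try:
        pass  # exercise the size check here
    finally:
        finish()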

+ 1 - 1
apps/filebrowser/src/filebrowser/templates/display.mako

@@ -55,7 +55,7 @@ ${commonheader(_('%(filename)s - File Viewer') % dict(filename=truncate(filename
 				        <li><a href="${base_url}?offset=0&length=2000&mode=${view['mode']}&compression=gzip">${_('Preview As Gzip')}</a></li>
 				      % endif
 
-				      % if view['compression'] != "avro" and path.endswith('.avro'):
+				      % if view['compression'] != "avro" and view['compression'] != "snappy_avro" and path.endswith('.avro'):
 				        <li><a href="${base_url}?offset=0&length=2000&mode=${view['mode']}&compression=avro">${_('Preview As Avro')}</a></li>
 				      % endif
 

+ 109 - 60
apps/filebrowser/src/filebrowser/views.py

@@ -52,6 +52,7 @@ from desktop.lib import i18n, paginator
 from desktop.lib.conf import coerce_bool
 from desktop.lib.django_util import make_absolute, render, render_json, format_preserving_redirect
 from desktop.lib.exceptions_renderable import PopupException
+from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE
 from filebrowser.lib.archives import archive_factory
 from filebrowser.lib.rwx import filetype, rwx
 from filebrowser.lib import xxd
@@ -637,86 +638,112 @@ def read_contents(codec_type, path, fs, offset, length):
        length - Amount of bytes to read after offset.
        Returns: A tuple of codec_type, offset, length and contents read.
     """
-    # Auto codec detection for [gzip, avro, none]
-    # Only done when codec_type is unset
-    if not codec_type:
-        if path.endswith('.gz') and detect_gzip(fs.open(path).read(2)):
-            codec_type = 'gzip'
-            offset = 0
-        elif path.endswith('.avro') and detect_avro(fs.open(path).read(3)):
-            codec_type = 'avro'
-        else:
-            codec_type = 'none'
-
-    f = fs.open(path)
     contents = ''
 
-    if codec_type == 'gzip':
-        contents = _read_gzip(fs, path, offset, length)
-    elif codec_type == 'avro':
-        contents = _read_avro(fs, path, offset, length)
-    else:
-        # for 'none' type.
-        contents = _read_simple(fs, path, offset, length)
+    fhandle = fs.open(path)
+    try:
+        stats = fs.stats(path)
+
+        # Auto codec detection for [gzip, avro, none]
+        # Only done when codec_type is unset
+        contents = fhandle.read(3)
+        if not codec_type:
+            codec_type = 'none'
+            if path.endswith('.gz') and detect_gzip(contents):
+                codec_type = 'gzip'
+                offset = 0
+            elif path.endswith('.avro'):
+                if detect_avro(contents):
+                    codec_type = 'avro'
+                elif snappy_installed():
+                    if stats.size > MAX_SNAPPY_DECOMPRESSION_SIZE.get():
+                        raise PopupException(_('Failed to validate snappy compressed file. File size is greater than allowed max snappy decompression size of %d') % MAX_SNAPPY_DECOMPRESSION_SIZE.get())
+
+                    if detect_snappy(contents + fhandle.read()):
+                        codec_type = 'snappy_avro'
+        fhandle.seek(0)
+
+        if codec_type == 'avro' and snappy_installed() and detect_snappy(fhandle.read()):
+            fhandle.seek(0)
+            codec_type = 'snappy_avro'
+
+        if codec_type == 'gzip':
+            contents = _read_gzip(fhandle, path, offset, length, stats)
+        elif codec_type == 'avro':
+            contents = _read_avro(fhandle, path, offset, length, stats)
+        elif codec_type == 'snappy_avro':
+            contents = _read_snappy_avro(fhandle, path, offset, length, stats)
+        else:
+            # for 'none' type.
+            contents = _read_simple(fhandle, path, offset, length, stats)
+
+    finally:
+        fhandle.close()
 
     return (codec_type, offset, length, contents)
 
 
-def _read_avro(fs, path, offset, length):
+def _decompress_snappy(compressed_content):
+    try:
+        import snappy
+        return snappy.decompress(compressed_content)
+    except Exception, e:
+        raise PopupException(_('Failed to decompress snappy compressed file.'), detail=e)
+
+
+def _read_snappy_avro(fhandle, path, offset, length, stats):
+    if not snappy_installed():
+        raise PopupException(_('Failed to decompress snappy compressed file. Snappy is not installed!'))
+
+    if stats.size > MAX_SNAPPY_DECOMPRESSION_SIZE.get():
+        raise PopupException(_('Failed to decompress snappy compressed file. File size is greater than allowed max snappy decompression size of %d') % MAX_SNAPPY_DECOMPRESSION_SIZE.get())
+
+    return _read_avro(StringIO(_decompress_snappy(fhandle.read())), path, offset, length, stats)
+
+
+def _read_avro(fhandle, path, offset, length, stats):
     contents = ''
     try:
-        fhandle = fs.open(path)
-        try:
-            fhandle.seek(offset)
-            data_file_reader = datafile.DataFileReader(fhandle, io.DatumReader())
-            contents_list = []
-            read_start = fhandle.tell()
-            # Iterate over the entire sought file.
-            for datum in data_file_reader:
-                read_length = fhandle.tell() - read_start
-                if read_length > length and len(contents_list) > 0:
-                    break
-                else:
-                    datum_str = str(datum) + "\n"
-                    contents_list.append(datum_str)
-            data_file_reader.close()
-            contents = "".join(contents_list)
-        except:
-            logging.warn("Could not read avro file at %s" % path, exc_info=True)
-            raise PopupException(_("Failed to read Avro file."))
-    finally:
-        fhandle.close()
+        fhandle.seek(offset)
+        data_file_reader = datafile.DataFileReader(fhandle, io.DatumReader())
+        contents_list = []
+        read_start = fhandle.tell()
+        # Iterate over the entire sought file.
+        for datum in data_file_reader:
+            read_length = fhandle.tell() - read_start
+            if read_length > length and len(contents_list) > 0:
+                break
+            else:
+                datum_str = str(datum) + "\n"
+                contents_list.append(datum_str)
+        data_file_reader.close()
+        contents = "".join(contents_list)
+    except:
+        logging.warn("Could not read avro file at %s" % path, exc_info=True)
+        raise PopupException(_("Failed to read Avro file."))
     return contents
 
 
-def _read_gzip(fs, path, offset, length):
+def _read_gzip(fhandle, path, offset, length, stats):
     contents = ''
     if offset and offset != 0:
         raise PopupException(_("Offsets are not supported with Gzip compression."))
     try:
-        fhandle = fs.open(path)
-        try:
-            contents = GzipFile('', 'r', 0, StringIO(fhandle.read())).read(length)
-        except:
-            logging.warn("Could not decompress file at %s" % path, exc_info=True)
-            raise PopupException(_("Failed to decompress file."))
-    finally:
-        fhandle.close()
+        contents = GzipFile('', 'r', 0, StringIO(fhandle.read())).read(length)
+    except:
+        logging.warn("Could not decompress file at %s" % path, exc_info=True)
+        raise PopupException(_("Failed to decompress file."))
     return contents
 
 
-def _read_simple(fs, path, offset, length):
+def _read_simple(fhandle, path, offset, length, stats):
     contents = ''
     try:
-        fhandle = fs.open(path)
-        try:
-            fhandle.seek(offset)
-            contents = fhandle.read(length)
-        except:
-            logging.warn("Could not read file at %s" % path, exc_info=True)
-            raise PopupException(_("Failed to read file."))
-    finally:
-        fhandle.close()
+        fhandle.seek(offset)
+        contents = fhandle.read(length)
+    except:
+        logging.warn("Could not read file at %s" % path, exc_info=True)
+        raise PopupException(_("Failed to read file."))
     return contents
 
 
@@ -731,6 +758,28 @@ def detect_avro(contents):
     return contents[:3] == '\x4F\x62\x6A'
 
 
+def detect_snappy(contents):
+    '''
+    A small helper which checks whether the given contents are Snappy-compressed.
+    It requires the entire contents of the compressed file.
+    It will also return False if the snappy library is not available.
+    '''
+    try:
+        import snappy
+        return snappy.isValidCompressed(contents)
+    except:
+        return False
+
+
+def snappy_installed():
+    '''Snappy is a library that isn't supported by Python 2.4.'''
+    try:
+        import snappy
+        return True
+    except:
+        return False
+
+
 def _calculate_navigation(offset, length, size):
     """
     List of (offset, length, string) tuples for suggested navigation through the file.
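
To recap the detection order that read_contents now implements: gzip is identified by extension plus the standard two-byte magic number, Avro by the three-byte 'Obj' header, and Snappy only by validating the whole buffer (raw snappy output carries no magic header), which is why the size cap is checked before detect_snappy ever runs. A standalone sketch of that ordering, assuming contents holds the full file by the time the snappy branch is reached:

    def sniff_codec(path, contents, size, max_snappy_size):
        # gzip: extension check plus the standard \x1f\x8b magic bytes
        if path.endswith('.gz') and contents[:2] == '\x1f\x8b':
            return 'gzip'
        if path.endswith('.avro'):
            # avro container files start with the 3-byte 'Obj' magic
            if contents[:3] == '\x4F\x62\x6A':
                return 'avro'
            # raw snappy has no header, so the only test is full-buffer
            # validation -- hence the size cap enforced beforehand
            if size <= max_snappy_size and snappy_installed():
                import snappy
                if snappy.isValidCompressed(contents):
                    return 'snappy_avro'
        return 'none'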

+ 78 - 1
apps/filebrowser/src/filebrowser/views_test.py

@@ -27,17 +27,21 @@ import logging
 import os
 import re
 import urlparse
+from avro import schema, datafile, io
+from StringIO import StringIO
 
 from django.utils.encoding import smart_str
 from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
 from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal
 
 from desktop.lib.django_test_util import make_logged_in_client
 from desktop.lib.test_utils import grant_access
 from hadoop import pseudo_hdfs4
 
-from avro import schema, datafile, io
+from conf import MAX_SNAPPY_DECOMPRESSION_SIZE
 from lib.rwx import expand_mode
+from views import snappy_installed
 
 
 LOG = logging.getLogger(__name__)
@@ -520,6 +524,79 @@ def test_chooser():
   assert_equal('/', dic['path'])
 
 
+@attr('requires_hadoop')
+def test_view_snappy_compressed_avro():
+  if not snappy_installed():
+    raise SkipTest
+  import snappy
+
+  cluster = pseudo_hdfs4.shared_cluster()
+  try:
+    c = make_logged_in_client()
+    cluster.fs.setuser(cluster.superuser)
+    if cluster.fs.isdir("/test-snappy-avro-filebrowser"):
+      cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
+
+    cluster.fs.mkdir('/test-snappy-avro-filebrowser/')
+
+    test_schema = schema.parse("""
+      {
+        "name": "test",
+        "type": "record",
+        "fields": [
+          { "name": "name", "type": "string" },
+          { "name": "integer", "type": "int" }
+        ]
+      }
+    """)
+
+    # Cannot use StringIO with datafile writer!
+    f = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.avro', "w")
+    data_file_writer = datafile.DataFileWriter(f, io.DatumWriter(),
+                                                writers_schema=test_schema,
+                                                codec='deflate')
+    dummy_datum = {
+      'name': 'Test',
+      'integer': 10,
+    }
+    data_file_writer.append(dummy_datum)
+    data_file_writer.close()
+
+    fh = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.avro', 'r')
+    f = cluster.fs.open('/test-snappy-avro-filebrowser/test-view.compressed.avro', "w")
+    f.write(snappy.compress(fh.read()))
+    f.close()
+    fh.close()
+
+    # Forcing snappy_avro on an uncompressed file should fail to decompress
+    response = c.get('/filebrowser/view/test-snappy-avro-filebrowser/test-view.avro?compression=snappy_avro')
+    assert_true('Failed to decompress' in response.context['message'], response)
+
+    # Auto-detection should recognize and decompress the snappy compressed file
+    response = c.get('/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro')
+    assert_equal('snappy_avro', response.context['view']['compression'])
+    assert_equal(eval(response.context['view']['contents']), dummy_datum, response)
+    response = c.get('/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro?compression=snappy_avro')
+    assert_equal('snappy_avro', response.context['view']['compression'])
+    assert_equal(eval(response.context['view']['contents']), dummy_datum, response)
+
+    # Avro should also decompress snappy
+    response = c.get('/filebrowser/view/test-snappy-avro-filebrowser/test-view.compressed.avro?compression=avro')
+    assert_equal('snappy_avro', response.context['view']['compression'])
+    assert_equal(eval(response.context['view']['contents']), dummy_datum, response)
+
+    # A file larger than the max snappy decompression size should be rejected
+    finish = MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1)
+    response = c.get('/filebrowser/view/test-snappy-avro-filebrowser/test-view.avro?compression=snappy_avro')
+    assert_true('File size is greater than allowed max snappy decompression size' in response.context['message'], response)
+
+  finally:
+    finish()
+    try:
+      cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
+    except:
+      pass      # Don't let cleanup errors mask earlier failures
+
 @attr('requires_hadoop')
 def test_view_avro():
   cluster = pseudo_hdfs4.shared_cluster()