[filebrowser] Log all caught naked "except:" blocks

Erick Tryzelaar, 10 years ago
parent
commit 8dc87d0
2 changed files with 50 additions and 93 deletions
  1. +9 -5    apps/filebrowser/src/filebrowser/views.py
  2. +41 -88  apps/filebrowser/src/filebrowser/views_test.py
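The change itself is mechanical: every naked "except:" that used to swallow an error silently now records the full traceback before re-raising or returning its fallback value. A minimal sketch of the pattern, with illustrative names (read_contents is not a real helper in this codebase):

import logging

LOG = logging.getLogger(__name__)

def read_contents(fhandle, path, length):
    try:
        return fhandle.read(length)
    except Exception:
        # logging.exception() logs at ERROR level and appends the active
        # traceback, replacing both the old logging.warn(..., exc_info=True)
        # calls in views.py and the silent "except: pass" blocks in the tests.
        LOG.exception("Could not read file at %s", path)
        raise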

+ 9 - 5
apps/filebrowser/src/filebrowser/views.py

@@ -717,7 +717,7 @@ def _read_avro(fhandle, path, offset, length, stats):
 
         contents = "".join(contents_list)
     except:
-        logging.warn("Could not read avro file at %s" % path, exc_info=True)
+        logging.exception("Could not read avro file at %s" % path)
         raise PopupException(_("Failed to read Avro file."))
     return contents
 
@@ -729,7 +729,7 @@ def _read_parquet(fhandle, path, offset, length, stats):
         dumped_data.seek(offset)
         return dumped_data.read()
     except:
-        logging.warn("Could not read parquet file at %s" % path, exc_info=True)
+        logging.exception("Could not read parquet file at %s" % path)
         raise PopupException(_("Failed to read Parquet file."))
 
 
@@ -740,7 +740,7 @@ def _read_gzip(fhandle, path, offset, length, stats):
     try:
         contents = GzipFile('', 'r', 0, StringIO(fhandle.read())).read(length)
     except:
-        logging.warn("Could not decompress file at %s" % path, exc_info=True)
+        logging.exception("Could not decompress file at %s" % path)
         raise PopupException(_("Failed to decompress file."))
     return contents
 
@@ -751,7 +751,7 @@ def _read_simple(fhandle, path, offset, length, stats):
         fhandle.seek(offset)
         contents = fhandle.read(length)
     except:
-        logging.warn("Could not read file at %s" % path, exc_info=True)
+        logging.exception("Could not read file at %s" % path)
         raise PopupException(_("Failed to read file."))
     return contents
 
@@ -777,6 +777,7 @@ def detect_snappy(contents):
         import snappy
         return snappy.isValidCompressed(contents)
     except:
+        logging.exception('failed to detect snappy')
         return False
 
 
@@ -792,7 +793,10 @@ def snappy_installed():
     try:
         import snappy
         return True
+    except ImportError:
+        return False
     except:
+        logging.exception('failed to verify if snappy is installed')
         return False
 
 
@@ -1321,4 +1325,4 @@ def truncate(toTruncate, charsToKeep=50):
         return toTruncate
 
 def _is_hdfs_superuser(request):
-  return request.user.username == request.fs.superuser or request.user.groups.filter(name__exact=request.fs.supergroup).exists()
+  return request.user.username == request.fs.superuser or request.user.groups.filter(name__exact=request.fs.supergroup).exists()
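Taken together, the snappy hunks above leave snappy_installed() separating the expected failure (the optional snappy package simply is not installed) from anything unexpected; roughly, the resulting function reads:

import logging

def snappy_installed():
    # A plain ImportError only means the optional package is absent, so it
    # stays quiet; any other failure raised while importing is surprising
    # and gets its traceback logged before falling back to False.
    try:
        import snappy  # noqa: F401 -- only importability matters here
        return True
    except ImportError:
        return False
    except:
        logging.exception('failed to verify if snappy is installed')
        return False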

+ 41 - 88
apps/filebrowser/src/filebrowser/views_test.py

@@ -42,6 +42,22 @@ from views import snappy_installed
 LOG = logging.getLogger(__name__)
 
 
+def cleanup_tree(cluster, path):
+  try:
+    cluster.fs.rmtree(path)
+  except:
+    # Don't let cleanup errors mask earlier failures
+    LOG.exception('failed to cleanup %s' % path)
+
+
+def cleanup_file(cluster, path):
+  try:
+    cluster.fs.remove(path)
+  except:
+    # Don't let cleanup errors mask earlier failures
+    LOG.exception('failed to cleanup %s' % path)
+
+
 @attr('requires_hadoop')
 def test_remove():
   cluster = pseudo_hdfs4.shared_cluster()
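Each test's finally block now delegates to these helpers, so a cleanup failure is logged rather than silently ignored and still cannot mask an earlier assertion error. A hypothetical minimal test (not part of this diff, reusing the module's existing imports) would look like:

@attr('requires_hadoop')
def test_cleanup_example():
  # Hypothetical example only: assertions run in the try block, and
  # cleanup_tree() in finally logs -- but does not re-raise -- any
  # failure while removing the test directory.
  cluster = pseudo_hdfs4.shared_cluster()
  prefix = '/test-cleanup-example'
  try:
    cluster.fs.mkdir(prefix)
    assert_true(cluster.fs.exists(prefix))
  finally:
    cleanup_tree(cluster, prefix)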
@@ -74,10 +90,7 @@ def test_remove():
     assert_false(cluster.fs.exists(PATH_3))
 
   finally:
-    try:
-      cluster.fs.rmtree(prefix)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, prefix)
 
 
 @attr('requires_hadoop')
@@ -128,10 +141,7 @@ def test_move():
     assert_true(cluster.fs.exists(SUB_PATH2_3))
 
   finally:
-    try:
-      cluster.fs.rmtree(prefix)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, prefix)
 
 
 @attr('requires_hadoop')
@@ -182,10 +192,7 @@ def test_copy():
     assert_true(cluster.fs.exists(SUB_PATH2_3))
 
   finally:
-    try:
-      cluster.fs.rmtree(prefix)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, prefix)
 
 
 @attr('requires_hadoop')
@@ -215,10 +222,7 @@ def test_mkdir_singledir():
     assert_equal(dir_listing[2]['name'], success_path)
 
   finally:
-    try:
-      cluster.fs.rmtree(prefix)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, prefix)
 
 
 @attr('requires_hadoop')
@@ -249,10 +253,7 @@ def test_touch():
     assert_equal(file_listing[2]['name'], success_path)
 
   finally:
-    try:
-      cluster.fs.rmtree(prefix)
-    except:
-      pass
+    cleanup_tree(cluster, prefix)
 
 
 @attr('requires_hadoop')
@@ -304,12 +305,9 @@ def test_chmod():
     assert_equal(041777, int(cluster.fs.stats(PATH_3)["mode"]))
 
   finally:
-    try:
-      cluster.fs.rmtree(PATH)     # Clean up
-      cluster.fs.rmtree(PATH_2)     # Clean up
-      cluster.fs.rmtree(PATH_3)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, PATH)
+    cleanup_tree(cluster, PATH_2)
+    cleanup_tree(cluster, PATH_3)
 
 
 @attr('requires_hadoop')
@@ -349,10 +347,7 @@ def test_chmod_sticky():
     assert_equal(False, mode[-1])
 
   finally:
-    try:
-      cluster.fs.rmtree(PATH)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, PATH)
 
 
 @attr('requires_hadoop')
@@ -496,7 +491,8 @@ def test_listdir():
     try:
       cluster.fs.do_as_superuser(cluster.fs.rmtree, prefix)
     except:
-      pass      # Don't let cleanup errors mask earlier failures
+      # Don't let cleanup errors mask earlier failures
+      LOG.exception('failed to cleanup %s' % prefix)
 
 
 @attr('requires_hadoop')
@@ -569,10 +565,7 @@ def test_listdir_sort_and_filter():
     listing = c.get('/filebrowser/view' + BASE + '?filter=1&sortby=name&descending=true&pagesize=1&pagenum=2').context['files']
     assert_equal(['..', '.', '1'], [ f['name'] for f in listing ])
   finally:
-    try:
-      cluster.fs.rmtree(BASE)
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, BASE)
 
 
 @attr('requires_hadoop')
@@ -638,10 +631,7 @@ def test_view_snappy_compressed():
   finally:
     for done in finish:
       done()
-    try:
-      cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, '/test-snappy-avro-filebrowser/')
 
 
 @attr('requires_hadoop')
@@ -697,10 +687,7 @@ def test_view_snappy_compressed_avro():
   finally:
     for done in finish:
       done()
-    try:
-      cluster.fs.rmtree('/test-snappy-avro-filebrowser/')
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, '/test-snappy-avro-filebrowser/')
 
 
 @attr('requires_hadoop')
@@ -759,10 +746,7 @@ def test_view_avro():
     assert_true('Failed to decompress' in response.context['message'])
 
   finally:
-    try:
-      cluster.fs.rmtree('/test-avro-filebrowser/')
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, '/test-avro-filebrowser/')
 
 
 @attr('requires_hadoop')
@@ -788,10 +772,7 @@ def test_view_parquet():
     assert_true('FRANCE' in response.context['view']['contents'])
 
   finally:
-    try:
-      cluster.fs.rmtree('/test-parquet-filebrowser/')
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, '/test-parquet-filebrowser/')
 
 
 @attr('requires_hadoop')
@@ -838,10 +819,7 @@ def test_view_gz():
     assert_true("Failed to decompress" in response.context['message'])
 
   finally:
-    try:
-      cluster.fs.rmtree('/test-gz-filebrowser/')
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
+    cleanup_tree(cluster, '/test-gz-filebrowser/')
 
 
 @attr('requires_hadoop')
@@ -943,10 +921,7 @@ def view_helper(cluster, encoding, content):
     assert_equal(response.context['view']['contents'],
                  unicode(bytestring[0:8], encoding, errors='replace'))
   finally:
-    try:
-      cluster.fs.remove(filename)
-    except Exception, ex:
-      LOG.error('Failed to cleanup %s: %s' % (filename, ex))
+    cleanup_file(cluster, filename)
 
 
 @attr('requires_hadoop')
@@ -977,10 +952,7 @@ def test_edit_i18n():
     pass_2 = pass_1 + u'세상'
     edit_helper(cluster, 'johab', pass_1, pass_2)
   finally:
-    try:
-      cluster.fs.rmtree('/test-filebrowser/')
-    except Exception, ex:
-      LOG.error('Failed to remove tree /test-filebrowser: %s' % (ex,))
+    cleanup_tree(cluster, '/test-filebrowser/')
 
 
 def edit_helper(cluster, encoding, contents_pass_1, contents_pass_2):
@@ -1033,10 +1005,7 @@ def edit_helper(cluster, encoding, contents_pass_1, contents_pass_2):
 
     # TODO(todd) add test for maintaining ownership/permissions
   finally:
-    try:
-      cluster.fs.remove(filename)
-    except Exception, ex:
-      LOG.error('Failed to remove %s: %s' % (smart_str(filename), ex))
+    cleanup_file(cluster, filename)
 
 
 @attr('requires_hadoop')
@@ -1097,10 +1066,7 @@ def test_upload_file():
       # StopFutureHandlers() does not seem to work in test mode as it continues to MemoryFileUploadHandler after perm issue and so fails.
       pass
   finally:
-    try:
-      cluster.fs.remove(HDFS_DEST_DIR)
-    except Exception, ex:
-      pass
+    cleanup_file(cluster, HDFS_DEST_DIR)
 
 @attr('requires_hadoop')
 def test_upload_zip():
@@ -1137,10 +1103,7 @@ def test_upload_zip():
     assert_equal(0, response['status'], response)
     assert_true(cluster.fs.exists(HDFS_ZIP_FILE))
   finally:
-    try:
-      cluster.fs.remove(HDFS_DEST_DIR)
-    except:
-      pass
+    cleanup_file(cluster, HDFS_DEST_DIR)
 
 @attr('requires_hadoop')
 def test_upload_tgz():
@@ -1178,10 +1141,7 @@ def test_upload_tgz():
     assert_equal(0, response['status'], response)
     assert_true(cluster.fs.exists(HDFS_TGZ_FILE))
   finally:
-    try:
-      cluster.fs.remove(HDFS_DEST_DIR)
-    except:
-      pass
+    cleanup_file(cluster, HDFS_DEST_DIR)
 
 @attr('requires_hadoop')
 def test_upload_bz2():
@@ -1219,10 +1179,7 @@ def test_upload_bz2():
     assert_equal(0, response['status'], response)
     assert_true(cluster.fs.exists(HDFS_BZ2_FILE))
   finally:
-    try:
-      cluster.fs.remove(HDFS_DEST_DIR)
-    except:
-      pass
+    cleanup_file(cluster, HDFS_DEST_DIR)
 
 def test_location_to_url():
   assert_equal('/filebrowser/view/var/lib/hadoop-hdfs', location_to_url('/var/lib/hadoop-hdfs', False))
@@ -1265,8 +1222,4 @@ def test_trash():
     response = c.get('/filebrowser/view/user/test?default_to_trash', follow=True)
     assert_true(any(['.Trash' in page for page, code in response.redirect_chain]), response.redirect_chain)
   finally:
-    try:
-      cluster.fs.rmtree(prefix)     # Clean up
-    except:
-      pass      # Don't let cleanup errors mask earlier failures
-
+    cleanup_tree(cluster, prefix)