HUE-1746 [metastore] Remove header line from datafile if header detected

Romain Rigaux committed 12 years ago
parent 705cd62c0e

+ 6 - 0
apps/beeswax/src/beeswax/create_table.py

@@ -30,6 +30,7 @@ from desktop.lib.django_util import render
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.django_forms import MultiForm
 from hadoop.fs import hadoopfs
+from hadoop.fs.fsutils import remove_header
 
 from beeswax.common import TERMINATORS
 from beeswax.design import hql_query
@@ -264,6 +265,7 @@ def _submit_create_and_load(request, create_hql, table_name, path, do_load, data
   if do_load:
     on_success_params['table'] = table_name
     on_success_params['path'] = path
+    on_success_params['removeHeader'] = request.POST.get('removeHeader')
     on_success_url = reverse(app_name + ':load_after_create', kwargs={'database': database})
   else:
     on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': table_name})
@@ -457,12 +459,16 @@ def load_after_create(request, database):
   """
   tablename = request.REQUEST.get('table')
   path = request.REQUEST.get('path')
+  is_remove_header = request.REQUEST.get('removeHeader').lower() == 'on'
 
   if not tablename or not path:
     msg = _('Internal error: Missing needed parameter to load data into table.')
     LOG.error(msg)
     raise PopupException(msg)
 
+  if is_remove_header:
+    remove_header(request.fs, path)
+
   LOG.debug("Auto loading data from %s into table %s" % (path, tablename))
   hql = "LOAD DATA INPATH '%s' INTO TABLE `%s.%s`" % (path, database, tablename)
   query = hql_query(hql)
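Note: taken together, these hunks carry the checkbox value across two requests. _submit_create_and_load copies removeHeader from the create-table POST into the on-success parameters, and load_after_create then strips the header from the HDFS file before issuing LOAD DATA INPATH (which moves the file into the table directory, so the header has to go first). A condensed sketch of that flow, not part of the commit; the view plumbing is omitted, and the guard against a missing parameter is an addition of the sketch (the commit calls .lower() on the raw value):

from hadoop.fs.fsutils import remove_header

def handle_load_after_create(request, database):
    tablename = request.REQUEST.get('table')
    path = request.REQUEST.get('path')
    # 'on' is what the hidden checkbox in define_columns.mako posts when the
    # "use first row as column names" toggle is active.
    remove = (request.REQUEST.get('removeHeader') or '').lower() == 'on'

    if remove:
        remove_header(request.fs, path)  # rewrite the file without line 1

    return "LOAD DATA INPATH '%s' INTO TABLE `%s.%s`" % (path, database, tablename)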

+ 8 - 2
apps/beeswax/src/beeswax/templates/define_columns.mako

@@ -87,8 +87,13 @@ ${ layout.metastore_menubar() }
                       <h3>${_('Define your columns')}</h3>
                     </div>
                     <div class="row" style="margin-left: 8px">
-                      <div class="span3">${_('Use first row as column names')} &nbsp;<a id="useHeader" class="btn disable-feedback"><i class="fa fa-outdent"></i></a></div>
-                      <div class="span3">${ _('Bulk edit column names') } &nbsp;<a id="editColumns" class="btn"><i class="fa fa-edit"></i></a></div>
+                      <div class="span3">
+                        <input id="removeHeader" type="checkbox" class="hide" name="removeHeader">
+                        ${_('Use first row as column names')} &nbsp;<a id="useHeader" class="btn disable-feedback"><i class="fa fa-outdent"></i></a>
+                      </div>
+                      <div class="span3">
+                        ${ _('Bulk edit column names') } &nbsp;<a id="editColumns" class="btn"><i class="fa fa-edit"></i></a>
+                      </div>
                     </div>
                     <div class="control-group" style="margin-top: 10px">
                         <div class="controls">
@@ -192,6 +197,7 @@ ${ layout.metastore_menubar() }
         $(this).addClass(_klass);
         _isChecked = true;
       }
+      $("#removeHeader").prop('checked', _isChecked);
 
       $(".cols input[type='text']").each(function (cnt, item) {
         if (_isChecked) {

+ 1 - 0
apps/beeswax/src/beeswax/test_base.py

@@ -130,6 +130,7 @@ def get_shared_beeswax_server():
     if _SHARED_HIVE_SERVER_PROCESS is None:
       p = _start_server(cluster)
       LOG.info("started")
+      cluster.fs.do_as_superuser(cluster.fs.chmod, '/tmp', 01777)
 
       _SHARED_HIVE_SERVER_PROCESS = p
       def kill():

+ 39 - 0
apps/beeswax/src/beeswax/tests.py

@@ -1016,9 +1016,13 @@ for x in sys.stdin:
         gzdat.write(data)
         gzdat.close()
         data = sio.getvalue()
+
       f = self.cluster.fs.open(filename, "w")
       f.write(data)
       f.close()
+      self.cluster.fs.do_as_superuser(self.cluster.fs.chown, filename, 'test', 'test')
+
+    self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
 
     write_file('/tmp/spacé.dat'.decode('utf-8'), RAW_FIELDS, ' ')
     write_file('/tmp/tab.dat', RAW_FIELDS, '\t')
@@ -1123,6 +1127,41 @@ for x in sys.stdin:
     assert_true("nada" in resp.content, resp.content)
     assert_true("sp ace" in resp.content, resp.content)
 
+    # Test table creation and data loading
+    resp = self.client.post('/beeswax/create/import_wizard/default', {
+      'submit_create': 'on',
+      'path': '/tmp/comma.csv',
+      'name': 'test_create_import_with_header',
+      'delimiter_0': ',',
+      'delimiter_1': '',
+      'file_type': 'text',
+      'do_import': 'True',
+      'cols-0-_exists': 'True',
+      'cols-0-column_name': 'col_a',
+      'cols-0-column_type': 'string',
+      'cols-1-_exists': 'True',
+      'cols-1-column_name': 'col_b',
+      'cols-1-column_type': 'string',
+      'cols-2-_exists': 'True',
+      'cols-2-column_name': 'col_c',
+      'cols-2-column_type': 'string',
+      'cols-next_form_id': '3',
+      'removeHeader': 'on'
+    }, follow=True)
+
+    resp = wait_for_query_to_finish(self.client, resp, max=180.0)
+
+    # Check data is in the table (by describing it)
+    resp = self.client.get('/metastore/table/default/test_create_import_with_header')
+    cols = resp.context['table'].cols
+    assert_equal(len(cols), 3)
+    assert_equal([ col.name for col in cols ], [ 'col_a', 'col_b', 'col_c' ])
+    assert_equal(resp.context['sample'], [
+      #['a', 'b', 'c'], # Gone as told to be header
+      ['"a', 'a"', '"b'], # Hive does not support natively quoted CSV
+      ['"a', '""a"', '"b']
+    ] )
+
 
   def test_create_database(self):
     resp = self.client.post("/beeswax/create/database", {

+ 3 - 72
apps/filebrowser/src/filebrowser/views.py

@@ -52,6 +52,7 @@ from desktop.lib.django_util import make_absolute, render, render_json, format_p
 from desktop.lib.exceptions_renderable import PopupException
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
+from hadoop.fs.fsutils import do_newfile_save, do_overwrite_save
 
 from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE
 from filebrowser.lib.archives import archive_factory
@@ -240,11 +241,11 @@ def save_file(request):
         return edit(request, path, form=form)
 
     if request.fs.exists(path):
-        _do_overwrite_save(request.fs, path,
+        do_overwrite_save(request.fs, path,
                            form.cleaned_data['contents'],
                            form.cleaned_data['encoding'])
     else:
-        _do_newfile_save(request.fs, path,
+        do_newfile_save(request.fs, path,
                          form.cleaned_data['contents'],
                          form.cleaned_data['encoding'])
 
@@ -253,76 +254,6 @@ def save_file(request):
     return edit(request, path, form)
 
 
-def _do_overwrite_save(fs, path, data, encoding):
-    """
-    Atomically (best-effort) save the specified data to the given path
-    on the filesystem.
-
-    TODO(todd) should this be in some fsutil.py?
-    """
-    # TODO(todd) Should probably do an advisory permissions check here to
-    # see if we're likely to fail (eg make sure we own the file
-    # and can write to the dir)
-
-    # First write somewhat-kinda-atomically to a staging file
-    # so that if we fail, we don't clobber the old one
-    path_dest = path + "._hue_new"
-
-    new_file = fs.open(path_dest, "w")
-    try:
-        try:
-            new_file.write(data.encode(encoding))
-            logging.info("Wrote to " + path_dest)
-        finally:
-            new_file.close()
-    except Exception, e:
-        # An error occurred in writing, we should clean up
-        # the tmp file if it exists, before re-raising
-        try:
-            fs.remove(path_dest)
-        except:
-            pass
-        raise e
-
-    # Try to match the permissions and ownership of the old file
-    cur_stats = fs.stats(path)
-    try:
-        fs.chmod(path_dest, stat_module.S_IMODE(cur_stats['mode']))
-    except:
-        logging.warn("Could not chmod new file %s to match old file %s" % (
-            path_dest, path), exc_info=True)
-        # but not the end of the world - keep going
-
-    try:
-        fs.chown(path_dest, cur_stats['user'], cur_stats['group'])
-    except:
-        logging.warn("Could not chown new file %s to match old file %s" % (
-            path_dest, path), exc_info=True)
-        # but not the end of the world - keep going
-
-    # Now delete the old - nothing we can do here to recover
-    fs.remove(path)
-
-    # Now move the new one into place
-    # If this fails, then we have no reason to assume
-    # we can do anything to recover, since we know the
-    # destination shouldn't already exist (we just deleted it above)
-    fs.rename(path_dest, path)
-
-
-def _do_newfile_save(fs, path, data, encoding):
-    """
-    Save data to the path 'path' on the filesystem 'fs'.
-
-    There must not be a pre-existing file at that path.
-    """
-    new_file = fs.open(path, "w")
-    try:
-        new_file.write(data.encode(encoding))
-    finally:
-        new_file.close()
-
-
 def parse_breadcrumbs(path):
     breadcrumbs_parts = Hdfs.normpath(path).split('/')
     i = 1

+ 0 - 3
apps/filebrowser/src/filebrowser/views_test.py

@@ -849,14 +849,11 @@ def test_view_i18n():
     c = make_logged_in_client()
     response = c.get('/filebrowser/view/')
     assert_equal(response.context['path'], '/')
-    cluster.fs.mkdir('/user/test')
-    cluster.fs.chown("/user/test", "test", "test")
     response = c.get('/filebrowser/view/?default_to_home=1')
     assert_equal("http://testserver/filebrowser/view/user/test", response["location"])
   finally:
     try:
       cluster.fs.rmtree('/test-filebrowser/')
-      cluster.fs.rmtree('/user/test')
     except Exception, ex:
       LOG.error('Failed to cleanup test directory: %s' % (ex,))
 

+ 1 - 1
apps/metastore/src/metastore/views.py

@@ -163,7 +163,7 @@ def describe_table(request, database, table):
     ],
     'table': table,
     'partitions': partitions,
-    'sample': table_data and table_data.rows(),
+    'sample': table_data and list(table_data.rows()),
     'error_message': error_message,
     'database': database,
     'has_write_access': has_write_access(request.user),
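The list() wrapper matters because rows() presumably returns a generator: materializing it once lets both the template and the new beeswax assertion read the sample, instead of the second reader finding it already exhausted. A minimal illustration of that pitfall with a stand-in generator (the real table_data API is not shown here):

def rows():
    for r in (['a', '1'], ['b', '2']):
        yield r

sample = rows()
list(sample)                           # first pass consumes the generator
assert list(sample) == []              # a second pass sees nothing

sample = list(rows())                  # materialized once
assert sample == [['a', '1'], ['b', '2']]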

+ 0 - 1
apps/oozie/src/oozie/tests.py

@@ -303,7 +303,6 @@ class OozieBase(OozieServerProvider):
 
     self.c.post(reverse('oozie:install_examples'))
     self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
-    self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/user/test', 0777, True)
 
     _INITIALIZED = True
 

+ 107 - 0
desktop/libs/hadoop/src/hadoop/fs/fsutils.py

@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import stat as stat_module
+
+
+logger = logging.getLogger(__name__)
+
+
+def do_overwrite_save(fs, path, data, encoding):
+
+    def copy_data(path_dest):
+        new_file = fs.open(path_dest, "w")
+        try:
+            try:
+                new_file.write(data.encode(encoding))
+                logging.info("Wrote to " + path_dest)
+            finally:
+                new_file.close()
+        except Exception, e:
+            # An error occurred in writing, we should clean up
+            # the tmp file if it exists, before re-raising
+            try:
+                fs.remove(path_dest)
+            except:
+                pass
+            raise e
+
+    _do_overwrite(fs, path, copy_data)
+
+
+def remove_header(fs, path):
+
+    def copy_data(path_dest):
+        fs.copyfile(path, path_dest, skip_header=True)
+
+    _do_overwrite(fs, path, copy_data)
+
+
+def _do_overwrite(fs, path, copy_data):
+    """
+    Atomically (best-effort) save the specified data to the given path
+    on the filesystem.
+    """
+    # TODO(todd) Should probably do an advisory permissions check here to
+    # see if we're likely to fail (eg make sure we own the file
+    # and can write to the dir)
+
+    # First write somewhat-kinda-atomically to a staging file
+    # so that if we fail, we don't clobber the old one
+    path_dest = path + "._hue_new"
+
+    # Copy the data to destination
+    copy_data(path_dest)
+
+    # Try to match the permissions and ownership of the old file
+    cur_stats = fs.stats(path)
+    try:
+        fs.do_as_superuser(fs.chmod, path_dest, stat_module.S_IMODE(cur_stats['mode']))
+    except:
+        logging.warn("Could not chmod new file %s to match old file %s" % (path_dest, path), exc_info=True)
+        # but not the end of the world - keep going
+
+    try:
+        fs.do_as_superuser(fs.chown, path_dest, cur_stats['user'], cur_stats['group'])
+    except:
+        logging.warn("Could not chown new file %s to match old file %s" % (path_dest, path), exc_info=True)
+        # but not the end of the world - keep going
+
+    # Now delete the old - nothing we can do here to recover
+    fs.remove(path)
+
+    # Now move the new one into place
+    # If this fails, then we have no reason to assume
+    # we can do anything to recover, since we know the
+    # destination shouldn't already exist (we just deleted it above)
+    fs.rename(path_dest, path)
+
+
+def do_newfile_save(fs, path, data, encoding):
+    """
+    Save data to the path 'path' on the filesystem 'fs'.
+
+    There must not be a pre-existing file at that path.
+    """
+    new_file = fs.open(path, "w")
+    try:
+        new_file.write(data.encode(encoding))
+    finally:
+        new_file.close()
+
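The extracted helpers keep the same best-effort atomic pattern as the old filebrowser code: write to path + "._hue_new", copy the old file's mode and ownership, delete the original, then rename the staging file into place. A rough usage sketch against an HDFS-like client, assuming the pseudo-HDFS test cluster from this repository; the /tmp/sample.csv path and its contents are made up for the example:

from hadoop import pseudo_hdfs4
from hadoop.fs.fsutils import do_overwrite_save, remove_header

fs = pseudo_hdfs4.shared_cluster().fs   # any WebHdfs-like client works here
fs.setuser('test')

# do_overwrite_save stats the old file, so it must already exist.
f = fs.open('/tmp/sample.csv', 'w')
f.write('placeholder')
f.close()

# Rewrite the file's contents, preserving its mode and ownership.
do_overwrite_save(fs, '/tmp/sample.csv', u'col_a,col_b\nx,y\n', 'utf-8')

# Drop the first (header) line in place before loading into Hive.
remove_header(fs, '/tmp/sample.csv')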

+ 69 - 0
desktop/libs/hadoop/src/hadoop/fs/fsutils_tests.py

@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import unittest
+
+from nose.tools import assert_equals, assert_true, assert_not_equal
+
+from desktop.lib import i18n
+
+from hadoop import pseudo_hdfs4
+from hadoop.fs.fsutils import remove_header, do_overwrite_save
+
+
+LOG = logging.getLogger(__name__)
+
+
+class FsUtilsTests(unittest.TestCase):
+  requires_hadoop = True
+
+  @classmethod
+  def setUpClass(cls):
+    cls.cluster = pseudo_hdfs4.shared_cluster()
+
+  def setUp(self):
+    self.cluster.fs.setuser('test')
+
+  def tearDown(self):
+    try:
+      self.cluster.fs.purge_trash()
+    except Exception, e:
+      LOG.error('Could not clean up trash: %s', e)
+
+  def test_remove_header(self):
+    fs = self.cluster.fs
+
+    path = "/tmp/test_remove_header.txt"
+    data_header = "destination\trank"
+    data_body = """thailand\t10
+costarica\t?
+curacao\t?"""
+    data = data_header + '\n' + data_body
+
+    f = fs.open(path, "w")
+    f.write("hello")
+    f.close()
+
+    encoding = i18n.get_site_encoding()
+    do_overwrite_save(fs, path, data, encoding)
+
+    assert_not_equal(data_body, fs.open(path).read())
+
+    remove_header(fs, path)
+
+    assert_equals(data_body, fs.open(path).read())

+ 5 - 1
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -474,7 +474,7 @@ class WebHdfs(Hdfs):
     self._invoke_with_redirect('POST', path, params, data)
 
 
-  def copyfile(self, src, dst):
+  def copyfile(self, src, dst, skip_header=False):
     sb = self._stats(src)
     if sb is None:
       raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
@@ -488,6 +488,10 @@ class WebHdfs(Hdfs):
     while True:
       data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
       if offset == 0:
+        if skip_header:
+          n = data.index('\n')
+          if n > 0:
+            data = data[n + 1:]
         self.create(dst,
                     overwrite=True,
                     blocksize=sb.blockSize,
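The header is only stripped from the very first chunk (offset == 0): copyfile reads the source in UPLOAD_CHUNK_SIZE pieces, and only the first piece can contain line one. A standalone sketch of the trimming step on a raw buffer, independent of WebHDFS; using find() instead of index() is the sketch's own robustness tweak, since the commit's index('\n') would raise if the first chunk held no newline:

def strip_first_line(chunk):
    # Mirror of the skip_header branch: drop everything up to and
    # including the first newline of the first chunk read from src.
    n = chunk.find('\n')   # find() returns -1 where index() would raise
    if n > 0:
        return chunk[n + 1:]
    return chunk

assert strip_first_line('col_a\tcol_b\nrow1\trow2\n') == 'row1\trow2\n'
assert strip_first_line('no newline in this chunk') == 'no newline in this chunk'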

+ 2 - 2
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -254,8 +254,8 @@ class PseudoHdfs4(object):
     self.fs.do_as_superuser(self.fs.mkdir, '/var/log/hadoop-yarn/apps', 01777)
     self.fs.do_as_superuser(self.fs.chmod, '/var/log/hadoop-yarn/apps', 01777)
 
-    self.fs.create_home_dir('/user/test')
-    self.fs.create_home_dir('/user/hue')
+    self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
+    self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
 
 
   def _start_mr2(self, env):