
HUE-54. beeswax can't handle unicode data

Beeswax now handles UTF-8 data from Hive correctly. Since Hive does not support
other encodings, querying a table that holds, say, latin1 data still cannot
produce correct results, but Beeswax no longer raises exceptions on such data;
undecodable bytes are replaced instead.
bc Wong, 15 years ago
commit a71678a57f
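
For context, a minimal standalone Python 2 sketch (not part of the commit) of the failure mode and of the errors='replace' behaviour the patch relies on throughout:

    # -*- coding: utf-8 -*-
    latin1_bytes = u'caf\xe9'.encode('latin1')       # 'caf\xe9' is not valid UTF-8

    try:
        latin1_bytes.decode('utf-8')                 # strict decoding raises here
    except UnicodeDecodeError, ex:
        print 'strict decode failed: %s' % ex

    # errors='replace' substitutes U+FFFD for the bad byte instead of raising.
    print repr(latin1_bytes.decode('utf-8', 'replace'))   # u'caf\ufffd'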

+ 2 - 0
apps/beeswax/src/beeswax/create_table.py

@@ -60,6 +60,8 @@ def create_table(request):
           'partition_columns': partition_columns
         }
       )
+      # Mako outputs bytestring in utf8
+      proposed_query = proposed_query.decode('utf-8')
       tablename = form.table.cleaned_data['name']
       on_success_url = urlresolvers.reverse(describe_table, kwargs={'table': tablename})
       return confirm_query(request, proposed_query, on_success_url)
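
The decode added above matters because the rendered template comes back from Mako as a UTF-8 bytestring; mixing that bytestring with unicode later (for example by concatenation) would fall back to Python 2's implicit ASCII decode. A standalone sketch with a made-up query standing in for the Mako output:

    # -*- coding: utf-8 -*-
    # Hypothetical stand-in for the rendered Mako output (a UTF-8 bytestring).
    proposed_query = u'CREATE TABLE caf\xe9 (foo INT)'.encode('utf-8')

    try:
        u'-- generated --\n' + proposed_query        # implicit ascii decode fails
    except UnicodeDecodeError, ex:
        print 'mixing unicode and utf-8 bytes failed: %s' % ex

    # Decoding right after rendering keeps everything unicode downstream.
    proposed_query = proposed_query.decode('utf-8')
    print repr(u'-- generated --\n' + proposed_query)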

+ 164 - 3
apps/beeswax/src/beeswax/db_utils.py

@@ -28,7 +28,8 @@ from beeswax import models
 from beeswax.models import QueryHistory
 from beeswaxd import BeeswaxService
 
-from desktop.lib import thrift_util
+from django.utils.encoding import smart_str, force_unicode
+from desktop.lib import thrift_util, i18n
 from hive_metastore import ThriftHiveMetastore
 from beeswaxd.ttypes import BeeswaxException, QueryHandle, QueryNotFoundException
 
@@ -150,19 +151,179 @@ def get_query_state(query_history):
               (query_history.id, ex))
     return None
 
+
 #
 # Note that thrift_util does client connection caching for us.
 #
 def db_client():
-  return thrift_util.get_client(BeeswaxService.Client,
+  """Get the Thrift client to talk to beeswax server"""
+
+  class UnicodeBeeswaxClient(object):
+    """Wrap the thrift client to take and return Unicode"""
+    def __init__(self, client):
+      self._client = client
+
+    def __getattr__(self, attr):
+      if attr in self.__dict__:
+        return self.__dict__[attr]
+      return getattr(self._client, attr)
+
+    def query(self, query):
+      _encode_struct_attr(query, 'query')
+      return self._client.query(query)
+
+    def explain(self, query):
+      _encode_struct_attr(query, 'query')
+      res = self._client.explain(query)
+      return _decode_struct_attr(res, 'textual')
+
+    def fetch(self, *args, **kwargs):
+      res = self._client.fetch(*args, **kwargs)
+      if res.ready:
+        res.columns = [ force_unicode(col, errors='replace') for col in res.columns ]
+        res.data = [ force_unicode(row, errors='replace') for row in res.data ]
+      return res
+
+    def dump_config(self):
+      res = self._client.dump_config()
+      return force_unicode(res, errors='replace')
+
+    def echo(self, msg):
+      return self._client.echo(smart_str(msg))
+
+    def get_log(self, *args, **kwargs):
+      res = self._client.get_log(*args, **kwargs)
+      return force_unicode(res, errors='replace')
+
+    def get_default_configuration(self, *args, **kwargs):
+      config_list = self._client.get_default_configuration(*args, **kwargs)
+      for config in config_list:
+        _decode_struct_attr(config, 'key')
+        _decode_struct_attr(config, 'value')
+        _decode_struct_attr(config, 'desc')
+      return config_list
+
+    def get_results_metadata(self, *args, **kwargs):
+      res = self._client.get_results_metadata(*args, **kwargs)
+      return _decode_struct_attr(res, 'table_dir')
+
+  client = thrift_util.get_client(BeeswaxService.Client,
                                 conf.BEESWAX_SERVER_HOST.get(),
                                 conf.BEESWAX_SERVER_PORT.get(),
                                 service_name="Beeswax (Hive UI) Server",
                                 timeout_seconds=BEESWAX_SERVER_THRIFT_TIMEOUT)
+  return UnicodeBeeswaxClient(client)
+
 
 def meta_client():
-  return thrift_util.get_client(ThriftHiveMetastore.Client,
+  """Get the Thrift client to talk to the metastore"""
+
+  class UnicodeMetastoreClient(object):
+    """Wrap the thrift client to take and return Unicode."""
+    def __init__(self, client):
+      self._client = client
+
+    def __getattr__(self, attr):
+      if attr in self.__dict__:
+        return self.__dict__[attr]
+      return getattr(self._client, attr)
+
+    def _encode_storage_descriptor(self, sd):
+      _encode_struct_attr(sd, 'location')
+      for col in sd.cols:
+        _encode_struct_attr(col, 'comment')
+      self._encode_map(sd.parameters)
+
+    def _decode_storage_descriptor(self, sd):
+      _decode_struct_attr(sd, 'location')
+      for col in sd.cols:
+        _decode_struct_attr(col, 'comment')
+      self._decode_map(sd.parameters)
+
+    def _encode_map(self, mapp):
+      for key, value in mapp.iteritems():
+        mapp[key] = smart_str(value, strings_only=True)
+
+    def _decode_map(self, mapp):
+      for key, value in mapp.iteritems():
+        mapp[key] = force_unicode(value, strings_only=True, errors='replace')
+
+    def create_database(self, name, description):
+      description = smart_str(description)
+      return self._client.create_database(name, description)
+
+    def get_database(self, *args, **kwargs):
+      db = self._client.get_database(*args, **kwargs)
+      return _decode_struct_attr(db, 'description')
+
+    def get_fields(self, *args, **kwargs):
+      res = self._client.get_fields(*args, **kwargs)
+      for fschema in res:
+        _decode_struct_attr(fschema, 'comment')
+      return res
+
+    def get_table(self, *args, **kwargs):
+      res = self._client.get_table(*args, **kwargs)
+      self._decode_storage_descriptor(res.sd)
+      self._decode_map(res.parameters)
+      return res
+
+    def alter_table(self, dbname, tbl_name, new_tbl):
+      self._encode_storage_descriptor(new_tbl.sd)
+      self._encode_map(new_tbl.parameters)
+      return self._client.alter_table(dbname, tbl_name, new_tbl)
+
+    def _encode_partition(self, part):
+      self._encode_storage_descriptor(part.sd)
+      self._encode_map(part.parameters)
+      return part
+
+    def _decode_partition(self, part):
+      self._decode_storage_descriptor(part.sd)
+      self._decode_map(part.parameters)
+      return part
+
+    def add_partition(self, new_part):
+      self._encode_partition(new_part)
+      part = self._client.add_partition(new_part)
+      return self._decode_partition(part)
+
+    def get_partition(self, *args, **kwargs):
+      part = self._client.get_partition(*args, **kwargs)
+      return self._decode_partition(part)
+
+    def get_partitions(self, *args, **kwargs):
+      part_list = self._client.get_partitions(*args, **kwargs)
+      for part in part_list:
+        self._decode_partition(part)
+      return part_list
+
+    def alter_partition(self, db_name, tbl_name, new_part):
+      self._encode_partition(new_part)
+      return self._client.alter_partition(db_name, tbl_name, new_part)
+
+  client = thrift_util.get_client(ThriftHiveMetastore.Client,
                                 conf.BEESWAX_META_SERVER_HOST.get(),
                                 conf.BEESWAX_META_SERVER_PORT.get(),
                                 service_name="Hive Metadata (Hive UI) Server",
                                 timeout_seconds=METASTORE_THRIFT_TIMEOUT)
+  return UnicodeMetastoreClient(client)
+
+
+def _decode_struct_attr(struct, attr):
+  try:
+    val = getattr(struct, attr)
+  except AttributeError:
+    return struct
+  unival = force_unicode(val, strings_only=True, errors='replace')
+  setattr(struct, attr, unival)
+  return struct
+
+def _encode_struct_attr(struct, attr):
+  try:
+    unival = getattr(struct, attr)
+  except AttributeError:
+    return struct
+  val = smart_str(unival, strings_only=True)
+  setattr(struct, attr, val)
+  return struct
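
The helper pair above converts a single attribute in place and quietly skips structs that lack it. A standalone sketch of the same pattern, using a hypothetical stand-in for a Thrift-generated struct and a plain decode() instead of Django's force_unicode:

    # -*- coding: utf-8 -*-
    def decode_struct_attr(struct, attr):
        """Simplified version of _decode_struct_attr above."""
        try:
            val = getattr(struct, attr)
        except AttributeError:
            return struct                    # no such field: leave struct alone
        if isinstance(val, str):             # only touch bytestrings
            setattr(struct, attr, val.decode('utf-8', 'replace'))
        return struct

    class FakeConfigVariable(object):        # hypothetical Thrift-like struct
        def __init__(self, key, desc):
            self.key, self.desc = key, desc

    cfg = FakeConfigVariable('some.key', 'description avec accent \xe9')
    decode_struct_attr(cfg, 'desc')
    print repr(cfg.desc)                     # the stray latin1 byte becomes U+FFFD
    decode_struct_attr(cfg, 'missing')       # silently ignored, as in the patch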

+ 1 - 1
apps/beeswax/src/beeswax/templates/beeswax_components.mako

@@ -94,7 +94,7 @@
     </dd>
     % if len(field.errors):
       <dd class="beeswax_error ccs-error">
-         ${str(field.errors) | n}
+         ${unicode(field.errors) | n}
        </dd>
     % endif
   % endif
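
This str() → unicode() switch (repeated below in execute.mako and util.mako) avoids the implicit ASCII encode that str() performs on unicode error markup. A standalone sketch with an ordinary unicode string standing in for field.errors:

    # -*- coding: utf-8 -*-
    # Hypothetical stand-in for the HTML produced by field.errors.
    error_html = u'<ul class="errorlist"><li>colonne "caf\xe9" inconnue</li></ul>'

    try:
        str(error_html)                 # implicit ascii encode of non-ASCII text
    except UnicodeEncodeError, ex:
        print 'str() failed: %s' % ex

    print repr(unicode(error_html))     # unicode() passes unicode input through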

+ 1 - 1
apps/beeswax/src/beeswax/templates/execute.mako

@@ -59,7 +59,7 @@
         alt="Example: SELECT * FROM tablename" name="${form.query["query"].html_name | n}">${extract_field_data(form.query["query"]) or ''}</textarea>
         % if len(form.query["query"].errors):
           <div class="validation-advice">
-             ${str(form.query["query"].errors) | n}
+             ${unicode(form.query["query"].errors) | n}
           </div>
         % endif
     </dd>

+ 1 - 1
apps/beeswax/src/beeswax/templates/util.mako

@@ -15,7 +15,7 @@
 ## limitations under the License.
 <%def name="render_error(err)">
   <div class="ccs-error">
-    ${str(err) | n}
+    ${unicode(err) | n}
   </div>
 </%def>
 

+ 81 - 29
apps/beeswax/src/beeswax/test_base.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 # Licensed to Cloudera, Inc. under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -160,9 +161,9 @@ def wait_for_query_to_finish(client, response, max=30.0):
 
 
 def make_query(client, query, submission_type="Execute",
-               follow=True, udfs=None, settings=None, resources=[],
+               udfs=None, settings=None, resources=None,
                wait=False, name=None, desc=None, local=True,
-               is_parameterized=True):
+               is_parameterized=True, **kwargs):
   """
   Prepares arguments for the execute view.
 
@@ -210,8 +211,9 @@ def make_query(client, query, submission_type="Execute",
     parameters["file_resources-%d-type" % i] = type
     parameters["file_resources-%d-path" % i] = path
     parameters["file_resources-%d-_exists" % i] = 'True'
-  response = client.post("/beeswax/execute", parameters,
-    follow=follow)
+
+  kwargs.setdefault('follow', True)
+  response = client.post("/beeswax/execute", parameters, **kwargs)
 
   if wait:
     return wait_for_query_to_finish(client, response)
@@ -246,6 +248,11 @@ class BeeswaxSampleProvider(object):
 
   @classmethod
   def teardown_class(cls):
+    cls.cluster.fs.setuser(cls.cluster.superuser)
+    try:
+      cls.cluster.fs.rmtree('/tmp/beeswax')
+    except IOError, ex:
+      LOG.warn('Failed to cleanup /tmp/beeswax: %s' % (ex,))
     cls.shutdown[0]()
 
   @classmethod
@@ -257,43 +264,88 @@ class BeeswaxSampleProvider(object):
     if _INITIALIZED:
       return
 
-    # Create a test table and load data here;
-    # this is used in several tests.
-    CREATE_TABLE = """
-      CREATE TABLE test (foo INT, bar STRING)
+    data_file = u'/tmp/beeswax/sample_data_échantillon_%d.tsv'
+
+    # Create a "test_partitions" table.
+    CREATE_PARTITIONED_TABLE = """
+      CREATE TABLE test_partitions (foo INT, bar STRING)
+      PARTITIONED BY (baz STRING, boom STRING)
       ROW FORMAT DELIMITED
         FIELDS TERMINATED BY '\t'
         LINES TERMINATED BY '\n'
     """
-    make_query(cls.client, CREATE_TABLE, wait=True)
+    make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)
+    cls._make_data_file(data_file % 1)
 
-    # Create some data for it
-    cls.cluster.fs.setuser(cls.cluster.superuser)
-    def write_sample_data(cluster, filename):
-      f = cluster.fs.open(filename, "w")
-      for x in range(256):
-        f.write("%d\t0x%x\n" % (x, x))
-      f.close()
-    write_sample_data(cls.cluster, "/tmp/sample_data.tsv")
-    write_sample_data(cls.cluster, "/tmp/sample_data2.tsv")
-
-    # Load the data
     LOAD_DATA = """
-      LOAD DATA INPATH '/tmp/sample_data.tsv' OVERWRITE INTO TABLE test
-    """
+      LOAD DATA INPATH '%s'
+      OVERWRITE INTO TABLE test_partitions
+      PARTITION (baz='baz_one', boom='boom_two')
+    """ % (data_file % 1,)
     make_query(cls.client, LOAD_DATA, wait=True, local=False)
 
-    CREATE_PARTITIONED_TABLE = """
-      CREATE TABLE test_partitions (foo INT, bar STRING)
-      PARTITIONED BY (baz STRING, boom STRING)
+    # Create a bunch of other tables
+    CREATE_TABLE = """
+      CREATE TABLE `%(name)s` (foo INT, bar STRING)
+      COMMENT "%(comment)s"
       ROW FORMAT DELIMITED
         FIELDS TERMINATED BY '\t'
         LINES TERMINATED BY '\n'
     """
-    make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)
 
+    # Create a "test" table.
+    table_info = dict(name='test', comment='Test table')
+    cls._make_data_file(data_file % 2)
+    cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)
+
+    # Create a "test_utf8" table.
+    table_info = dict(name='test_utf8', comment=cls.get_i18n_table_comment())
+    cls._make_i18n_data_file(data_file % 3, 'utf-8')
+    cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)
+
+    # Create a "test_latin1" table.
+    table_info = dict(name='test_latin1', comment=cls.get_i18n_table_comment())
+    cls._make_i18n_data_file(data_file % 4, 'latin1')
+    cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)
+
+    _INITIALIZED = True
+
+  @staticmethod
+  def get_i18n_table_comment():
+    return u'en-hello pt-Olá ch-你好 ko-안녕 ru-Здравствуйте'
+
+  @classmethod
+  def _make_table(cls, table_name, create_ddl, filename):
+    make_query(cls.client, create_ddl, wait=True)
     LOAD_DATA = """
-      LOAD DATA INPATH '/tmp/sample_data2.tsv' OVERWRITE INTO TABLE test_partitions PARTITION (baz='baz_one', boom='boom_two')
-    """
+      LOAD DATA INPATH '%s' OVERWRITE INTO TABLE %s
+    """ % (filename, table_name)
     make_query(cls.client, LOAD_DATA, wait=True, local=False)
-    _INITIALIZED = True
+
+  @classmethod
+  def _make_data_file(cls, filename):
+    """
+    Create data to be loaded into tables.
+    Data contains two columns of:
+      <num>     0x<hex_num>
+    where <num> goes from 0 to 255 inclusive.
+    """
+    cls.cluster.fs.setuser(cls.cluster.superuser)
+    f = cls.cluster.fs.open(filename, "w")
+    for x in xrange(256):
+      f.write("%d\t0x%x\n" % (x, x))
+    f.close()
+
+  @classmethod
+  def _make_i18n_data_file(cls, filename, encoding):
+    """
+    Create i18n data to be loaded into tables.
+    Data contains two columns of:
+      <num>     <unichr(num)>
+    where <num> goes from 0 to 255 inclusive.
+    """
+    cls.cluster.fs.setuser(cls.cluster.superuser)
+    f = cls.cluster.fs.open(filename, "w")
+    for x in xrange(256):
+      f.write("%d\t%s\n" % (x, unichr(x).encode(encoding)))
+    f.close()

+ 44 - 10
apps/beeswax/src/beeswax/tests.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 # Licensed to Cloudera, Inc. under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -27,6 +28,7 @@ import tempfile
 import threading
 from nose.tools import assert_true, assert_equal, assert_false
 from nose.plugins.skip import SkipTest
+from django.utils.encoding import smart_str
 
 from desktop.lib.django_test_util import make_logged_in_client, assert_equal_mod_whitespace
 from desktop.lib.django_test_util import assert_similar_pages
@@ -45,22 +47,31 @@ from beeswax.test_base import BeeswaxSampleProvider
 from beeswaxd import BeeswaxService
 
 LOG = logging.getLogger(__name__)
+CSV_LINK_PAT = re.compile('/beeswax/download/\d+/csv')
 
 def _make_query(client, query, submission_type="Execute",
-                follow=True, udfs=None, settings=None, resources=[],
+                udfs=None, settings=None, resources=[],
                 wait=False, name=None, desc=None, local=True,
-                is_parameterized=True):
+                is_parameterized=True, **kwargs):
   """Wrapper around the real make_query"""
   res = make_query(client, query, submission_type,
-                   follow, udfs, settings, resources,
-                   wait, name, desc, local, is_parameterized)
+                   udfs, settings, resources,
+                   wait, name, desc, local, is_parameterized, **kwargs)
   # Should be in the history if it's submitted.
   if submission_type == 'Execute':
-    verify_history(client, fragment=collapse_whitespace(query[:20]))
+    fragment = collapse_whitespace(smart_str(query[:20]))
+    verify_history(client, fragment=fragment)
 
   return res
 
 
+def get_csv(client, result_response):
+  """Get the csv for a query result"""
+  csv_link = CSV_LINK_PAT.search(result_response.content)
+  assert_true(csv_link, "Query result should have a csv download link")
+  return client.get(csv_link.group()).content
+
+
 class TestBeeswaxWithHadoop(BeeswaxSampleProvider):
   """Tests for beeswax that require a running Hadoop"""
   requires_hadoop = True
@@ -292,6 +303,32 @@ for x in sys.stdin:
     response = _make_query(c, "SELECT SUM(foo) FROM test_explain", submission_type="Explain")
     assert_true(response.context["explanation"])
 
+  def test_explain_query_i18n(self):
+    query = u"SELECT foo FROM test_utf8 WHERE bar='%s'" % (unichr(200),)
+    response = _make_query(self.client, query, submission_type="Explain")
+    assert_true(response.context['explanation'])
+
+  def test_query_i18n(self):
+    # Selecting from utf-8 table should get correct result
+    query = u"SELECT * FROM test_utf8 WHERE bar='%s'" % (unichr(200),)
+    response = _make_query(self.client, query, wait=True)
+    assert_equal(["200", unichr(200)], response.context["results"][0],
+                 "selecting from utf-8 table should get correct result")
+
+    csv = get_csv(self.client, response)
+    assert_equal('"200","%s"' % (unichr(200).encode('utf-8'),), csv.split()[1])
+
+    # Selecting from latin1 table should not blow up
+    query = u"SELECT * FROM test_latin1 WHERE bar='%s'" % (unichr(200),)
+    response = _make_query(self.client, query, wait=True)
+    assert_true(response.context.has_key("results"),
+                "selecting from latin1 table should not blow up")
+
+    # Describe table should be fine with non-ascii comment
+    response = self.client.get('/beeswax/table/test_utf8')
+    assert_equal(response.context['table'].parameters['comment'],
+                 self.get_i18n_table_comment())
+
   def _parallel_query_helper(self, i, result_holder, lock, num_tasks):
     client = make_logged_in_client()
     try:
@@ -329,13 +366,10 @@ for x in sys.stdin:
     for t in threads:
       t.join()
 
-    csv_link_pat = re.compile('/beeswax/download/\d+/csv')
     for i in range(PARALLEL_TASKS):
-      csv_link = csv_link_pat.search(responses[i].content)
-      assert csv_link, "Query result should have a csv download link"
-      csv_resp = self.client.get(csv_link.group())
+      csv = get_csv(self.client, responses[i])
       # We get 3 rows: Column header, and 2 rows of results in double quotes
-      answer = [ int(data.strip('"')) for data in csv_resp.content.split()[1:] ]
+      answer = [ int(data.strip('"')) for data in csv.split()[1:] ]
       assert_equal( [ i + 1, i + 2 ], answer)
 
   def test_data_export(self):

+ 4 - 3
apps/beeswax/src/beeswax/views.py

@@ -25,6 +25,7 @@ from django import forms
 from django.core import urlresolvers
 from django.db.models import Q
 from django.http import HttpResponse, QueryDict
+from django.utils.encoding import force_unicode
 
 from desktop.lib import django_mako
 from desktop.lib.paginator import Paginator
@@ -89,7 +90,7 @@ def drop_table(request, table):
     # external by looking at db_utils.meta_client().get_table("default", table).tableType,
     # but this was introduced in Hive 0.5, and therefore may not be available
     # with older metastores.
-    title="This may delete the underlying data as well as the metadata.  Drop table %s?" % table
+    title = "This may delete the underlying data as well as the metadata.  Drop table %s?" % table
     return render('confirm.html', request, dict(url=request.path, title=title))
   elif request.method == 'POST':
     hql = "DROP TABLE `%s`" % (table,)
@@ -288,7 +289,6 @@ def execute_query(request, design_id=None):
         break
 
       query_str = _strip_trailing_semicolon(form.query.cleaned_data["query"])
-      notify = form.query.cleaned_data.get('email_notify', False)
 
       # (Optional) Parameterization.
       parameterization = get_parameterization(request, query_str, form.query, design, to_explain)
@@ -300,6 +300,7 @@ def execute_query(request, design_id=None):
         if to_explain:
           return explain_directly(request, query_str, query_msg, design)
         else:
+          notify = form.query.cleaned_data.get('email_notify', False)
           return execute_directly(request, query_msg, design,
                                   on_success_url=on_success_url,
                                   notify=notify)
@@ -410,7 +411,7 @@ def expand_exception(exc):
   if not exc.message:
     error_message = "Unknown exception."
   else:
-    error_message = exc.message
+    error_message = force_unicode(exc.message, strings_only=True, errors='replace')
   return error_message, log
 
 

+ 3 - 2
apps/filebrowser/src/filebrowser/templates/listdir_components.mako

@@ -16,7 +16,6 @@
 <%!
 import datetime
 from django.template.defaultfilters import urlencode, stringformat, filesizeformat, date, time
-from django.utils.encoding import iri_to_uri
 %>
 
 
@@ -56,7 +55,9 @@ from django.utils.encoding import iri_to_uri
             display_name = file['path']
           endif
         %>
-        <% path_enc = iri_to_uri(urlencode(file['path'])) %>
+	## Since path is in unicode, Django and Mako handle url encoding and
+	## iri encoding correctly for us.
+        <% path_enc = file['path'] %>
         <tr class="ccs-no_select fb-item-row ${cls}"
          data-filters="ContextMenu"
          data-context-menu-actions="[{'events':['contextmenu','click:relay(.fb-item-options)'],'menu':'ul.context-menu'}]"

+ 7 - 3
desktop/core/src/desktop/lib/django_forms.py

@@ -162,10 +162,14 @@ class KeyValueField(CharField):
       raise ValidationError("Not in key=value format.")
 
 class UnicodeEncodingField(ChoiceOrOtherField):
+  """
+  The cleaned value of the field is the actual encoding, not a tuple
+  """
   CHOICES = [
     ('utf-8', 'Unicode UTF8'),
     ('utf-16', 'Unicode UTF16'),
     ('latin_1', 'Western ISO-8859-1'),
+    ('latin_9', 'Western ISO-8859-15'),
     ('cyrillic', 'Cryrillic'),
     ('arabic', 'Arabic'),
     ('greek', 'Greek'),
@@ -176,7 +180,7 @@ class UnicodeEncodingField(ChoiceOrOtherField):
     ('euc-kr', 'Korean (EUC-KR)'),
     ('iso2022-kr', 'Korean (ISO-2022-KR)'),
     ('gbk', 'Chinese Simplified (GBK)'),
-    ('big5hkscs', 'Chinese Traditional (Big5)'),
+    ('big5hkscs', 'Chinese Traditional (Big5-HKSCS)'),
     ('ascii', 'ASCII'),
   ]
 
@@ -184,10 +188,10 @@ class UnicodeEncodingField(ChoiceOrOtherField):
     ChoiceOrOtherField.__init__(self, UnicodeEncodingField.CHOICES, initial, *args, **kwargs)
 
   def clean(self, value):
-    encoding = value[0]
+    encoding = value[0] == OTHER_VAL and value[1] or value[0]
     if encoding and not desktop.lib.i18n.validate_encoding(encoding):
       raise forms.ValidationError("'%s' encoding is not available" % (encoding,))
-    return value
+    return encoding
 
 
 class MultiForm(object):
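
clean() now returns the chosen encoding itself rather than the (choice, other) tuple; the value[0] == OTHER_VAL and value[1] or value[0] expression is the pre-Python-2.5 spelling of a conditional. A standalone sketch of just that selection, where OTHER_VAL is a hypothetical placeholder for the field's real "other" marker:

    OTHER_VAL = '__other__'                     # hypothetical marker value

    def pick_encoding(value):
        # value is (choice, other_text); prefer the free-form text only when
        # the user picked the "other" option.
        return value[0] == OTHER_VAL and value[1] or value[0]

    print pick_encoding(('utf-8', ''))          # -> 'utf-8'
    print pick_encoding((OTHER_VAL, 'cp1251'))  # -> 'cp1251'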

+ 78 - 83
desktop/core/src/desktop/lib/django_util.py

@@ -34,7 +34,6 @@ from django.template.loader import render_to_string as django_render_to_string
 from django.template import RequestContext
 from django.db import models
 from desktop.lib import django_mako
-from django.core import urlresolvers
 
 import desktop.conf
 import desktop.lib.thrift_util
@@ -293,9 +292,9 @@ def extract_field_data(field):
 
 def get_app_nice_name(app_name):
   try:
-   return desktop.appmanager.get_desktop_module(app_name).settings.NICE_NAME
+    return desktop.appmanager.get_desktop_module(app_name).settings.NICE_NAME
   except:
-   return app_name
+    return app_name
 
 class StructuredException(Exception):
   """
@@ -342,11 +341,7 @@ class PopupException(Exception):
     self.detail = detail
 
   def response(self, request):
-    return render("popup_error.mako", request, 
-      dict(title=self.title, message=self.message, detail=self.detail, request=request))
-
-  def html(self):
-    return django_mako.render_to_string('popup_error.mako',
+    return render("popup_error.mako", request,
       dict(title=self.title, message=self.message, detail=self.detail, request=request))
 
 
@@ -405,84 +400,84 @@ def reverse_with_get(view, args=None, kwargs=None, get=None):
   return url
 
 def humanize_duration(seconds, abbreviate=False):
-    d = datetime.datetime.fromtimestamp(0)
-    now = datetime.datetime.fromtimestamp(seconds)
-    return timesince(d, now, abbreviate)
+  d = datetime.datetime.fromtimestamp(0)
+  now = datetime.datetime.fromtimestamp(seconds)
+  return timesince(d, now, abbreviate)
 
 def timesince(d=None, now=None, abbreviate=False):
-    """
-    Takes two datetime objects and returns the time between d and now
-    as a nicely formatted string, e.g. "10 minutes".  If d occurs after now,
-    then "0 seconds" is returned. If abbreviate is True, it truncates values to,
-    for example, "10m" or "4m 30s". Alternately it can take a second value
-    and return the proper count.
-
-    Units used are years, months, weeks, days, hours, minutes, and seconds.
-    Microseconds are ignored.  Up to two adjacent units will be
-    displayed.  For example, "2 weeks, 3 days" and "1 year, 3 months" are
-    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
-
-    Adapted from the timesince filter in Django:
-    http://docs.djangoproject.com/en/dev/ref/templates/builtins/#timesince
-    """
-    
+  """
+  Takes two datetime objects and returns the time between d and now
+  as a nicely formatted string, e.g. "10 minutes".  If d occurs after now,
+  then "0 seconds" is returned. If abbreviate is True, it truncates values to,
+  for example, "10m" or "4m 30s". Alternately it can take a second value
+  and return the proper count.
+
+  Units used are years, months, weeks, days, hours, minutes, and seconds.
+  Microseconds are ignored.  Up to two adjacent units will be
+  displayed.  For example, "2 weeks, 3 days" and "1 year, 3 months" are
+  possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
+
+  Adapted from the timesince filter in Django:
+  http://docs.djangoproject.com/en/dev/ref/templates/builtins/#timesince
+  """
+
+  if abbreviate:
+    chunks = (
+      (60 * 60 * 24 * 365, lambda n: 'y'),
+      (60 * 60 * 24 * 30, lambda n: 'm'),
+      (60 * 60 * 24 * 7, lambda n : 'w'),
+      (60 * 60 * 24, lambda n : 'd'),
+      (60 * 60, lambda n: 'h'),
+      (60, lambda n: 'm'),
+      (1, lambda n : 's'),
+    )
+  else:
+    chunks = (
+      (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
+      (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
+      (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
+      (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
+      (60 * 60, lambda n: ungettext('hour', 'hours', n)),
+      (60, lambda n: ungettext('minute', 'minutes', n)),
+      (1, lambda n : ungettext('second', 'seconds', n)),
+    )
+
+  # Convert datetime.date to datetime.datetime for comparison.
+  if not isinstance(d, datetime.datetime):
+    d = datetime.datetime(d.year, d.month, d.day)
+  if now and not isinstance(now, datetime.datetime):
+    now = datetime.datetime(now.year, now.month, now.day)
+
+  if not now:
+    if d.tzinfo:
+      now = datetime.datetime.now(LocalTimezone(d))
+    else:
+      now = datetime.datetime.now()
+
+  # ignore microsecond part of 'd' since we removed it from 'now'
+  delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
+  since = delta.days * 24 * 60 * 60 + delta.seconds
+  if since <= 0:
+    # d is in the future compared to now, stop processing.
     if abbreviate:
-      chunks = (
-        (60 * 60 * 24 * 365, lambda n: 'y'),
-        (60 * 60 * 24 * 30, lambda n: 'm'),
-        (60 * 60 * 24 * 7, lambda n : 'w'),
-        (60 * 60 * 24, lambda n : 'd'),
-        (60 * 60, lambda n: 'h'),
-        (60, lambda n: 'm'),
-        (1, lambda n : 's'),
-      )
+      return u'0' + ugettext('s')
     else:
-      chunks = (
-        (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
-        (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
-        (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
-        (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
-        (60 * 60, lambda n: ungettext('hour', 'hours', n)),
-        (60, lambda n: ungettext('minute', 'minutes', n)),
-        (1, lambda n : ungettext('second', 'seconds', n)),
-      )
-    
-    # Convert datetime.date to datetime.datetime for comparison.
-    if not isinstance(d, datetime.datetime):
-        d = datetime.datetime(d.year, d.month, d.day)
-    if now and not isinstance(now, datetime.datetime):
-        now = datetime.datetime(now.year, now.month, now.day)
-
-    if not now:
-        if d.tzinfo:
-            now = datetime.datetime.now(LocalTimezone(d))
-        else:
-            now = datetime.datetime.now()
-
-    # ignore microsecond part of 'd' since we removed it from 'now'
-    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
-    since = delta.days * 24 * 60 * 60 + delta.seconds
-    if since <= 0:
-      # d is in the future compared to now, stop processing.
+      return u'0 ' + ugettext('seconds')
+  for i, (seconds, name) in enumerate(chunks):
+    count = since // seconds
+    if count != 0:
+      break
+  if abbreviate:
+    s = ugettext('%(number)d%(type)s') % {'number': count, 'type': name(count)}
+  else:
+    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
+  if i + 1 < len(chunks):
+    # Now get the second item
+    seconds2, name2 = chunks[i + 1]
+    count2 = (since - (seconds * count)) // seconds2
+    if count2 != 0:
       if abbreviate:
-        return u'0' + ugettext('s')
+        s += ugettext(', %(number)d%(type)s') % {'number': count2, 'type': name2(count2)}
       else:
-        return u'0 ' + ugettext('seconds')
-    for i, (seconds, name) in enumerate(chunks):
-        count = since // seconds
-        if count != 0:
-            break
-    if abbreviate:
-      s = ugettext('%(number)d%(type)s') % {'number': count, 'type': name(count)}
-    else:
-      s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
-    if i + 1 < len(chunks):
-        # Now get the second item
-        seconds2, name2 = chunks[i + 1]
-        count2 = (since - (seconds * count)) // seconds2
-        if count2 != 0:
-          if abbreviate:
-            s += ugettext(', %(number)d%(type)s') % {'number': count2, 'type': name2(count2)}
-          else:
-            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
-    return s
+        s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
+  return s
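
The timesince() body above is only re-indented, not changed in behaviour. A short usage sketch of the expected output, assuming desktop.lib.django_util is importable in a configured Django environment (no translations active, so the English unit names come straight from ungettext):

    import datetime
    from desktop.lib.django_util import timesince, humanize_duration

    start = datetime.datetime(2010, 1, 1, 12, 0, 0)
    end = datetime.datetime(2010, 1, 1, 13, 1, 30)

    print timesince(start, end)                   # 1 hour, 1 minute
    print timesince(start, end, abbreviate=True)  # 1h, 1m
    print humanize_duration(90)                   # 1 minute, 30 seconds
    print humanize_duration(90, abbreviate=True)  # 1m, 30s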

+ 19 - 9
desktop/core/src/desktop/lib/export_csvxls.py

@@ -21,7 +21,10 @@ import pyExcelerator as xl
 import cStringIO
 import csv
 import logging
+
 from django.http import HttpResponse
+from django.utils.encoding import smart_str, force_unicode
+from desktop.lib import i18n
 
 LOG = logging.getLogger(__name__)
 XLS_SIZE_LIMIT = 200 * 1024 * 1024      # 200MB
@@ -87,19 +90,19 @@ def generator(header, data, formatter):
       pass
   yield formatter.fini_doc()
 
-def make_response(header, data, format, name):
+def make_response(header, data, format, name, encoding=None):
   """
   @param header List of strings to form the header
-  @param data An iterator of rows, where every
-  row is a list of strings
+  @param data An iterator of rows, where every row is a list of strings
   @param format Either "csv" or "xls"
   @param name Base name for output file
+  @param encoding Unicode encoding for data
   """
   if format == 'csv':
-    formatter = CSVformatter()
+    formatter = CSVformatter(encoding)
     mimetype = 'application/csv'
   elif format == 'xls':
-    formatter = XLSformatter()
+    formatter = XLSformatter(encoding)
     mimetype = 'application/xls'
   else:
     raise Exception("Unknown format: %s" % (format,))
@@ -109,10 +112,11 @@ def make_response(header, data, format, name):
   return resp
 
 class CSVformatter(Formatter):
-  def __init__(self):
+  def __init__(self, encoding=None):
     super(CSVformatter, self).__init__()
     dialect = csv.excel()
     dialect.quoting = csv.QUOTE_ALL
+    self._encoding = encoding or i18n.get_site_encoding()
     self._csv_writer = csv.writer(self, dialect=dialect)
     self._line = None
 
@@ -127,6 +131,7 @@ class CSVformatter(Formatter):
 
   def format_row(self, row):
     # writerow will call our write() method
+    row = [smart_str(cell, self._encoding, strings_only=True, errors='replace') for cell in row]
     self._csv_writer.writerow(row)
     return self._line
 
@@ -135,8 +140,9 @@ class CSVformatter(Formatter):
 
 class XLSformatter(Formatter):
   """Unfortunately, pyExcelerator can't stream."""
-  def __init__(self):
+  def __init__(self, encoding=None):
     super(XLSformatter, self).__init__()
+    self._encoding = encoding or i18n.get_site_encoding()
     self._book = xl.Workbook()
     self._sheet = self._book.add_sheet("Sheet 1")
     self._row = 0
@@ -145,15 +151,19 @@ class XLSformatter(Formatter):
   def init_doc(self):
     return ''
 
+  def _decode_cell(self, cell):
+    """PyExcelerator happily takes unicode. So first decode binary data."""
+    return force_unicode(cell, self._encoding, strings_only=True, errors='replace')
+
   def format_header(self, header):
     for i, cell in enumerate(header):
-      self._sheet.write(self._row, i, cell)
+      self._sheet.write(self._row, i, self._decode_cell(cell))
     self._row += 1
     return ''
 
   def format_row(self, row):
     for i, cell in enumerate(row):
-      self._sheet.write(self._row, i, cell)
+      self._sheet.write(self._row, i, self._decode_cell(cell))
       self._limit_size(cell)
     self._row += 1
     return ''
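
The CSV path now pushes every cell through smart_str(..., errors='replace') before the row reaches csv.writer, which in Python 2 cannot handle non-ASCII unicode cells. A standalone sketch of that coercion with the standard csv module and a simplified stand-in for smart_str:

    # -*- coding: utf-8 -*-
    import csv
    import cStringIO

    def coerce_cell(cell, encoding='utf-8'):
        # Simplified stand-in for smart_str(cell, encoding, strings_only=True,
        # errors='replace'): encode unicode, leave everything else alone.
        if isinstance(cell, unicode):
            return cell.encode(encoding, 'replace')
        return cell

    buf = cStringIO.StringIO()
    writer = csv.writer(buf, dialect=csv.excel)
    for row in [[1, u'caf\xe9'], [2, 'plain bytes']]:
        # Without coerce_cell, writerow() raises UnicodeEncodeError on row 1.
        writer.writerow([coerce_cell(cell) for cell in row])
    print buf.getvalue()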

+ 55 - 0
docs/dev/i18n.rst

@@ -0,0 +1,55 @@
+.. -*- coding: utf-8 -*-
+
+=============================================
+Status of Internationalization (i18n) Support
+=============================================
+
+.. Note::
+    This documents the design of i18n in HUE, as well as the various degrees of
+    i18n support in different HUE applications.
+
+
+---------------
+Design Overview
+---------------
+
+Thanks to Django, HUE always receives unicode data from the browser. Data
+operations within the boundary of HUE should be done in unicode as much as
+possible. Data encoding occurs when the data exits HUE, in one of the following
+ways:
+
+Model Storage
+    These are data to go into a Django model, such as user firstname, lastname,
+    Beeswax saved query. Since the Django model layer handles unicode data, HUE
+    does not need to do any encoding.
+
+Hadoop Metadata
+    These include namespace metadata, Hive Metastore data, and so on. In
+    general, Hadoop metadata (where they support international characters) all
+    seem to be in UTF-8.
+
+Hadoop Data
+    This category includes file data, Hive table contents, HBase contents, etc.
+    The goal is to support custom encodings as specified by the user.
+    Eventually, on a view by view basis, the user may select the correct data
+    encoding. Meanwhile, for applications and views that do not yet expose such
+    flexibility, they may use the ``DEFAULT_SITE_ENCODING`` configuration
+    variable to interpret binary Hadoop Data.
+
+
+---------------------
+Implementation Status
+---------------------
+
+HDFS
+====
+HDFS namespace is in UTF-8. So we always encode to (and decode from) UTF-8.
+The ``libs.hadoop.fs.hadoopfs`` module speaks UTF-8 and handles this for us.
+The File Browser correctly handles HDFS namespace. *It does not yet expose file
+contents encoding in the UI*.
+
+Hive
+====
+Hive metadata (such as table comments) is also in UTF-8. The
+``beeswax.db_utils`` module converts between HUE (unicode) and Hive (UTF-8).
+Currently, Hive seems to only work with UTF-8 data.
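
A condensed sketch of the boundary rule this document describes (decode on the way in, stay in unicode inside HUE, encode only on the way out), with 'latin1' standing in for whatever DEFAULT_SITE_ENCODING is configured to:

    # -*- coding: utf-8 -*-
    SITE_ENCODING = 'latin1'          # hypothetical DEFAULT_SITE_ENCODING value

    raw_from_hadoop = u'r\xe9sum\xe9'.encode(SITE_ENCODING)  # bytes entering HUE

    # 1. Decode at the boundary; errors='replace' keeps bad data from raising.
    text = raw_from_hadoop.decode(SITE_ENCODING, 'replace')

    # 2. Operate on unicode inside HUE.
    text = text.upper()

    # 3. Encode again only when the data leaves (HDFS write, CSV export, ...).
    print repr(text.encode(SITE_ENCODING))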