View Source Code

HUE-8737 [core] Futurize desktop/libs/indexer for Python 3.5

Ying Chen 6 years ago
Parent
Commit
11ff9648e1
25 changed files with 173 additions and 96 deletions
  1. +7 -5
      desktop/libs/indexer/src/indexer/api.py
  2. +35 -24
      desktop/libs/indexer/src/indexer/api3.py
  3. +1 -0
      desktop/libs/indexer/src/indexer/argument.py
  4. +7 -1
      desktop/libs/indexer/src/indexer/conf.py
  5. +12 -11
      desktop/libs/indexer/src/indexer/controller.py
  6. +2 -1
      desktop/libs/indexer/src/indexer/fields.py
  7. +21 -11
      desktop/libs/indexer/src/indexer/file_format.py
  8. +2 -1
      desktop/libs/indexer/src/indexer/indexers/envelope.py
  9. +2 -1
      desktop/libs/indexer/src/indexer/indexers/envelope_tests.py
  10. +1 -0
      desktop/libs/indexer/src/indexer/indexers/flume.py
  11. +1 -0
      desktop/libs/indexer/src/indexer/indexers/morphline.py
  12. +2 -1
      desktop/libs/indexer/src/indexer/indexers/morphline_operations.py
  13. +14 -5
      desktop/libs/indexer/src/indexer/indexers/morphline_tests.py
  14. +3 -3
      desktop/libs/indexer/src/indexer/indexers/rdbms.py
  15. +12 -4
      desktop/libs/indexer/src/indexer/indexers/sql.py
  16. +3 -2
      desktop/libs/indexer/src/indexer/indexers/sql_tests.py
  17. +4 -2
      desktop/libs/indexer/src/indexer/management/commands/indexer_setup.py
  18. +2 -1
      desktop/libs/indexer/src/indexer/rdbms_indexer_tests.py
  19. +1 -1
      desktop/libs/indexer/src/indexer/solr_api.py
  20. +7 -6
      desktop/libs/indexer/src/indexer/solr_client.py
  21. +3 -2
      desktop/libs/indexer/src/indexer/solr_client_tests.py
  22. +11 -4
      desktop/libs/indexer/src/indexer/test_utils.py
  23. +2 -1
      desktop/libs/indexer/src/indexer/tests.py
  24. +17 -8
      desktop/libs/indexer/src/indexer/utils.py
  25. +1 -1
      desktop/libs/indexer/src/indexer/views.py

+ 7 - 5
desktop/libs/indexer/src/indexer/api.py

@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import next
+from builtins import zip
 import itertools
 import json
 import logging
@@ -50,11 +52,11 @@ def parse_fields(request):
         file_obj = request.fs.open(request.POST.get('path'))
         field_list = field_values_from_separated_file(file_obj, delimiter, quote)
         row = next(field_list)
-        field_names = row.keys()
-        field_types = get_field_types((row.values() for row in itertools.chain([row], field_list)), iterations=51)
+        field_names = list(row.keys())
+        field_types = get_field_types((list(row.values()) for row in itertools.chain([row], field_list)), iterations=51)
         file_obj.close()
 
-        result['data'] = zip(field_names, field_types)
+        result['data'] = list(zip(field_names, field_types))
         result['status'] = 0
       elif content_type == 'morphlines':
         morphlines = json.loads(request.POST.get('morphlines'))
@@ -79,7 +81,7 @@ def parse_fields(request):
       else:
         result['status'] = 1
         result['message'] = _('Type %s not supported.') % content_type
-    except Exception, e:
+    except Exception as e:
       LOG.exception(e.message)
       result['message'] = e.message
   else:
@@ -165,7 +167,7 @@ def collections_create(request):
 
       response['status'] = 0
       response['message'] = _('Collection created!')
-    except Exception, e:
+    except Exception as e:
       LOG.error(e)
       raise
   else:

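The api.py hunk above shows the two rewrites that recur throughout this commit: the comma form of the except clause becomes "except ... as e" (the old spelling is a SyntaxError on Python 3), and the results of zip(), dict.keys() and dict.values() get wrapped in list(), because on Python 3 they are lazy iterators and views rather than lists. A minimal sketch of both changes, with made-up data:

  # Python 3 returns views/iterators where Python 2 returned lists.
  row = {'id': '1', 'rating': '5'}

  field_names = row.keys()          # Py2: list; Py3: dict_keys view
  # field_names[0]                  # TypeError on Python 3: views are not subscriptable
  field_names = list(row.keys())    # identical behavior on both versions

  pairs = zip(field_names, ['int', 'int'])
  # json.dumps(pairs)               # fails on Py3: a zip object is not serializable
  pairs = list(zip(field_names, ['int', 'int']))  # safe to index and serialize

  # The exception change is purely syntactic:
  try:
    raise ValueError('boom')
  except ValueError as e:           # "except ValueError, e" no longer parses
    print(e)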
+ 35 - 24
desktop/libs/indexer/src/indexer/api3.py

@@ -15,13 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+from future import standard_library
+standard_library.install_aliases()
+from builtins import oct
+from builtins import zip
+from past.builtins import basestring
 import chardet
 import json
 import logging
-import urllib
-import StringIO
-
-from urlparse import urlparse
+import urllib.request, urllib.error
+import sys
 
 from django.urls import reverse
 from django.utils.translation import ugettext as _
@@ -51,23 +55,30 @@ from indexer.indexers.sql import SQLIndexer
 from indexer.solr_client import SolrClient, MAX_UPLOAD_SIZE
 from indexer.indexers.flume import FlumeIndexer
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+  from urllib.parse import urlparse, unquote as urllib_unquote
+else:
+  from StringIO import StringIO as string_io
+  from urllib import unquote as urllib_unquote
+  from urlparse import urlparse
 
 LOG = logging.getLogger(__name__)
 
 
 try:
   from beeswax.server import dbms
-except ImportError, e:
+except ImportError as e:
   LOG.warn('Hive and HiveServer2 interfaces are not enabled')
 
 try:
   from filebrowser.views import detect_parquet
-except ImportError, e:
+except ImportError as e:
   LOG.warn('File Browser interface is not enabled')
 
 try:
   from search.conf import SOLR_URL
-except ImportError, e:
+except ImportError as e:
   LOG.warn('Solr Search interface is not enabled')
 
 
@@ -82,7 +93,7 @@ def _escape_white_space_characters(s, inverse = False):
   to = 1 if inverse else 0
   from_ = 0 if inverse else 1
 
-  for pair in MAPPINGS.iteritems():
+  for pair in MAPPINGS.items():
     s = s.replace(pair[to], pair[from_]).encode('utf-8')
 
   return s
@@ -99,7 +110,7 @@ def guess_format(request):
   file_format = json.loads(request.POST.get('fileFormat', '{}'))
 
   if file_format['inputFormat'] == 'file':
-    path = urllib.unquote(file_format["path"])
+    path = urllib_unquote(file_format["path"])
     indexer = MorphlineIndexer(request.user, request.fs)
     if not request.fs.isfile(path):
       raise PopupException(_('Path %(path)s is not a file') % file_format)
@@ -116,7 +127,7 @@ def guess_format(request):
     db = dbms.get(request.user)
     try:
       table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
-    except Exception, e:
+    except Exception as e:
       raise PopupException(e.message if hasattr(e, 'message') and e.message else e)
     storage = {}
     for delim in table_metadata.storage_details:
@@ -163,7 +174,7 @@ def guess_field_types(request):
 
   if file_format['inputFormat'] == 'file':
     indexer = MorphlineIndexer(request.user, request.fs)
-    path = urllib.unquote(file_format["path"])
+    path = urllib_unquote(file_format["path"])
     stream = request.fs.open(path)
     encoding = chardet.detect(stream.read(10000)).get('encoding')
     stream.seek(0)
@@ -209,7 +220,7 @@ def guess_field_types(request):
       snippet['query'] = snippet['statement']
       try:
         sample = db.fetch_result(notebook, snippet, 4, start_over=True)['rows'][:4]
-      except Exception, e:
+      except Exception as e:
         LOG.warn('Skipping sample data as query handle might be expired: %s' % e)
         sample = [[], [], [], [], []]
       columns = db.autocomplete(snippet=snippet, database='', table='')
@@ -261,7 +272,7 @@ def guess_field_types(request):
         'kafkaFieldNames': ','.join(kafkaFieldNames),
         'data': '\n'.join([','.join(['...'] * len(kafkaFieldTypes))] * 5)
       }
-      stream = StringIO.StringIO()
+      stream = string_io()
       stream.write(data)
 
       _convert_format(file_format["format"], inverse=True)
@@ -274,7 +285,7 @@ def guess_field_types(request):
         },
         "format": file_format['format']
       })
-      type_mapping = dict(zip(kafkaFieldNames, kafkaFieldTypes))
+      type_mapping = dict(list(zip(kafkaFieldNames, kafkaFieldTypes)))
 
       for col in format_['columns']:
         col['keyType'] = type_mapping[col['name']]
@@ -317,15 +328,15 @@ def guess_field_types(request):
         } for column in sf.restful('sobjects/%(streamObject)s/describe/' % file_format)['fields']
       ]
       query = 'SELECT %s FROM %s LIMIT 4' % (', '.join([col['name'] for col in table_metadata]), file_format['streamObject'])
-      print query
+      print(query)
 
       try:
         records = sf.query_all(query)
-      except SalesforceRefusedRequest, e:
+      except SalesforceRefusedRequest as e:
         raise PopupException(message=str(e))
 
       format_ = {
-        "sample": [row.values()[1:] for row in records['records']],
+        "sample": [list(row.values())[1:] for row in records['records']],
         "columns": [
             Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
             for col in table_metadata
@@ -349,7 +360,7 @@ def importer_submit(request):
 
   if source['inputFormat'] == 'file':
     if source['path']:
-      path = urllib.unquote(source['path'])
+      path = urllib_unquote(source['path'])
       source['path'] = request.fs.netnormpath(path)
       parent_path = request.fs.parent_path(path)
       stats = request.fs.stats(parent_path)
@@ -357,7 +368,7 @@ def importer_submit(request):
       # Only for HDFS, import data and non-external table
       if split.scheme in ('', 'hdfs') and destination['importData'] and destination['useDefaultLocation'] and oct(stats["mode"])[-1] != '7' and not request.POST.get('show_command'):
         user_scratch_dir = request.fs.get_home_dir() + '/.scratchdir'
-        request.fs.do_as_user(request.user, request.fs.mkdir, user_scratch_dir, 00777)
+        request.fs.do_as_user(request.user, request.fs.mkdir, user_scratch_dir, 0o0777)
         request.fs.do_as_user(request.user, request.fs.rename, source['path'], user_scratch_dir)
         source['path'] = user_scratch_dir + '/' + source['path'].split('/')[-1]
 
@@ -406,7 +417,7 @@ def _small_indexing(user, fs, client, source, destination, index_name):
   errors = []
 
   if source['inputFormat'] not in ('manual', 'table', 'query_handle'):
-    path = urllib.unquote(source["path"])
+    path = urllib_unquote(source["path"])
     stats = fs.stats(path)
     if stats.size > MAX_UPLOAD_SIZE:
       raise PopupException(_('File size is too large to handle!'))
@@ -417,7 +428,7 @@ def _small_indexing(user, fs, client, source, destination, index_name):
   _create_solr_collection(user, fs, client, destination, index_name, kwargs)
 
   if source['inputFormat'] == 'file':
-    path = urllib.unquote(source["path"])
+    path = urllib_unquote(source["path"])
     data = fs.read(path, 0, MAX_UPLOAD_SIZE)
 
   if client.is_solr_six_or_more():
@@ -442,10 +453,10 @@ def _small_indexing(user, fs, client, source, destination, index_name):
     else:
       response = client.index(name=index_name, data=data, **kwargs)
       errors = [error.get('message', '') for error in response['responseHeader'].get('errors', [])]
-  except Exception, e:
+  except Exception as e:
     try:
       client.delete_index(index_name, keep_config=False)
-    except Exception, e2:
+    except Exception as e2:
       LOG.warn('Error while cleaning-up config of failed collection creation %s: %s' % (index_name, e2))
     raise e
 
@@ -531,7 +542,7 @@ def _large_indexing(request, file_format, collection_name, query=None, start_tim
   elif file_format['inputFormat'] == 'stream':
     return _envelope_job(request, file_format, destination, start_time=start_time, lib_path=lib_path)
   elif file_format['inputFormat'] == 'file':
-    input_path = '${nameNode}%s' % urllib.unquote(file_format["path"])
+    input_path = '${nameNode}%s' % urllib_unquote(file_format["path"])
   else:
     input_path = None
 

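api3.py introduces the version-gated import shim that most files in this commit share: Python 3 moved StringIO into io and split urllib/urlparse into urllib.parse, so each module binds version-neutral local names once at import time and uses only those below. A self-contained sketch of the same pattern, runnable on either interpreter:

  import sys

  if sys.version_info[0] > 2:
    from io import StringIO as string_io
    from urllib.parse import urlparse, unquote as urllib_unquote
  else:
    from StringIO import StringIO as string_io
    from urllib import unquote as urllib_unquote
    from urlparse import urlparse

  path = urllib_unquote('/user/hue/my%20file.csv')    # '/user/hue/my file.csv'
  buf = string_io()
  buf.write(u'works on both interpreters\n')
  print(urlparse('hdfs://namenode:8020/tmp').scheme)  # prints: hdfs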
+ 1 - 0
desktop/libs/indexer/src/indexer/argument.py

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 from django.utils.translation import ugettext as _
 
 

+ 7 - 1
desktop/libs/indexer/src/indexer/conf.py

@@ -15,9 +15,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from future import standard_library
+standard_library.install_aliases()
 import logging
 import os
-from urlparse import urlparse
+import sys
 
 from django.utils.translation import ugettext_lazy as _t
 
@@ -25,6 +27,10 @@ from desktop.lib.conf import Config
 from libsolr import conf as libsolr_conf
 from libzookeeper import conf as libzookeeper_conf
 
+if sys.version_info[0] > 2:
+  from urllib.parse import urlparse
+else:
+  from urlparse import urlparse
 
 LOG = logging.getLogger(__name__)
 

+ 12 - 11
desktop/libs/indexer/src/indexer/controller.py

@@ -16,6 +16,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 import json
 import logging
 import numbers
@@ -51,7 +52,7 @@ class CollectionManagerController(object):
     self.user = user
 
   def _format_flags(self, fields):
-    for name, properties in fields.items():
+    for name, properties in list(fields.items()):
       for (code, value) in FLAGS:
         if code in properties['flags']:
           properties[value] = True  # Add a new key-value boolean for the decoded flag
@@ -88,7 +89,7 @@ class CollectionManagerController(object):
       solr_cores = api.cores()
       for name in solr_cores:
         solr_cores[name]['isCoreOnly'] = True
-    except Exception, e:
+    except Exception as e:
       LOG.warn('No Zookeeper servlet running on Solr server: %s' % e)
 
     solr_cores.update(solr_collections)
@@ -102,7 +103,7 @@ class CollectionManagerController(object):
       autocomplete['collections'] = api.collections2()
       autocomplete['configs'] = api.configs()
 
-    except Exception, e:
+    except Exception as e:
       LOG.warn('No Zookeeper servlet running on Solr server: %s' % e)
 
     return autocomplete
@@ -113,7 +114,7 @@ class CollectionManagerController(object):
     try:
       field_data = api.fields(collection_or_core_name)
       fields = self._format_flags(field_data['schema']['fields'])
-    except Exception, e:
+    except Exception as e:
       LOG.warn('/luke call did not succeed: %s' % e)
       try:
         fields = api.schema_fields(collection_or_core_name)
@@ -159,7 +160,7 @@ class CollectionManagerController(object):
         try:
           zc.copy_path(root_node, config_root_path)
 
-        except Exception, e:
+        except Exception as e:
           zc.delete_path(root_node)
           raise PopupException(_('Error in copying Solr configurations: %s') % e)
       finally:
@@ -171,7 +172,7 @@ class CollectionManagerController(object):
         # Delete instance directory if we couldn't create a collection.
         try:
           zc.delete_path(root_node)
-        except Exception, e:
+        except Exception as e:
           raise PopupException(_('Error in deleting Solr configurations.'), detail=e)
         raise PopupException(_('Could not create collection. Check error logs for more info.'))
 
@@ -211,7 +212,7 @@ class CollectionManagerController(object):
         root_node = '%s/%s' % (ZK_SOLR_CONFIG_NAMESPACE, name)
         with ZookeeperClient(hosts=client.get_zookeeper_host(), read_only=False) as zc:
           zc.delete_path(root_node)
-      except Exception, e:
+      except Exception as e:
         # Re-create collection so that we don't have an orphan config
         api.add_collection(name)
         raise PopupException(_('Error in deleting Solr configurations.'), detail=e)
@@ -225,12 +226,12 @@ class CollectionManagerController(object):
     api = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get())
     # Create only new fields
     # Fields that already exist, do not overwrite since there is no way to do that, currently.
-    old_field_names = api.fields(name)['schema']['fields'].keys()
-    new_fields = filter(lambda field: field['name'] not in old_field_names, fields)
+    old_field_names = list(api.fields(name)['schema']['fields'].keys())
+    new_fields = [field for field in fields if field['name'] not in old_field_names]
     new_fields_filtered = []
     for field in new_fields:
       new_field = {}
-      for attribute in filter(lambda attribute: attribute in field, ALLOWED_FIELD_ATTRIBUTES):
+      for attribute in [attribute for attribute in ALLOWED_FIELD_ATTRIBUTES if attribute in field]:
         new_field[attribute] = field[attribute]
       new_fields_filtered.append(new_field)
 
@@ -294,7 +295,7 @@ class CollectionManagerController(object):
             raise PopupException(_('Could not index the data. Check error logs for more info.'))
 
         row_count += len(dataset)
-    except Exception, e:
+    except Exception as e:
       raise PopupException(_('Could not update index: %s') % e)
 
     return row_count

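controller.py also swaps filter(lambda ...) calls for list comprehensions. On Python 3, filter() returns a lazy filter object, so code that takes len() of the result, indexes it, or iterates it twice would break; the comprehension is eager on both versions. A small sketch with made-up field dicts:

  fields = [{'name': 'id'}, {'name': 'text'}]
  old_field_names = ['id']

  new_fields = filter(lambda f: f['name'] not in old_field_names, fields)
  # len(new_fields)                 # TypeError on Python 3: filter object has no len()

  # Eager and identical on both interpreters, and arguably clearer:
  new_fields = [f for f in fields if f['name'] not in old_field_names]
  assert len(new_fields) == 1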
+ 2 - 1
desktop/libs/indexer/src/indexer/fields.py

@@ -14,10 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 import re
 
 
-class FieldType():
+class FieldType(object):
 
   def __init__(self, name, regex, heuristic_regex=None):
     self._name = name

+ 21 - 11
desktop/libs/indexer/src/indexer/file_format.py

@@ -13,12 +13,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+from past.builtins import basestring
+from builtins import object
 import csv
 import gzip
 import operator
 import itertools
 import logging
-import StringIO
+import sys
 
 from django.utils.translation import ugettext as _
 
@@ -29,6 +34,11 @@ from indexer.conf import ENABLE_SCALABLE_INDEXER
 from indexer.fields import Field, guess_field_type_from_samples
 from indexer.indexers.morphline_operations import get_operator
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+  from past.builtins import long
+else:
+  from StringIO import StringIO as string_io
 
 LOG = logging.getLogger(__name__)
 
@@ -347,9 +357,9 @@ class CSVFormat(FileFormat):
     # ******Changed from********
     # rdr = reader(StringIO(sample), self.sniff(sample))
     from _csv import reader
-    rdr = reader(StringIO.StringIO(sample), dialect)
+    rdr = reader(string_io(sample), dialect)
 
-    header = rdr.next()  # assume first row is header
+    header = next(rdr)  # assume first row is header
 
     columns = len(header)
     columnTypes = {}
@@ -365,7 +375,7 @@ class CSVFormat(FileFormat):
       if len(row) != columns:
         continue  # skip rows that have irregular number of columns
 
-      for col in columnTypes.keys():
+      for col in list(columnTypes.keys()):
 
         for thisType in [int, long, float, complex]:
           try:
@@ -392,7 +402,7 @@ class CSVFormat(FileFormat):
     # finally, compare results against first row and "vote"
     # on whether it's a header
     hasHeader = 0
-    for col, colType in columnTypes.items():
+    for col, colType in list(columnTypes.items()):
       if type(colType) == type(0):  # it's a length
         if len(header[col]) != colType:
           hasHeader += 1
@@ -434,7 +444,7 @@ class CSVFormat(FileFormat):
   def _guess_from_file_stream(cls, file_stream):
     for sample_data, sample_lines in cls._get_sample(file_stream):
       try:
-        lines = itertools.islice(StringIO.StringIO(sample_data), IMPORT_PEEK_NLINES)
+        lines = itertools.islice(string_io(sample_data), IMPORT_PEEK_NLINES)
         sample_data_lines = ''
         for line in lines:
           sample_data_lines += line
@@ -525,7 +535,7 @@ class CSVFormat(FileFormat):
       counts[num_columns] += 1
 
     if counts:
-      num_columns_guess = max(counts.iteritems(), key=operator.itemgetter(1))[0]
+      num_columns_guess = max(iter(counts.items()), key=operator.itemgetter(1))[0]
     else:
       num_columns_guess = 0
     return num_columns_guess
@@ -548,12 +558,12 @@ class CSVFormat(FileFormat):
       sample = sample.replace('\n', '\\n')
       return csv.reader(sample.split(self.line_terminator), delimiter=self.delimiter, quotechar=self.quote_char)
     else:
-      return csv.reader(StringIO.StringIO(sample), delimiter=self.delimiter, quotechar=self.quote_char)
+      return csv.reader(string_io(sample), delimiter=self.delimiter, quotechar=self.quote_char)
 
   def _guess_field_names(self, sample):
     reader = self._get_sample_reader(sample)
 
-    first_row = reader.next()
+    first_row = next(reader)
 
     if self._has_header:
       header = []
@@ -599,7 +609,7 @@ class GzipFileReader(object):
     except IOError:
       return None, None
     try:
-      return data, itertools.islice(csv.reader(StringIO.StringIO(data)), IMPORT_PEEK_NLINES)
+      return data, itertools.islice(csv.reader(string_io(data)), IMPORT_PEEK_NLINES)
     except UnicodeError:
       return None, None
 
@@ -611,7 +621,7 @@ class TextFileReader(object):
   def readlines(fileobj, encoding):
     try:
       data = fileobj.read(IMPORT_PEEK_SIZE)
-      return data, itertools.islice(csv.reader(StringIO.StringIO(data)), IMPORT_PEEK_NLINES)
+      return data, itertools.islice(csv.reader(string_io(data)), IMPORT_PEEK_NLINES)
     except UnicodeError:
       return None, None
 

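Two more Python 3 breakages surface in file_format.py: iterator objects lost their .next() method (it became __next__, invoked through the next() builtin, which also exists on Python 2.6+), and the long type was folded into int, so the commit imports a compatibility alias from past.builtins (part of the future package this commit depends on). A short sketch:

  import csv

  rdr = csv.reader(['id,name', '1,foo'])
  header = next(rdr)                # rdr.next() exists only on Python 2
  assert header == ['id', 'name']

  # past.builtins.long aliases int on Python 3, so the type-probing loop
  # 'for thisType in [int, long, float, complex]' keeps working unchanged.
  from past.builtins import long
  assert long('42') == 42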
+ 2 - 1
desktop/libs/indexer/src/indexer/indexers/envelope.py

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 import logging
 import os
 
@@ -48,7 +49,7 @@ class EnvelopeIndexer(object):
     # Create workspace on hdfs
     self.fs.do_as_user(self.username, self.fs.mkdir, hdfs_workspace_path)
 
-    for config_name, config_content in configs.iteritems():
+    for config_name, config_content in configs.items():
       hdfs_config_path = os.path.join(hdfs_workspace_path, config_name)
       self.fs.do_as_user(self.username, self.fs.create, hdfs_config_path, data=config_content)
 

+ 2 - 1
desktop/libs/indexer/src/indexer/indexers/envelope_tests.py

@@ -16,6 +16,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 from django.contrib.auth.models import User
 
 from nose.plugins.skip import SkipTest
@@ -24,7 +25,7 @@ from nose.tools import assert_equal, assert_true
 from indexer.indexers.envelope import EnvelopeIndexer
 
 
-class TestEnvelope():
+class TestEnvelope(object):
 
   def setUp(self):
     raise SkipTest

+ 1 - 0
desktop/libs/indexer/src/indexer/indexers/flume.py

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 import logging
 import os
 

+ 1 - 0
desktop/libs/indexer/src/indexer/indexers/morphline.py

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 import logging
 import os
 

+ 2 - 1
desktop/libs/indexer/src/indexer/indexers/morphline_operations.py

@@ -14,12 +14,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from builtins import object
 from django.utils.translation import ugettext as _
 
 from indexer.argument import TextArgument, CheckboxArgument, MappingArgument
 
 
-class Operator():
+class Operator(object):
 
   def __init__(self, name, args, output_type):
     self._name = name

+ 14 - 5
desktop/libs/indexer/src/indexer/indexers/morphline_tests.py

@@ -14,10 +14,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.from nose.tools import assert_equal
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import zip
+from past.builtins import basestring
+from builtins import object
 from copy import deepcopy
 
-import StringIO
 import logging
+import sys
 
 from nose.tools import assert_equal, assert_true
 from nose.plugins.attrib import attr
@@ -38,11 +43,15 @@ from indexer.indexers.morphline import MorphlineIndexer
 from indexer.solr_client import SolrClient
 from indexer.solr_client_tests import MockSolrCdhCloudHdfsApi
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from StringIO import StringIO as string_io
 
 LOG = logging.getLogger(__name__)
 
 
-class TestIndexer():
+class TestIndexer(object):
 
   simpleCSVString = """id,Rating,Location,Name,Time
 1,5,San Francisco,Good Restaurant,8:30pm
@@ -107,7 +116,7 @@ class TestIndexer():
     self.finish()
 
   def test_guess_csv_format(self):
-    stream = StringIO.StringIO(TestIndexer.simpleCSVString)
+    stream = string_io(TestIndexer.simpleCSVString)
     indexer = MorphlineIndexer("test", solr_client=self.solr_client)
 
     guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
@@ -127,7 +136,7 @@ class TestIndexer():
 
   def test_guess_format_invalid_csv_format(self):
     indexer = MorphlineIndexer("test", solr_client=self.solr_client)
-    stream = StringIO.StringIO(TestIndexer.simpleCSVString)
+    stream = string_io(TestIndexer.simpleCSVString)
 
     guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
 
@@ -305,7 +314,7 @@ class TestIndexer():
     assert_true(isinstance(morphline, basestring))
 
 
-class MockedRequest():
+class MockedRequest(object):
   def __init__(self, user, fs, jt):
     self.user = user
     self.fs = fs

+ 3 - 3
desktop/libs/indexer/src/indexer/indexers/rdbms.py

@@ -52,7 +52,7 @@ def get_db_component(request):
 
     format_['data'] = [{'name': element, 'value': element} for element in data]
     format_['status'] = 0
-  except Exception, e:
+  except Exception as e:
     message = _('Error accessing the database: %s') % e
     LOG.warn(message)
     format_['message'] = message
@@ -107,7 +107,7 @@ def jdbc_db_list(request):
 def get_drivers(request):
   format_ = {'data': [], 'status': 1}
   servers_dict = dict(get_server_choices())
-  format_['data'] = [{'value': key, 'name': servers_dict[key]} for key in servers_dict.keys()]
+  format_['data'] = [{'value': key, 'name': servers_dict[key]} for key in list(servers_dict.keys())]
   format_['data'].append({'value': 'jdbc', 'name': 'JDBC'})
 #   format_['data'].append({'value': 'sqlalchemy', 'name': 'SQL Alchemy'})
   format_['status'] = 0
@@ -172,7 +172,7 @@ def run_sqoop(request, source, destination, start_time):
       url = rdbms_host
 
     password_file_path = request.fs.join(request.fs.get_home_dir() + '/sqoop/', uuid.uuid4().hex + '.password')
-    request.fs.do_as_user(request.user, request.fs.create, password_file_path, overwrite=True, permission=0700, data=smart_str(rdbms_password))
+    request.fs.do_as_user(request.user, request.fs.create, password_file_path, overwrite=True, permission=0o700, data=smart_str(rdbms_password))
 
     lib_files = []
     if destination['sqoopJobLibPaths']:

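rdbms.py also hits the octal-literal change: the leading-zero form 0700 is a SyntaxError on Python 3, while the 0o prefix is accepted by Python 2.6+ and Python 3 alike, so the rewritten line keeps the same permission bits on both. For illustration:

  permission = 0o700                  # '0700' would not even parse on Python 3
  assert permission == 448            # 7 * 64: rwx for the owner only
  assert oct(permission).endswith('700')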
+ 12 - 4
desktop/libs/indexer/src/indexer/indexers/sql.py

@@ -14,8 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.import logging
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
 import logging
-import urllib
+import sys
+import urllib.request, urllib.error
 
 from django.contrib.auth.models import User
 from django.urls import reverse
@@ -24,13 +28,17 @@ from django.utils.translation import ugettext as _
 from desktop.lib import django_mako
 from notebook.models import make_notebook
 
+if sys.version_info[0] > 2:
+  from urllib.parse import unquote as urllib_unquote
+else:
+  from urllib import unquote as urllib_unquote
 
 LOG = logging.getLogger(__name__)
 
 
 try:
   from beeswax.server import dbms
-except ImportError, e:
+except ImportError as e:
   LOG.warn('Hive and HiveServer2 interfaces are not enabled')
 
 
@@ -56,9 +64,9 @@ class SQLIndexer(object):
     kudu_partition_columns = destination['kuduPartitionColumns']
     comment = destination['description']
 
-    source_path = urllib.unquote(source['path'])
+    source_path = urllib_unquote(source['path'])
     external = not destination['useDefaultLocation']
-    external_path = urllib.unquote(destination['nonDefaultLocation'])
+    external_path = urllib_unquote(destination['nonDefaultLocation'])
 
     load_data = destination['importData']
     skip_header = destination['hasHeader']

+ 3 - 2
desktop/libs/indexer/src/indexer/indexers/sql_tests.py

@@ -16,6 +16,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 import json
 
 from django.contrib.auth.models import User
@@ -26,7 +27,7 @@ from desktop.lib.django_test_util import make_logged_in_client
 from indexer.indexers.sql import SQLIndexer
 
 
-class MockRequest():
+class MockRequest(object):
   def __init__(self, fs=None, user=None):
     self.fs = fs if fs is not None else MockFs()
     if user is None:
@@ -36,7 +37,7 @@ class MockRequest():
       self.user = user
 
 
-class MockFs():
+class MockFs(object):
   def __init__(self, path=None):
     self.path = {'isDir': False, 'split': ('/A', 'a'), 'listdir': ['/A']} if path is None else path
 

+ 4 - 2
desktop/libs/indexer/src/indexer/management/commands/indexer_setup.py

@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import next
+from builtins import zip
 import itertools
 import logging
 import os
@@ -107,6 +109,6 @@ class Command(BaseCommand):
     with open(path) as fh:
       field_generator = utils.field_values_from_separated_file(fh, separator, quote_character)
       row = next(field_generator)
-      field_names = row.keys()
-      field_types = utils.get_field_types((row.values() for row in itertools.chain([row], field_generator)), iterations=51)
+      field_names = list(row.keys())
+      field_types = utils.get_field_types((list(row.values()) for row in itertools.chain([row], field_generator)), iterations=51)
       return [{'name': field[0], 'type': field[0] in fieldtypes and fieldtypes[field[0]] or field[1]} for field in zip(field_names, field_types)]

+ 2 - 1
desktop/libs/indexer/src/indexer/rdbms_indexer_tests.py

@@ -16,6 +16,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 import logging
 
 from django.contrib.auth.models import User
@@ -33,7 +34,7 @@ from librdbms.server import dbms as rdbms
 
 LOG = logging.getLogger(__name__)
 
-class TestRdbmsIndexer():
+class TestRdbmsIndexer(object):
 
   @classmethod
   def setup_class(cls):

+ 1 - 1
desktop/libs/indexer/src/indexer/solr_api.py

@@ -37,7 +37,7 @@ def api_error_handler(func):
 
     try:
       return func(*args, **kwargs)
-    except Exception, e:
+    except Exception as e:
       LOG.exception('Error running %s' % func.__name__)
       response['status'] = -1
       response['message'] = smart_unicode(e)

+ 7 - 6
desktop/libs/indexer/src/indexer/solr_client.py

@@ -16,6 +16,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 import logging
 import json
 import os
@@ -84,7 +85,7 @@ class SolrClient(object):
         for name in solr_cores:
           indexes.append({'name': name, 'type': 'core', 'collections': []})
 
-    except Exception, e:
+    except Exception as e:
       msg = _('Solr server could not be contacted properly: %s') % e
       LOG.warn(msg)
       raise PopupException(msg, detail=smart_str(e))
@@ -162,7 +163,7 @@ class SolrClient(object):
     try:
       self.api.get_schema(name)
       return True
-    except Exception, e:
+    except Exception as e:
       LOG.info('Check if index %s existed failed: %s' % (name, e))
       return False
 
@@ -183,7 +184,7 @@ class SolrClient(object):
             root_node = '%s/%s' % (ZK_SOLR_CONFIG_NAMESPACE, name)
             with ZookeeperClient(hosts=self.get_zookeeper_host(), read_only=False) as zc:
               zc.delete_path(root_node)
-          except Exception, e:
+          except Exception as e:
             # Re-create collection so that we don't have an orphan config
             self.api.add_collection(name)
             raise PopupException(_('Error in deleting Solr configurations.'), detail=e)
@@ -281,7 +282,7 @@ class SolrClient(object):
           zc.copy_path(root_node, config_root_path)
         else:
           LOG.warn('Config %s already existing.' % name)
-      except Exception, e:
+      except Exception as e:
         if zc.path_exists(root_node):
           zc.delete_path(root_node)
         raise PopupException(_('Could not create index: %s') % e)
@@ -306,7 +307,7 @@ class SolrClient(object):
 
       if not self.api.create_core(name, instancedir):
         raise Exception('Failed to create core: %s' % name)
-    except Exception, e:
+    except Exception as e:
       raise PopupException(_('Could not create index. Check error logs for more info.'), detail=e)
     finally:
       shutil.rmtree(instancedir)
@@ -361,6 +362,6 @@ class SolrClient(object):
       fields = self._format_flags(field_data['schema']['fields'])
       uniquekey = self.api.uniquekey(index_name)
       return uniquekey, fields
-    except Exception, e:
+    except Exception as e:
       LOG.exception(e.message)
       raise SolrClientException(_("Error in getting schema information for index '%s'" % index_name))

File diff suppressed because it is too large
+ 3 - 2
desktop/libs/indexer/src/indexer/solr_client_tests.py


+ 11 - 4
desktop/libs/indexer/src/indexer/test_utils.py

@@ -16,24 +16,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import StringIO
+from future import standard_library
+standard_library.install_aliases()
+import sys
 
 from nose.tools import assert_equal
 
 from indexer.utils import field_values_from_separated_file
 
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from StringIO import StringIO as string_io
+
 def test_get_ensemble():
   # Non ascii
-  data = StringIO.StringIO('fieldA\nrel=""nofollow"">Twitter for Péché')
+  data = string_io('fieldA\nrel=""nofollow"">Twitter for Péché')
   result = list(field_values_from_separated_file(data, delimiter='\t', quote_character='"'))
   assert_equal(u'rel=""nofollow"">Twitter for Péché', result[0]['fieldA'])
 
-  data = StringIO.StringIO('fieldA\nrel=""nofollow"">Twitter for BlackBerry®')
+  data = string_io('fieldA\nrel=""nofollow"">Twitter for BlackBerry®')
   result = list(field_values_from_separated_file(data, delimiter='\t', quote_character='"'))
   assert_equal(u'rel=""nofollow"">Twitter for BlackBerry®', result[0]['fieldA'])
 
   # Bad binary
-  data = StringIO.StringIO('fieldA\naaa\x80\x02\x03')
+  data = string_io('fieldA\naaa\x80\x02\x03')
   result = list(field_values_from_separated_file(data, delimiter='\t', quote_character='"'))
   assert_equal(u'aaa\x02\x03', result[0]['fieldA'])

+ 2 - 1
desktop/libs/indexer/src/indexer/tests.py

@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from builtins import object
 import json
 
 from nose.plugins.skip import SkipTest
@@ -54,7 +55,7 @@ def test_get_ensemble():
       clear()
 
 
-class TestIndexerWithSolr:
+class TestIndexerWithSolr(object):
 
   @classmethod
   def setup_class(cls):

+ 17 - 8
desktop/libs/indexer/src/indexer/utils.py

@@ -16,13 +16,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from future import standard_library
+standard_library.install_aliases()
+from builtins import next
+from builtins import range
+from builtins import object
 import csv
 import logging
 import os
 import pytz
 import re
 import shutil
-import StringIO
+import sys
 import tempfile
 import uuid
 
@@ -36,6 +41,10 @@ from desktop.lib.i18n import force_unicode, smart_str
 from indexer import conf
 from indexer.models import DATE_FIELD_TYPES, TEXT_FIELD_TYPES, INTEGER_FIELD_TYPES, DECIMAL_FIELD_TYPES, BOOLEAN_FIELD_TYPES
 
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from StringIO import StringIO as string_io
 
 LOG = logging.getLogger(__name__)
 TIMESTAMP_PATTERN = '\[([\w\d\s\-\/\:\+]*?)\]'
@@ -255,15 +264,15 @@ def field_values_from_separated_file(fh, delimiter, quote_character, fields=None
         continue
       else:
         if headers is None:
-          csvfile = StringIO.StringIO(content[:last_newline])
+          csvfile = string_io(content[:last_newline])
         else:
-          csvfile = StringIO.StringIO('\n' + content[:last_newline])
+          csvfile = string_io('\n' + content[:last_newline])
         content = content[last_newline + 1:] + next_chunk
     else:
       if headers is None:
-        csvfile = StringIO.StringIO(content)
+        csvfile = string_io(content)
       else:
-        csvfile = StringIO.StringIO('\n' + content)
+        csvfile = string_io('\n' + content)
       content = fh.read()
 
     # First line is headers
@@ -276,7 +285,7 @@ def field_values_from_separated_file(fh, delimiter, quote_character, fields=None
 
     remove_keys = None
     for row in reader:
-      row = dict([(force_unicode(k), force_unicode(v, errors='ignore')) for k, v in row.iteritems()]) # Get rid of invalid binary chars and convert to unicode from DictReader
+      row = dict([(force_unicode(k), force_unicode(v, errors='ignore')) for k, v in row.items()]) # Get rid of invalid binary chars and convert to unicode from DictReader
 
       # Remove keys that aren't in collection
       if remove_keys is None:
@@ -334,12 +343,12 @@ def field_values_from_log(fh, fields=[ {'name': 'message', 'type': 'text_general
     message_key = 'message'
   else:
     try:
-      timestamp_key = next(iter(filter(lambda field: field['type'] in DATE_FIELD_TYPES, fields)))['name']
+      timestamp_key = next(iter([field for field in fields if field['type'] in DATE_FIELD_TYPES]))['name']
     except:
       LOG.exception('failed to get timestamp key')
       timestamp_key = None
     try:
-      message_key = next(iter(filter(lambda field: field['type'] in TEXT_FIELD_TYPES, fields)))['name']
+      message_key = next(iter([field for field in fields if field['type'] in TEXT_FIELD_TYPES]))['name']
     except:
       LOG.exception('failed to get message key')
       message_key = None

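utils.py drops dict.iteritems(), which no longer exists on Python 3. dict.items() is available on both versions; it returns a list on Python 2 and a lazy view on Python 3, which is equivalent here since the loop consumes it only once. A one-line sketch:

  row = {'k': 'v'}
  # row.iteritems()                   # AttributeError on Python 3
  cleaned = dict((k.upper(), v) for k, v in row.items())
  assert cleaned == {'K': 'v'}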
+ 1 - 1
desktop/libs/indexer/src/indexer/views.py

@@ -133,7 +133,7 @@ def install_examples(request, is_redirect=False):
       data = request.POST.get('data')
       indexer_setup.Command().handle(data=data)
       result['status'] = 0
-    except Exception, e:
+    except Exception as e:
       LOG.exception(e)
       result['message'] = str(e)
 

Some files were not shown because too many files changed in this diff