
[indexer] Improve data upload

Abraham Elmahrek 11 years ago
parent
commit
8f4049b5b5

+ 5 - 0
apps/search/src/search/models.py

@@ -370,6 +370,11 @@ class Collection(models.Model):
       properties_python['field_order'] = []
     if 'data_type' not in properties_python:
       properties_python['data_type'] = 'separated'
+    if properties_python['data_type'] == 'separated':
+      if 'separator' not in properties_python:
+        properties_python['separator'] = ','
+      if 'quote_character' not in properties_python:
+        properties_python['quote_character'] = '"'
     return properties_python
 
   def update_properties(self, post_data):
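
With these defaults in place, a 'separated' collection always carries a separator and quote character, so downstream callers no longer need their own fallbacks. A minimal sketch of the resulting behavior, assuming properties_dict wraps the method above and the stored JSON predates this commit (the hue_collection instance is hypothetical):

    # hypothetical Collection whose saved properties omit the new keys
    hue_collection.properties = '{"data_type": "separated", "field_order": []}'
    props = hue_collection.properties_dict
    assert props['separator'] == ','         # filled in by the new default
    assert props['quote_character'] == '"'   # filled in by the new default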

+ 10 - 4
desktop/libs/indexer/src/indexer/api.py

@@ -67,8 +67,9 @@ def parse_fields(request):
         quote = request.POST.get('quote', '"')
         file_obj = request.fs.open(request.POST.get('path'))
         field_list = field_values_from_separated_file(file_obj, delimiter, quote)
-        field_names = next(field_list)
-        field_types = get_field_types(next(field_list))
+        row = next(field_list)
+        field_names = row.keys()
+        field_types = get_field_types(row.values())
         file_obj.close()
 
         result['data'] = zip(field_names, field_types)
@@ -152,13 +153,18 @@ def collections_create(request):
     properties_dict = hue_collection.properties_dict
     properties_dict['data_type'] = request.POST.get('type')
     properties_dict['field_order'] = [field['name'] for field in collection.get('fields', [])]
+    if properties_dict['data_type'] == 'separated':
+      properties_dict['separator'] = request.POST.get('separator', ',')
+      properties_dict['quote_character'] = request.POST.get('quote', '"')
     hue_collection.properties = json.dumps(properties_dict)
     hue_collection.save()
 
     try:
       if request.POST.get('source') == 'file':
         # Index data
-        searcher.update_data_from_hdfs(request.fs, collection.get('name'), request.POST.get('path'), request.POST.get('type'))
+        searcher.update_data_from_hdfs(request.fs,
+                                       hue_collection,
+                                       request.POST.get('path'))
 
       elif request.POST.get('source') == 'hive':
         # Run a custom hive query and post data to collection
@@ -319,7 +325,7 @@ def collections_data(request, collection_or_core):
   if source == 'file':
     searcher = CollectionManagerController(request.user)
 
-    searcher.update_data_from_hdfs(request.fs, collection_or_core, request.POST.get('path'), hue_collection.data_type)
+    searcher.update_data_from_hdfs(request.fs, hue_collection, request.POST.get('path'))
 
     response['status'] = 0
     response['message'] = _('Collections updated!')
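
parse_fields now derives names and types from a single dict row, since field_values_from_separated_file yields dicts keyed by the CSV header (see utils.py below). A rough illustration with a made-up row; the inferred types are assumptions about what get_field_types returns:

    row = {'id': '1', 'price': '9.99'}             # one DictReader row; values are strings
    field_names = row.keys()                       # ['id', 'price'] (dict order)
    field_types = get_field_types(row.values())    # e.g. ['long', 'double']
    result['data'] = zip(field_names, field_types) # [('id', 'long'), ('price', 'double')]

keys() and values() of the same dict, with no intervening modification, correspond index-for-index, so the zip pairs each header with the type inferred from its value.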

+ 24 - 16
desktop/libs/indexer/src/indexer/controller.py

@@ -59,19 +59,19 @@ class CollectionManagerController(object):
 
     return solr_collections
 
-  def get_fields(self, collection_or_core):
+  def get_fields(self, collection_or_core_name):
     try:
-      field_data = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get()).fields(collection_or_core)
+      field_data = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get()).fields(collection_or_core_name)
       fields = self._format_flags(field_data['schema']['fields'])
     except:
-      LOG.exception(_('Could not fetch fields for collection %s.') % collection_or_core)
-      raise PopupException(_('Could not fetch fields for collection %s. See logs for more info.') % collection_or_core)
+      LOG.exception(_('Could not fetch fields for collection %s.') % collection_or_core_name)
+      raise PopupException(_('Could not fetch fields for collection %s. See logs for more info.') % collection_or_core_name)
 
     try:
-      uniquekey = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get()).uniquekey(collection_or_core)
+      uniquekey = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get()).uniquekey(collection_or_core_name)
     except:
-      LOG.exception(_('Could not fetch unique key for collection %s.') % collection_or_core)
-      raise PopupException(_('Could not fetch unique key for collection %s. See logs for more info.') % collection_or_core)
+      LOG.exception(_('Could not fetch unique key for collection %s.') % collection_or_core_name)
+      raise PopupException(_('Could not fetch unique key for collection %s. See logs for more info.') % collection_or_core_name)
 
     return uniquekey, fields
 
@@ -152,7 +152,7 @@ class CollectionManagerController(object):
 
     api.add_fields(name, new_fields_filtered)
 
-  def update_data_from_hdfs(self, fs, collection_or_core, path, data_type='log', indexing_strategy='upload'):
+  def update_data_from_hdfs(self, fs, hue_collection, path, indexing_strategy='upload'):
     """
     Add hdfs path contents to index
     """
@@ -162,22 +162,30 @@ class CollectionManagerController(object):
       if stats.size > MAX_UPLOAD_SIZE:
         raise PopupException(_('File size is too large to handle!'))
       else:
+        # Get fields for filtering
+        unique_key, fields = self.get_fields(hue_collection.name)
+        fields = [{'name': field, 'type': fields[field]['type']} for field in fields]
+
+        properties_dict = hue_collection.properties_dict
+
         fh = fs.open(path)
-        if data_type == 'log':
+        if properties_dict['data_type'] == 'log':
           # Transform to JSON then update
-          data = json.dumps([value for value in utils.field_values_from_log(fh)])
+          data = json.dumps([value for value in utils.field_values_from_log(fh, fields)])
           content_type = 'json'
-        else:
+        elif properties_dict['data_type'] == 'separated':
           # 'data' first line should be headers.
-          data = fh.read()
-          content_type = 'csv'
+          data = json.dumps([value for value in utils.field_values_from_separated_file(fh, properties_dict['separator'], properties_dict['quote_character'], fields)])
+          content_type = 'json'
+        else:
+          raise PopupException(_('Could not update index. Unknown type %s') % properties_dict['data_type'])
         fh.close()
-      if not api.update(collection_or_core, data, content_type=content_type):
+      if not api.update(hue_collection.name, data, content_type=content_type):
         raise PopupException(_('Could not update index. Check error logs for more info.'))
     else:
       raise PopupException(_('Could not update index. Indexing strategy %s not supported.') % indexing_strategy)
 
-  def update_data_from_hive(self, db, collection_or_core, database, table, columns, indexing_strategy='upload'):
+  def update_data_from_hive(self, db, collection_or_core_name, database, table, columns, indexing_strategy='upload'):
     """
    Add hive table contents to index
     """
@@ -201,7 +209,7 @@ class CollectionManagerController(object):
         for row in result.rows():
           dataset.append(row)
 
-        if not api.update(collection_or_core, dataset.csv, content_type='csv'):
+        if not api.update(collection_or_core_name, dataset.csv, content_type='csv'):
           raise PopupException(_('Could not update index. Check error logs for more info.'))
       else:
         raise PopupException(_('Could not update index. Could not fetch any data from Hive.'))
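
update_data_from_hdfs now takes the Hue collection model rather than a name plus data type, and reads the separator, quote character and schema fields from it. A hedged usage sketch; the HDFS path is made up, and request and hue_collection are as in api.py above:

    searcher = CollectionManagerController(request.user)
    # hue_collection supplies name, data_type, separator and quote_character
    searcher.update_data_from_hdfs(request.fs, hue_collection, '/user/test/data.csv')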

+ 118 - 0
desktop/libs/indexer/src/indexer/models.py

@@ -14,3 +14,121 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+FIELD_TYPES = (
+  "alphaOnlySort",
+  "ancestor_path",
+  "boolean",
+  "currency",
+  "date",
+  "descendent_path",
+  "double",
+  "float",
+  "int",
+  "location",
+  "location_rpt",
+  "long",
+  "lowercase",
+  "pdate",
+  "pdouble",
+  "pfloat",
+  "pint",
+  "plong",
+  "point",
+  "random",
+  "string",
+  "tdate",
+  "tdouble",
+  "text_ar",
+  "text_bg",
+  "text_ca",
+  "text_char_norm",
+  "text_cjk",
+  "text_cz",
+  "text_da",
+  "text_de",
+  "text_el",
+  "text_en",
+  "text_en_splitting",
+  "text_en_splitting_tight",
+  "text_es",
+  "text_eu",
+  "text_fa",
+  "text_fi",
+  "text_fr",
+  "text_ga",
+  "text_general",
+  "text_general_rev",
+  "text_gl",
+  "text_greek",
+  "text_hi",
+  "text_hu",
+  "text_hy",
+  "text_id",
+  "text_it",
+  "text_ja",
+  "text_lv",
+  "text_nl",
+  "text_no",
+  "text_pt",
+  "text_ro",
+  "text_ru",
+  "text_sv",
+  "text_th",
+  "text_tr",
+  "text_ws",
+  "tfloat",
+  "tint",
+  "tlong"
+)
+
+TEXT_FIELD_TYPES = (
+  "alphaOnlySort",
+  "lowercase",
+  "random",
+  "string",
+  "text_ar",
+  "text_bg",
+  "text_ca",
+  "text_char_norm",
+  "text_cjk",
+  "text_cz",
+  "text_da",
+  "text_de",
+  "text_el",
+  "text_en",
+  "text_en_splitting",
+  "text_en_splitting_tight",
+  "text_es",
+  "text_eu",
+  "text_fa",
+  "text_fi",
+  "text_fr",
+  "text_ga",
+  "text_general",
+  "text_general_rev",
+  "text_gl",
+  "text_greek",
+  "text_hi",
+  "text_hu",
+  "text_hy",
+  "text_id",
+  "text_it",
+  "text_ja",
+  "text_lv",
+  "text_nl",
+  "text_no",
+  "text_pt",
+  "text_ro",
+  "text_ru",
+  "text_sv",
+  "text_th",
+  "text_tr",
+  "text_ws"
+)
+
+DATE_FIELD_TYPES = (
+  "date",
+  "pdate",
+  "tdate"
+)
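
These tuples are plain whitelists of Solr field types; utils.py below scans a collection's fields against them to decide which field receives the parsed log timestamp and which the raw message. For example, with a made-up schema:

    from indexer.models import DATE_FIELD_TYPES, TEXT_FIELD_TYPES

    fields = [{'name': 'ts', 'type': 'tdate'}, {'name': 'msg', 'type': 'text_en'}]
    [f['name'] for f in fields if f['type'] in DATE_FIELD_TYPES]   # ['ts']
    [f['name'] for f in fields if f['type'] in TEXT_FIELD_TYPES]   # ['msg']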

+ 58 - 13
desktop/libs/indexer/src/indexer/utils.py

@@ -31,6 +31,7 @@ from django.utils.translation import ugettext as _
 from desktop.lib.i18n import force_unicode, smart_str
 
 from indexer import conf
+from indexer.models import DATE_FIELD_TYPES, TEXT_FIELD_TYPES
 
 LOG = logging.getLogger(__name__)
 TIMESTAMP_PATTERN = '\[([\w\d\s\-\/\:\+]*?)\]'
@@ -124,49 +125,93 @@ def get_type_from_morphline_type(morphline_type):
     return 'string'
 
 
-def field_values_from_separated_file(fh, delimiter, quote_character):
+def field_values_from_separated_file(fh, delimiter, quote_character, fields=None):
+  if fields is None:
+    field_names = None
+  else:
+    field_names = [field['name'] for field in fields]
+
   csvfile = StringIO.StringIO()
   content = fh.read()
+  is_first = True
   while content:
     last_newline = content.rfind('\n')
     if last_newline > -1:
+      if not is_first:
+        csvfile.write('\n')
       csvfile.write(content[:last_newline])
       content = content[last_newline+1:]
     else:
+      if not is_first:
+        csvfile.write('\n')
       csvfile.write(content[:])
       content = ""
+    is_first = False
     csvfile.seek(0)
-    reader = csv.reader(csvfile, delimiter=smart_str(delimiter), quotechar=smart_str(quote_character))
+    reader = csv.DictReader(csvfile, delimiter=smart_str(delimiter), quotechar=smart_str(quote_character))
+    remove_keys = None
     for row in reader:
-      yield [cell for cell in row]
+      if remove_keys is None:
+        if field_names is None:
+          remove_keys = []
+        else:
+          remove_keys = set(row.keys()) - set(field_names)
+      if remove_keys:
+        for key in remove_keys:
+          del row[key]
+      yield row
     
-    csvfile.truncate()
+    # Reset the buffer between chunks, keeping the header line so the
+    # next DictReader pass still sees the field names. A bare truncate()
+    # at EOF keeps old rows and would yield them again on the next pass.
+    csvfile.seek(0)
+    header_line = csvfile.readline()
+    csvfile.seek(0)
+    csvfile.truncate()
+    csvfile.write(header_line)
     content += fh.read()
 
 
-def field_values_from_log(fh):
+def field_values_from_log(fh, fields=[{'name': 'message', 'type': 'text_general'}, {'name': 'timestamp', 'type': 'tdate'}]):
   """
   Only timestamp and message
   """
   buf = ""
   prev = content = fh.read()
+  if fields is None:
+    timestamp_key = 'timestamp'
+    message_key = 'message'
+  else:
+    try:
+      # filter() returns a list in Python 2, so wrap it in iter() before next()
+      timestamp_key = next(iter(filter(lambda field: field['type'] in DATE_FIELD_TYPES, fields)))['name']
+    except StopIteration:
+      timestamp_key = None
+    try:
+      message_key = next(iter(filter(lambda field: field['type'] in TEXT_FIELD_TYPES, fields)))['name']
+    except StopIteration:
+      message_key = None
+
+  def value_generator(buf):
+    rows = buf.split('\n')
+    for row in rows:
+      if row:
+        data = {}
+        matches = re.search(TIMESTAMP_PATTERN, row)
+        if matches and timestamp_key:
+          data[timestamp_key] = parse(matches.groups()[0]).astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
+        if message_key:
+          data[message_key] = row
+        yield data
+
   while prev:
     last_newline = content.rfind('\n')
     if last_newline > -1:
       buf = content[:last_newline]
       content = content[last_newline+1:]
-      rows = buf.split('\n')
-      for row in rows:
-        if row:
-          data = {}
-          matches = re.search(TIMESTAMP_PATTERN, row)
-          if matches:
-            data['timestamp'] = parse(matches.groups()[0]).astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
-          data['message'] = row
-          yield data
+      for row in value_generator(buf):
+        yield row
     prev = fh.read()
     content += prev
 
+  if content:
+    for row in value_generator(content):
+      yield row
+
 
 def fields_from_log(fh):
   """

+ 3 - 1
desktop/libs/indexer/static/js/collections.js

@@ -265,7 +265,9 @@ var CreateCollectionViewModel = function() {
           'collection': ko.mapping.toJSON(collection),
           'type': self.sourceType(),
           'path': self.file(),
-          'source': self.source()
+          'source': self.source(),
+          'separator': self.fieldSeparator(),
+          'quote': self.fieldQuoteCharacter()
         }).done(function(data) {
           if (data.status == 0) {
             window.location.href = '/indexer';