utils.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pytz
import re
import shutil
import StringIO
import tempfile

from dateutil.parser import parse

from django.utils.translation import ugettext as _

from desktop.lib.i18n import force_unicode, smart_str

from indexer import conf
from indexer.models import DATE_FIELD_TYPES, TEXT_FIELD_TYPES


LOG = logging.getLogger(__name__)

# Matches a bracketed timestamp such as "[2014-09-04 12:00:00 +0000]".
TIMESTAMP_PATTERN = r'\[([\w\d\s\-\/\:\+]*?)\]'

FIELD_XML_TEMPLATE = '<field name="%(name)s" type="%(type)s" indexed="%(indexed)s" stored="%(stored)s" required="%(required)s" />'

DEFAULT_FIELD = {
  'name': None,
  'type': 'text',
  'indexed': 'true',
  'stored': 'true',
  'required': 'true'
}
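
# Example (illustrative): merging DEFAULT_FIELD with a caller-supplied dict
# and applying FIELD_XML_TEMPLATE yields one Solr <field/> declaration:
#
#   field = DEFAULT_FIELD.copy()
#   field.update({'name': 'id', 'type': 'string'})
#   FIELD_XML_TEMPLATE % field
#   # '<field name="id" type="string" indexed="true" stored="true" required="true" />'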


def schema_xml_with_fields(schema_xml, fields):
  fields_xml = ''
  for field in fields:
    field_dict = DEFAULT_FIELD.copy()
    field_dict.update(field)
    fields_xml += FIELD_XML_TEMPLATE % field_dict + '\n'
  return force_unicode(force_unicode(schema_xml).replace(u'<!-- REPLACE FIELDS -->', force_unicode(fields_xml)))
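
# Sketch of expected use (the template string below is made up for
# illustration; the real schema.xml template must contain the
# "<!-- REPLACE FIELDS -->" marker):
#
#   template = u'<schema><!-- REPLACE FIELDS --></schema>'
#   schema_xml_with_fields(template, [{'name': 'id', 'type': 'string'}])
#   # u'<schema><field name="id" ... required="true" />\n</schema>'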


def schema_xml_with_unique_key_field(schema_xml, unique_key_field):
  return force_unicode(force_unicode(schema_xml).replace(u'<!-- REPLACE UNIQUE KEY -->', force_unicode(unique_key_field)))


def schema_xml_with_fields_and_unique_key(schema_xml, fields, unique_key_field):
  return schema_xml_with_unique_key_field(schema_xml_with_fields(schema_xml, fields), unique_key_field)


def example_schema_xml_with_fields_and_unique_key(fields, unique_key_field):
  # Get complete schema.xml
  with open(os.path.join(conf.CONFIG_TEMPLATE_PATH.get(), 'conf/schema.xml')) as f:
    return schema_xml_with_fields_and_unique_key(f.read(), fields, unique_key_field)


def copy_config_with_fields_and_unique_key(fields, unique_key_field):
  # Get complete schema.xml
  with open(os.path.join(conf.CONFIG_TEMPLATE_PATH.get(), 'conf/schema.xml')) as f:
    schema_xml = schema_xml_with_fields_and_unique_key(f.read(), fields, unique_key_field)

  # Create temporary copy of solr configs
  tmp_path = tempfile.mkdtemp()
  solr_config_path = os.path.join(tmp_path, os.path.basename(conf.CONFIG_TEMPLATE_PATH.get()))
  shutil.copytree(conf.CONFIG_TEMPLATE_PATH.get(), solr_config_path)

  # Write complete schema.xml to copy
  with open(os.path.join(solr_config_path, 'conf/schema.xml'), 'w') as f:
    f.write(smart_str(schema_xml))

  return tmp_path, solr_config_path
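
# Illustrative call. The cleanup step is an assumption on our part: the
# function hands ownership of the temporary directory to the caller, so the
# caller should remove it when done.
#
#   tmp_path, solr_config_path = copy_config_with_fields_and_unique_key(
#       [{'name': 'id', 'type': 'string'}], 'id')
#   try:
#     pass  # e.g. upload solr_config_path to Solr
#   finally:
#     shutil.rmtree(tmp_path)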


def get_field_types(row):
  def test_boolean(value):
    if value.lower() not in ('false', 'true'):
      raise ValueError(_("%s is not a boolean value") % value)

  def test_timestamp(value):
    if not value:
      raise ValueError()

    if len(value) > 50:
      raise ValueError()

    if value.startswith('[') and value.endswith(']'):
      value = value[1:-1]

    try:
      parse(value)
    except Exception:
      raise ValueError()

  # Try the most specific type first; the first test that passes wins.
  test_fns = [('int', int),
              ('float', float),
              ('boolean', test_boolean),
              ('date', test_timestamp)]
  field_types = []
  for field in row:
    field_type = None
    for test_fn in test_fns:
      try:
        test_fn[1](field)
        field_type = test_fn[0]
        break
      except ValueError:
        pass
    field_types.append(field_type or 'text_general')
  return field_types
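
# Example of the inference order (values that fail every test fall back
# to 'text_general'):
#
#   get_field_types(['42', '3.14', 'true', '2014-01-01', 'hello'])
#   # ['int', 'float', 'boolean', 'date', 'text_general']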


def get_type_from_morphline_type(morphline_type):
  if morphline_type in ('POSINT', 'INT', 'BASE10NUM', 'NUMBER'):
    return 'integer'
  else:
    return 'string'


def field_values_from_separated_file(fh, delimiter, quote_character, fields=None):
  if fields is None:
    field_names = None
  else:
    field_names = [field['name'] for field in fields]

  csvfile = StringIO.StringIO()
  content = fh.read()
  field_order = None
  while content:
    # Buffer only complete lines; anything after the last newline stays in
    # `content` until more data arrives.
    last_newline = content.rfind('\n')
    if last_newline > -1:
      csvfile.write(content[:last_newline])
      content = content[last_newline + 1:]
    else:
      csvfile.write(content)
      content = ""

    csvfile.seek(0)
    # The first pass consumes the header row; later passes reuse the captured
    # field names instead of re-reading a header.
    reader = csv.DictReader(csvfile, delimiter=smart_str(delimiter), quotechar=smart_str(quote_character), fieldnames=field_order)

    remove_keys = None
    for row in reader:
      if remove_keys is None:
        if field_names is None:
          remove_keys = []
        else:
          remove_keys = set(row.keys()) - set(field_names)

      if remove_keys:
        for key in remove_keys:
          del row[key]

      yield row

    if field_order is None:
      field_order = reader.fieldnames
    # Drop rows that were already yielded so the next chunk does not re-emit them.
    csvfile.seek(0)
    csvfile.truncate()
    content += fh.read()
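
# Rough usage sketch (any object with read() works; the first row of the
# stream is expected to be the CSV header):
#
#   fh = StringIO.StringIO('id,price\n1,2.50\n2,3.00\n')
#   for row in field_values_from_separated_file(fh, ',', '"'):
#     print row  # {'id': '1', 'price': '2.50'}, then {'id': '2', 'price': '3.00'}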


def field_values_from_log(fh, fields=[{'name': 'message', 'type': 'text_general'}, {'name': 'tdate', 'type': 'timestamp'}]):
  """
  Generate rows from a log file: only a timestamp and the raw message line are extracted.
  """
  buf = ""
  prev = content = fh.read()

  if fields is None:
    timestamp_key = 'timestamp'
    message_key = 'message'
  else:
    try:
      timestamp_key = next(iter(filter(lambda field: field['type'] in DATE_FIELD_TYPES, fields)))['name']
    except (StopIteration, KeyError):
      timestamp_key = None
    try:
      message_key = next(iter(filter(lambda field: field['type'] in TEXT_FIELD_TYPES, fields)))['name']
    except (StopIteration, KeyError):
      message_key = None

  def value_generator(buf):
    rows = buf.split('\n')
    for row in rows:
      if row:
        data = {}
        matches = re.search(TIMESTAMP_PATTERN, row)
        if matches and timestamp_key:
          # The bracketed timestamp must carry a UTC offset: astimezone()
          # raises on naive datetimes.
          data[timestamp_key] = parse(matches.groups()[0]).astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
        if message_key:
          data[message_key] = row
        yield data

  while prev:
    last_newline = content.rfind('\n')
    if last_newline > -1:
      buf = content[:last_newline]
      content = content[last_newline + 1:]
      for row in value_generator(buf):
        yield row
    prev = fh.read()
    content += prev

  if content:
    for row in value_generator(content):
      yield row
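
# Illustration, assuming 'tdate' appears in DATE_FIELD_TYPES and
# 'text_general' in TEXT_FIELD_TYPES (both defined in indexer.models):
#
#   fh = StringIO.StringIO('[2014-09-04 12:00:00 +0000] GET /index.html\n')
#   list(field_values_from_log(fh, fields=[{'name': 'date', 'type': 'tdate'},
#                                          {'name': 'msg', 'type': 'text_general'}]))
#   # [{'date': '2014-09-04T12:00:00Z',
#   #   'msg': '[2014-09-04 12:00:00 +0000] GET /index.html'}]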


def fields_from_log(fh):
  """
  Guess (name, type) field pairs from the first line of a log file: only a
  timestamp and the message are detected.
  """
  rows = fh.read()
  row = rows.split('\n')[0]

  # Extract timestamp
  fields = []
  matches = re.search(TIMESTAMP_PATTERN, row)
  if matches:
    fields.append(('timestamp', 'tdate'))
  fields.append(('message', 'text_general'))

  return fields