common.py

#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common utils for beeswax.
"""
from __future__ import print_function

import numbers
import re
import time

from django import forms

from beeswax.models import Namespace, Compute

HIVE_IDENTIFER_REGEX = re.compile(r"(^[a-zA-Z0-9]\w*\.)?[a-zA-Z0-9]\w*$")
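# Illustrative note (not part of the original module): the pattern accepts an optionally
# database-qualified identifier, e.g. 'sample_07' or 'default.sample_07'.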

DL_FORMATS = ['csv', 'xls']

SELECTION_SOURCE = ['', 'table', 'constant',]

AGGREGATIONS = ['', 'COUNT', 'SUM', 'AVG', 'MIN', 'MAX']

JOIN_TYPES = ['', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN', 'JOIN']

SORT_OPTIONS = ['', 'ascending', 'descending']

RELATION_OPS_UNARY = ['IS NULL', 'IS NOT NULL', 'NOT']

RELATION_OPS = ['=', '<>', '<', '<=', '>', '>='] + RELATION_OPS_UNARY

COMPUTE_TYPES = ['hive-compute', 'impala-compute']

TERMINATORS = [
  # (hive representation, description, ascii value)
  (r'\001', r"'^A' (\001)", 1),
  (r'\002', r"'^B' (\002)", 2),
  (r'\003', r"'^C' (\003)", 3),
  (r'\t', r"Tab (\t)", 9),
  (',', "Comma (,)", 44),
  (' ', "Space", 32),
]


def timing(fn):
  def decorator(*args, **kwargs):
    time1 = time.time()
    ret = fn(*args, **kwargs)
    time2 = time.time()
    print('%s elapsed time: %0.3f ms' % (fn.__name__, (time2 - time1) * 1000.0))
    return ret
  return decorator
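
# Illustrative usage of the timing decorator (not part of the original module; the
# function and argument names below are hypothetical):
#
#   @timing
#   def fetch_partitions(db, table):
#     return db.get_partitions(table)
#
# Calling fetch_partitions(...) prints something like
# "fetch_partitions elapsed time: 12.345 ms" and then returns the result.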


def to_choices(x):
  """
  Maps [a, b, c] to [(a,a), (b,b), (c,c)].
  Useful for making ChoiceFields.
  """
  return [(y, y) for y in x]
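
# Illustrative example (not part of the original module):
#   to_choices(DL_FORMATS)  # -> [('csv', 'csv'), ('xls', 'xls')]
# The result can be passed as the `choices` argument of a django.forms.ChoiceField.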


def apply_natural_sort(collection, key=None):
  """
  Applies a natural sort (http://rosettacode.org/wiki/Natural_sorting) to a list of strings or dictionaries.
  Dictionary items require a sort key to be specified.
  """
  to_digit = lambda i: int(i) if i.isdigit() else i

  def tokenize_and_convert(item, key=None):
    if key:
      item = item[key]
    return [to_digit(c) for c in re.split('([0-9]+)', item)]

  return sorted(collection, key=lambda i: tokenize_and_convert(i, key=key))
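
# Illustrative examples (not part of the original module):
#   apply_natural_sort(['col10', 'col2', 'col1'])  # -> ['col1', 'col2', 'col10']
#   apply_natural_sort([{'name': 'db10'}, {'name': 'db2'}], key='name')
#   # -> [{'name': 'db2'}, {'name': 'db10'}]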


def is_compute(cluster):
  if not cluster:
    return False
  connector = cluster.get('connector')
  compute = cluster.get('compute')
  compute_check = lambda x: x and x.get('type') in COMPUTE_TYPES
  return compute_check(cluster) or compute_check(connector) or compute_check(compute)
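
# Illustrative examples (not part of the original module):
#   is_compute({'type': 'hive-compute'})                 # -> True
#   is_compute({'compute': {'type': 'impala-compute'}})  # -> True
#   is_compute({})                                       # -> False (no cluster information)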


def find_compute(cluster=None, user=None, dialect=None, namespace_id=None):
  '''
  find_compute attempts to find a compute based on the provided criteria.
  Following is the priority order:
  1. A full/partial compute object available in cluster.
  2. Lookup namespace based on namespace_id and return the first compute
     filtered by user-access. Needs valid user and namespace_id.
  3. Lookup namespace based on dialect from cluster or provided dialect
     and return the first compute filtered by user-access. Needs valid user.
  '''
  if cluster:
    # If we find a full/partial cluster object, we will attempt to load a compute
    connector = cluster.get('connector')
    compute = cluster.get('compute')
    compute_check = lambda x: x and x.get('type') in COMPUTE_TYPES

    # Pick the most probable compute object
    selected_compute = (cluster if compute_check(cluster)
                        else compute if compute_check(compute)
                        else connector if compute_check(connector) else None)

    # If found, we will attempt to reload it, first by id then by name
    if selected_compute:
      if selected_compute.get('id') and isinstance(selected_compute['id'], numbers.Integral):
        c = Compute.objects.filter(id=selected_compute['id']).first()
        if c:
          return c.to_dict()
      if selected_compute.get('name'):
        c = Compute.objects.filter(name=selected_compute['name']).first()
        if c:
          return c.to_dict()
      # If we could not load by id or name, then we want to pick a default compute based on dialect
      dialect = selected_compute['dialect'] if selected_compute.get('dialect') else dialect

    if not dialect and cluster.get('type'):
      t = cluster['type']
      dialect = 'hive' if t.startswith('hive') else 'impala' if t.startswith('impala') else None

  # We will attempt to find a default compute based on other criteria
  ns = None
  if namespace_id and isinstance(namespace_id, numbers.Integral):
    ns = Namespace.objects.filter(id=namespace_id).first()
  if not ns and dialect:
    ns = Namespace.objects.filter(dialect=dialect).first()

  if ns and user:
    computes = ns.get_computes(user) if ns else None
    if computes:
      return computes[0]
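
# Illustrative usage (not part of the original module; the ids, names and the `request`
# object are hypothetical, and the lookups require Compute/Namespace rows in the database):
#   find_compute(cluster={'compute': {'id': 1, 'type': 'hive-compute'}})
#   find_compute(dialect='impala', user=request.user, namespace_id=2)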


class HiveIdentifierField(forms.RegexField):
  """
  Corresponds to 'Identifier' in Hive.g (Hive's grammar)
  """
  def __init__(self, *args, **kwargs):
    kwargs['regex'] = HIVE_IDENTIFER_REGEX
    super(HiveIdentifierField, self).__init__(*args, **kwargs)
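
# Illustrative usage in a Django form (a sketch, not part of the original module;
# the form and field names are hypothetical):
#
#   class CreateTableForm(forms.Form):
#     name = HiveIdentifierField(label='Table Name', required=True)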