#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.fs import hadoopfs, webhdfs, LocalSubFileSystem
from hadoop.job_tracker import LiveJobTracker
from desktop.lib.paths import get_build_dir
from hadoop import conf

import os
import logging

LOG = logging.getLogger(__name__)

def _make_filesystem(identifier):
  choice = os.getenv("FB_FS")
  if choice == "testing":
    path = os.path.join(get_build_dir(), "fs")
    if not os.path.isdir(path):
      LOG.warning(("Could not find fs directory: %s. Perhaps you need to run "
                   "manage.py filebrowser_test_setup?") % path)
    return LocalSubFileSystem(path)
  else:
    cluster_conf = conf.HDFS_CLUSTERS[identifier]
    # The only way to disable WebHDFS is to specify an empty value.
    if cluster_conf.WEBHDFS_URL.get() != '':
      return webhdfs.WebHdfs.from_config(cluster_conf)
    else:
      return hadoopfs.HadoopFileSystem.from_config(
          cluster_conf,
          hadoop_bin_path=conf.HADOOP_BIN.get())
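
# A minimal sketch of the FB_FS switch above (hypothetical usage; it assumes a
# build directory prepared by `manage.py filebrowser_test_setup`): setting the
# environment variable before the first call routes filesystem access to a
# chrooted local directory instead of a live cluster.
#
#   os.environ["FB_FS"] = "testing"
#   fs = _make_filesystem("default")   # returns a LocalSubFileSystem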

def _make_mrcluster(identifier):
  cluster_conf = conf.MR_CLUSTERS[identifier]
  return LiveJobTracker.from_conf(cluster_conf)

FS_CACHE = None


def get_hdfs(identifier="default"):
  """Return the cached filesystem for `identifier', building the cache on first use."""
  global FS_CACHE
  get_all_hdfs()
  return FS_CACHE[identifier]


def get_all_hdfs():
  """Build (once) and return the map of cluster identifier -> filesystem object."""
  global FS_CACHE
  if FS_CACHE is not None:
    return FS_CACHE

  FS_CACHE = {}
  for identifier in conf.HDFS_CLUSTERS.keys():
    FS_CACHE[identifier] = _make_filesystem(identifier)
  return FS_CACHE
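
# Usage sketch (assuming an HDFS cluster named "default" is configured in the
# hadoop config section): repeated calls return the same cached client.
#
#   fs = get_hdfs()            # filesystem for the "default" cluster
#   all_fs = get_all_hdfs()    # dict of {identifier: filesystem}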

MR_CACHE = None


def get_default_mrcluster():
  global MR_CACHE
  try:
    return get_mrcluster()
  except KeyError:
    # Return an arbitrary cluster
    candidates = all_mrclusters()
    if candidates:
      return list(candidates.values())[0]
    return None

def get_next_ha_mrcluster():
  """
  Return the next available JT instance, or None if there is none.

  This currently distinguishes between an active and a standby JT by the fact
  that a standby JT does not respond. A cleaner but more complicated way would
  be to do something like the MRHAAdmin tool and
  org.apache.hadoop.ha.HAServiceStatus#getServiceStatus().
  """
  candidates = all_mrclusters()
  has_ha = sum([conf.MR_CLUSTERS[name].SUBMIT_TO.get() for name in conf.MR_CLUSTERS.keys()]) >= 2

  for name in conf.MR_CLUSTERS.keys():
    config = conf.MR_CLUSTERS[name]
    if config.SUBMIT_TO.get():
      jt = candidates[name]
      if has_ha:
        try:
          status = jt.cluster_status()
          if status.stateAsString == 'RUNNING':
            return (config, jt)
          else:
            LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status))
        except Exception as ex:
          LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex))
      else:
        return (config, jt)
  return None
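
# Usage sketch (hypothetical; assumes two MR1 clusters with `submit_to=true`,
# one active and one standby): the first JobTracker reporting RUNNING wins.
#
#   result = get_next_ha_mrcluster()
#   if result is not None:
#     config, jt = result
#     print('Submitting via %s:%s' % (config.HOST.get(), config.PORT.get()))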

def get_mrcluster(identifier="default"):
  global MR_CACHE
  all_mrclusters()
  return MR_CACHE[identifier]


def all_mrclusters():
  global MR_CACHE
  if MR_CACHE is not None:
    return MR_CACHE

  MR_CACHE = {}
  for identifier in conf.MR_CLUSTERS.keys():
    MR_CACHE[identifier] = _make_mrcluster(identifier)
  return MR_CACHE

def get_cluster_conf_for_job_submission():
  """
  Check the `submit_to' flag for each MR/Yarn cluster, and return the config
  section of the first one that enables submission.

  Supports HA for MR1 by skipping unresponsive JobTrackers.
  """
  for name in conf.YARN_CLUSTERS.keys():
    yarn = conf.YARN_CLUSTERS[name]
    if yarn.SUBMIT_TO.get():
      return yarn

  mr = get_next_ha_mrcluster()
  if mr is not None:
    config, jt = mr
    return config
  else:
    return None

def get_cluster_addr_for_job_submission():
  """
  Check the `submit_to' flag for each MR/Yarn cluster, and return the
  host:port of the first one that enables submission.
  """
  # Note: named cluster_conf (not conf) to avoid shadowing the conf module.
  cluster_conf = get_cluster_conf_for_job_submission()
  if cluster_conf is None:
    return None
  return "%s:%s" % (cluster_conf.HOST.get(), cluster_conf.PORT.get())
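
# Sketch of the two submission helpers together (hypothetical values):
#
#   addr = get_cluster_addr_for_job_submission()   # e.g. 'jt-host.example.com:8021'
#   if addr is None:
#     raise RuntimeError('No cluster configured with submit_to=true')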

def is_yarn():
  cluster = get_cluster_conf_for_job_submission()
  return cluster is not None and 'IS_YARN' in cluster.config.members
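
# Note: is_yarn() keys off the presence of an IS_YARN member, which presumably
# only the YARN_CLUSTERS config sections define (an inference from this check,
# not something this module states).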

def clear_caches():
  """
  Clears this module's filesystem and JobTracker caches. Returns
  something that can be passed back to restore_caches().
  """
  global FS_CACHE, MR_CACHE
  old = FS_CACHE, MR_CACHE
  FS_CACHE, MR_CACHE = None, None
  return old


def restore_caches(old):
  """
  Restores the caches from the result of a previous clear_caches() call.
  """
  global FS_CACHE, MR_CACHE
  FS_CACHE, MR_CACHE = old
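
# A sketch of the intended test pattern (hypothetical test code): swap the
# caches out, run against a fresh configuration, then put them back.
#
#   old = clear_caches()
#   try:
#     fs = get_hdfs()   # rebuilt from the current configuration
#     ...
#   finally:
#     restore_caches(old)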