pseudo_hdfs4.py

#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import getpass
import logging
import os
import shutil
import signal
import subprocess
import socket
import tempfile
import textwrap
import time

from desktop.lib.python_util import find_unused_port

import hadoop
from hadoop import cluster
from hadoop.mini_cluster import write_config
from hadoop.job_tracker import LiveJobTracker

from desktop.lib.paths import get_run_root


_shared_cluster = None

LOG = logging.getLogger(__name__)

STARTUP_DEADLINE = 60.0
CLEANUP_TMP_DIR = os.environ.get('MINI_CLUSTER_CLEANUP', 'true')


def is_live_cluster():
  return os.environ.get('LIVE_CLUSTER', 'false').lower() == 'true'


def get_fs_prefix(fs):
  prefix = '/tmp/hue_tests_%s' % str(time.time())
  fs.mkdir(prefix, 0777)
  return prefix


def get_db_prefix():
  return 'hue_test__%s' % str(time.time()).replace('.', '')


class LiveHdfs():
  def __init__(self):
    self.fs = cluster.get_hdfs('default')
    # Assumes /tmp exists and is 1777
    self.fs_prefix = get_fs_prefix(self.fs)
    LOG.info('Using %s as FS root' % self.fs_prefix)

    # Might need more
    self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
    self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')

  @property
  def superuser(self):
    return self.fs.superuser


class PseudoHdfs4(object):
  """Run HDFS and MR2 locally, in pseudo-distributed mode"""

  def __init__(self):
    self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_')
    os.chmod(self._tmpdir, 0755)
    self._superuser = getpass.getuser()
    self.fs_prefix = None

    self._fs = None
    self._jt = None

    self._mr2_env = None
    self._log_dir = None
    self._dfs_http_port = None
    self._dfs_http_address = None
    self._namenode_port = None
    self._fs_default_name = None

    self._rm_port = None

    self._nn_proc = None
    self._dn_proc = None
    self._rm_proc = None
    self._nm_proc = None
    self._hs_proc = None

    self._fqdn = socket.getfqdn()

    self._core_site = None
    self._hdfs_site = None
    self._mapred_site = None

    self.shutdown_hook = None

  def __str__(self):
    return "PseudoHdfs4 (%(name)s) at %(dir)s --- MR2 (%(mapreduce)s) at http://%(fqdn)s:%(port)s" % {
      'name': self._fs_default_name,
      'dir': self._tmpdir,
      'mapreduce': self.mapred_job_tracker,
      'fqdn': self._fqdn,
      'port': self._rm_port
    }

  @property
  def superuser(self):
    return self._superuser

  @property
  def mr2_env(self):
    return self._mr2_env

  @property
  def log_dir(self):
    return self._log_dir

  @property
  def fs_default_name(self):
    return self._fs_default_name

  @property
  def namenode_port(self):
    return self._namenode_port

  @property
  def dfs_http_address(self):
    return self._dfs_http_address

  @property
  def dfs_http_port(self):
    return self._dfs_http_port

  @property
  def mapred_job_tracker(self):
    return "%s:%s" % (self._fqdn, self._rm_port,)

  @property
  def hadoop_conf_dir(self):
    return self._tmppath('conf')

  @property
  def fs(self):
    if self._fs is None:
      if self._dfs_http_address is None:
        LOG.warn("Attempt to access uninitialized filesystem")
        return None
      self._fs = hadoop.fs.webhdfs.WebHdfs("http://%s/webhdfs/v1" % (self._dfs_http_address,), self.fs_default_name)
    return self._fs

  @property
  def jt(self):
    if self._jt is None:
      self._jt = LiveJobTracker(self._fqdn, 0)
    return self._jt

  def stop(self):
    def _kill_proc(name, proc):
      try:
        while proc is not None and proc.poll() is None:
          os.kill(proc.pid, signal.SIGKILL)
          LOG.info('Stopping %s pid %s' % (name, proc.pid,))
          time.sleep(0.5)
      except Exception, ex:
        LOG.exception('Failed to stop pid %s. You may want to do it manually: %s' % (proc.pid, ex))

    _kill_proc('NameNode', self._nn_proc)
    _kill_proc('DataNode', self._dn_proc)
    _kill_proc('ResourceManager', self._rm_proc)
    _kill_proc('Nodemanager', self._nm_proc)
    _kill_proc('HistoryServer', self._hs_proc)

    self._nn_proc = None
    self._dn_proc = None
    self._rm_proc = None
    self._nm_proc = None
    self._hs_proc = None

    if CLEANUP_TMP_DIR == 'false':
      LOG.info('Skipping cleanup of temp directory "%s"' % (self._tmpdir,))
    else:
      LOG.info('Cleaning up temp directory "%s". Use "export MINI_CLUSTER_CLEANUP=false" to avoid.' % (self._tmpdir,))
      shutil.rmtree(self._tmpdir, ignore_errors=True)

    if self.shutdown_hook is not None:
      self.shutdown_hook()

  def _tmppath(self, filename):
    return os.path.join(self._tmpdir, filename)

  def _logpath(self, filename):
    return os.path.join(self._log_dir, filename)

  def start(self):
    LOG.info("Using temporary directory: %s" % (self._tmpdir,))

    if not os.path.exists(self.hadoop_conf_dir):
      os.mkdir(self.hadoop_conf_dir)

    self._log_dir = self._tmppath('logs')
    if not os.path.exists(self._log_dir):
      os.mkdir(self._log_dir)

    self._local_dir = self._tmppath('local')
    if not os.path.exists(self._local_dir):
      os.mkdir(self._local_dir)

    self._write_hadoop_metrics_conf(self.hadoop_conf_dir)
    self._write_core_site()
    self._write_hdfs_site()
    self._write_yarn_site()
    self._write_mapred_site()

    # More stuff to setup in the environment
    env = {
      'YARN_HOME': get_run_root('ext/hadoop/hadoop'),
      'HADOOP_COMMON_HOME': get_run_root('ext/hadoop/hadoop'),
      'HADOOP_MAPRED_HOME': get_run_root('ext/hadoop/hadoop'),
      'HADOOP_HDFS_HOME': get_run_root('ext/hadoop/hadoop'),

      'HADOOP_CONF_DIR': self.hadoop_conf_dir,
      'YARN_CONF_DIR': self.hadoop_conf_dir,

      'HADOOP_HEAPSIZE': '128',
      'HADOOP_LOG_DIR': self._log_dir,
      'USER': self.superuser,
      'LANG': "en_US.UTF-8",
      'PATH': os.environ['PATH'],
    }

    if "JAVA_HOME" in os.environ:
      env['JAVA_HOME'] = os.environ['JAVA_HOME']

    LOG.debug("Hadoop Environment:\n" + "\n".join([ str(x) for x in sorted(env.items()) ]))

    # Format HDFS
    self._format(self.hadoop_conf_dir, env)

    # Run them
    self._nn_proc = self._start_daemon('namenode', self.hadoop_conf_dir, env)
    self._dn_proc = self._start_daemon('datanode', self.hadoop_conf_dir, env)

    # Make sure they're running
    deadline = time.time() + STARTUP_DEADLINE
    while not self._is_hdfs_ready(env):
      if time.time() > deadline:
        self.stop()
        raise RuntimeError('%s is taking too long to start' % (self,))
      time.sleep(5)

    # Start MR2
    self._start_mr2(env)

    # Create HDFS directories
    if not self.fs.exists('/tmp'):
      self.fs.do_as_superuser(self.fs.mkdir, '/tmp', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history/done/2015', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history/done/2015', 01777)

    self.fs.do_as_superuser(self.fs.mkdir, '/var/log/hadoop-yarn/apps', 01777)
    self.fs.do_as_superuser(self.fs.chmod, '/var/log/hadoop-yarn/apps', 01777)

    self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
    self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')

    self.fs_prefix = get_fs_prefix(self.fs)

  def _start_mr2(self, env):
    LOG.info("Starting MR2")

    self._mr2_env = env.copy()

    LOG.debug("MR2 Environment:\n" + "\n".join([ str(x) for x in sorted(self.mr2_env.items()) ]))

    # Run YARN
    self._rm_proc = self._start_daemon('resourcemanager', self.hadoop_conf_dir, self.mr2_env, self._get_yarn_bin(self.mr2_env))
    self._nm_proc = self._start_daemon('nodemanager', self.hadoop_conf_dir, self.mr2_env, self._get_yarn_bin(self.mr2_env))
    self._hs_proc = self._start_daemon('historyserver', self.hadoop_conf_dir, self.mr2_env, self._get_mapred_bin(self.mr2_env))

    # Give them a moment to actually start
    time.sleep(1)

    # Make sure they're running
    deadline = time.time() + STARTUP_DEADLINE
    while not self._is_mr2_ready(self.mr2_env):
      if time.time() > deadline:
        self.stop()
        raise RuntimeError('%s is taking too long to start' % (self,))
      time.sleep(5)

  def _format(self, conf_dir, env):
    args = (self._get_hdfs_bin(env), '--config', conf_dir, 'namenode', '-format')
    LOG.info('Formatting HDFS: %s' % (args,))

    stdout = tempfile.TemporaryFile()
    stderr = tempfile.TemporaryFile()
    try:
      ret = subprocess.call(args, env=env, stdout=stdout, stderr=stderr)
      if ret != 0:
        stdout.seek(0)
        stderr.seek(0)
        raise RuntimeError('Failed to format namenode\n'
                           '=== Stdout ===:\n%s\n'
                           '=== Stderr ===:\n%s' % (stdout.read(), stderr.read()))
    finally:
      stdout.close()
      stderr.close()

  def _log_exit(self, proc_name, exit_code):
    LOG.info('%s exited with %s' % (proc_name, exit_code))
    LOG.debug('--------------------- STDOUT:\n' + file(self._logpath(proc_name + '.stdout')).read())
    LOG.debug('--------------------- STDERR:\n' + file(self._logpath(proc_name + '.stderr')).read())

  def _is_hdfs_ready(self, env):
    if self._nn_proc.poll() is not None:
      self._log_exit('namenode', self._nn_proc.poll())
      return False
    if self._dn_proc.poll() is not None:
      self._log_exit('datanode', self._dn_proc.poll())
      return False

    # Run a `dfsadmin -report' against it
    dfsreport = subprocess.Popen((self._get_hdfs_bin(env), 'dfsadmin', '-report'),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)

    ret = dfsreport.wait()
    if ret != 0:
      LOG.debug('DFS not ready yet.\n%s\n%s' % (dfsreport.stderr.read(), dfsreport.stdout.read()))
      return False

    # Check that the DN is servicing
    report_out = dfsreport.stdout.read()
    if 'Live datanodes (1)' in report_out:
      return True
    LOG.debug('Waiting for DN to come up .................\n%s' % (report_out,))
    return False

  def _is_mr2_ready(self, env):
    if self._rm_proc.poll() is not None:
      self._log_exit('resourcemanager', self._rm_proc.poll())
      return False
    if self._nm_proc.poll() is not None:
      self._log_exit('nodemanager', self._nm_proc.poll())
      return False
    if self._hs_proc.poll() is not None:
      self._log_exit('historyserver', self._hs_proc.poll())
      return False

    # Run a `hadoop job -list all'
    list_all = subprocess.Popen(
      (self._get_mapred_bin(env), 'job', '-list', 'all'),
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      env=env)

    ret = list_all.wait()
    if ret == 0:
      return True

    LOG.debug('MR2 not ready yet.\n%s\n%s' % (list_all.stderr.read(), list_all.stdout.read()))
    return False

  def _start_daemon(self, proc_name, conf_dir, env, hadoop_bin=None):
    if hadoop_bin is None:
      hadoop_bin = self._get_hadoop_bin(env)

    args = (hadoop_bin, '--config', conf_dir, proc_name)

    LOG.info('Starting Hadoop cluster daemon: %s' % (args,))
    stdout = file(self._logpath(proc_name + ".stdout"), 'w')
    stderr = file(self._logpath(proc_name + ".stderr"), 'w')

    return subprocess.Popen(args=args, stdout=stdout, stderr=stderr, env=env)

  def _get_hadoop_bin(self, env):
    try:
      return env['HADOOP_BIN']
    except KeyError:
      return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hadoop')

  def _get_mapred_bin(self, env):
    try:
      return env['MAPRED_BIN']
    except KeyError:
      return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'mapred')

  def _get_yarn_bin(self, env):
    try:
      return env['YARN_BIN']
    except KeyError:
      return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'yarn')

  def _get_hdfs_bin(self, env):
    try:
      return env['HDFS_BIN']
    except KeyError:
      return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hdfs')
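
  # A minimal sketch of pointing the daemons at an existing Hadoop install instead of
  # the bundled ext/hadoop tree. The _get_*_bin helpers above only consult the env dict
  # that start() builds, so these keys take effect only if added there; the
  # '/usr/local/hadoop' path below is hypothetical, not something this module ships:
  #
  #   env['HADOOP_BIN'] = '/usr/local/hadoop/bin/hadoop'
  #   env['HDFS_BIN'] = '/usr/local/hadoop/bin/hdfs'
  #   env['MAPRED_BIN'] = '/usr/local/hadoop/bin/mapred'
  #   env['YARN_BIN'] = '/usr/local/hadoop/bin/yarn'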

  def _write_hdfs_site(self):
    self._dfs_http_port = find_unused_port()
    self._dfs_http_address = '%s:%s' % (self._fqdn, self._dfs_http_port)

    hdfs_configs = {
      'dfs.webhdfs.enabled': 'true',
      'dfs.http.address': self._dfs_http_address,
      'dfs.namenode.safemode.extension': 1,
      'dfs.namenode.safemode.threshold-pct': 0,
      'dfs.datanode.address': '%s:0' % self._fqdn,
      'dfs.datanode.http.address': '0.0.0.0:0', # Work around webhdfs redirect bug -- bind to all interfaces
      'dfs.datanode.ipc.address': '%s:0' % self._fqdn,
      'dfs.replication': 1,
      'dfs.safemode.min.datanodes': 1,
      'dfs.namenode.fs-limits.min-block-size': '1000',
      'dfs.permissions': 'true'
    }
    self._hdfs_site = self._tmppath('conf/hdfs-site.xml')
    write_config(hdfs_configs, self._hdfs_site)
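
  # For reference, write_config (imported from hadoop.mini_cluster) is expected to
  # serialize a dict like hdfs_configs above into standard Hadoop configuration XML,
  # roughly of this shape. This is a sketch of the presumed output, not a verbatim copy:
  #
  #   <?xml version="1.0"?>
  #   <configuration>
  #     <property>
  #       <name>dfs.webhdfs.enabled</name>
  #       <value>true</value>
  #     </property>
  #     ...
  #   </configuration>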

  def _write_core_site(self):
    self._namenode_port = find_unused_port()
    self._fs_default_name = 'hdfs://%s:%s' % (self._fqdn, self._namenode_port,)

    core_configs = {
      'fs.default.name': self._fs_default_name,
      'hadoop.security.authorization': 'true',
      'hadoop.security.authentication': 'simple',
      'hadoop.proxyuser.hue.hosts': '*',
      'hadoop.proxyuser.hue.groups': '*',
      'hadoop.proxyuser.oozie.hosts': '*',
      'hadoop.proxyuser.oozie.groups': '*',
      'hadoop.proxyuser.%s.hosts' % (getpass.getuser(),): '*',
      'hadoop.proxyuser.%s.groups' % (getpass.getuser(),): '*',
      'hadoop.tmp.dir': self._tmppath('hadoop_tmp_dir'),
      'fs.trash.interval': 10
    }
    self._core_site = self._tmppath('conf/core-site.xml')
    write_config(core_configs, self._core_site)

  def _write_yarn_site(self):
    self._rm_resource_port = find_unused_port()
    self._rm_port = find_unused_port()
    self._rm_scheduler_port = find_unused_port()
    self._rm_admin_port = find_unused_port()
    self._rm_webapp_port = find_unused_port()
    self._nm_port = find_unused_port()
    self._nm_webapp_port = find_unused_port()

    yarn_configs = {
      'yarn.resourcemanager.resource-tracker.address': '%s:%s' % (self._fqdn, self._rm_resource_port,),
      'yarn.resourcemanager.address': '%s:%s' % (self._fqdn, self._rm_port,),
      'yarn.resourcemanager.scheduler.address': '%s:%s' % (self._fqdn, self._rm_scheduler_port,),
      'yarn.resourcemanager.scheduler.class': 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler',
      'yarn.resourcemanager.admin.address': '%s:%s' % (self._fqdn, self._rm_admin_port,),
      'yarn.resourcemanager.webapp.address': '%s:%s' % (self._fqdn, self._rm_webapp_port,),

      'yarn.log-aggregation-enable': 'true',
      'yarn.dispatcher.exit-on-error': 'true',

      'yarn.nodemanager.local-dirs': self._local_dir,
      'yarn.nodemanager.log-dirs': self._logpath('yarn-logs'),
      'yarn.nodemanager.remote-app-log-dir': '/var/log/hadoop-yarn/apps',
      'yarn.nodemanager.localizer.address': '%s:%s' % (self._fqdn, self._nm_port,),
      'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
      'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
      'yarn.nodemanager.webapp.address': '%s:%s' % (self._fqdn, self._nm_webapp_port,),

      'yarn.app.mapreduce.am.staging-dir': '/tmp/hadoop-yarn/staging',

      'yarn.application.classpath':
        '''$HADOOP_CONF_DIR,
           $HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
           $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
           $HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*,
           $HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*''',
    }
    self._yarn_site = self._tmppath('conf/yarn-site.xml')
    write_config(yarn_configs, self._tmppath('conf/yarn-site.xml'))

  def _write_mapred_site(self):
    self._jh_port = find_unused_port()
    self._jh_web_port = find_unused_port()
    self._mr_shuffle_port = find_unused_port()

    mapred_configs = {
      'mapred.job.tracker': '%s:%s' % (self._fqdn, self._rm_port,),
      'mapreduce.framework.name': 'yarn',
      'mapreduce.jobhistory.address': '%s:%s' % (self._fqdn, self._jh_port,),
      'mapreduce.jobhistory.webapp.address': '%s:%s' % (self._fqdn, self._jh_web_port,),
      'mapreduce.task.tmp.dir': self._tmppath('tasks'),
      'mapreduce.shuffle.port': self._mr_shuffle_port,
    }
    self._mapred_site = self._tmppath('conf/mapred-site.xml')
    write_config(mapred_configs, self._tmppath('conf/mapred-site.xml'))

  def _write_hadoop_metrics_conf(self, conf_dir):
    f = file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
    try:
      f.write(textwrap.dedent("""
          dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
          mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
          jvm.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
          rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
          """))
    finally:
      f.close()


def shared_cluster():
  global _shared_cluster

  if _shared_cluster is None:
    if is_live_cluster():
      cluster = LiveHdfs()
    else:
      cluster = PseudoHdfs4()
      atexit.register(cluster.stop)

      try:
        cluster.start()
      except Exception, ex:
        LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))

      fqdn = socket.getfqdn()
      webhdfs_url = "http://%s:%s/webhdfs/v1" % (fqdn, cluster.dfs_http_port,)

      closers = [
        hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
        hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),

        hadoop.conf.YARN_CLUSTERS['default'].HOST.set_for_testing(fqdn),
        hadoop.conf.YARN_CLUSTERS['default'].PORT.set_for_testing(cluster._rm_port),
        hadoop.conf.YARN_CLUSTERS['default'].RESOURCE_MANAGER_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
        hadoop.conf.YARN_CLUSTERS['default'].PROXY_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
        hadoop.conf.YARN_CLUSTERS['default'].HISTORY_SERVER_API_URL.set_for_testing('%s:%s' % (cluster._fqdn, cluster._jh_web_port,)),
      ]

      old = hadoop.cluster.clear_caches()

      def restore_config():
        hadoop.cluster.restore_caches(old)
        for x in closers:
          x()

      cluster.shutdown_hook = restore_config

    _shared_cluster = cluster

  return _shared_cluster
  459. """
  460. Manual start from the Hue shell.
  461. build/env/bin/hue shell
  462. >
  463. from hadoop import pseudo_hdfs4
  464. pseudo_hdfs4.main()
  465. >
  466. exit() # To shutdown cleanly
  467. """

def main():
  logging.basicConfig(level=logging.DEBUG)

  cluster = PseudoHdfs4()
  cluster.start()

  print "%s running" % (cluster,)
  print "fs.default.name=%s" % (cluster.fs_default_name,)
  print "dfs.http.address=%s" % (cluster.dfs_http_address,)
  print "mapred.job.tracker=%s" % (cluster.mapred_job_tracker,)

  from IPython.Shell import IPShellEmbed
  IPShellEmbed()()

  cluster.stop()