configure-hadoop.sh

#!/bin/bash
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit

if test -z "$HADOOP_HOME" ; then
  echo "Please define your HADOOP_HOME environment variable."
  exit 1
fi

SCRIPT_DIR=$(dirname "$0")
GIT_ROOT=$(cd "$SCRIPT_DIR" && pwd)/$(cd "$SCRIPT_DIR" && git rev-parse --show-cdup)
BIND_IP=${BIND_IP:-localhost}
HADOOP_TMP_DIR=$HADOOP_HOME/data

if [ "$(uname)" == "Darwin" ]; then
  export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home
fi

SLAVE_IPS=${SLAVE_IPS:-127.0.0.1}
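
# Example invocation (the paths and addresses below are illustrative, not required values):
#   HADOOP_HOME=/usr/lib/hadoop BIND_IP=192.168.1.10 \
#     SLAVE_IPS="192.168.1.11 192.168.1.12" ./configure-hadoop.sh all
# BIND_IP is the address the master binds to; SLAVE_IPS is a space-separated list of
# addresses for which per-slave conf and data directories are written.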

echo -e ".======================== Parameters ========================\n"\
 "HADOOP_HOME    : $HADOOP_HOME\n"\
 "GIT_ROOT       : $GIT_ROOT\n"\
 "HADOOP_TMP_DIR : $HADOOP_TMP_DIR\n"\
 "============================================================\n"

set -x

# Configure a slave
# Used by the sandboxer
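# For example (addresses illustrative), the following would write a conf dir for a
# slave at 10.0.0.2 pointing back at a master on 10.0.0.1:
#   write_hadoop_config 10.0.0.2 10.0.0.1 $HADOOP_HOME/conf-slave-1 $HADOOP_HOME/data-slave-1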
function write_hadoop_config() {
  SLAVE_IP=$1
  MASTER_IP=$2
  TARGET_DIR=$3
  TMP_DIR=$4

  if [ -z "$SLAVE_IP" -o -z "$MASTER_IP" -o -z "$TARGET_DIR" -o -z "$TMP_DIR" ]; then
    echo "usage: $0 slave_ip master_ip target_conf_dir tmp_dir"
    exit 1
  fi

  mkdir -p "$TARGET_DIR"
  cp "$HADOOP_HOME/example-confs/conf.pseudo/hadoop-metrics.properties" "$TARGET_DIR"
  cat > "$TARGET_DIR/core-site.xml" <<END
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://${MASTER_IP}:8020</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>$TMP_DIR</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>webinterface.private.actions</name>
    <value>true</value>
  </property>
  <property>
    <name>slave.host.name</name>
    <value>${SLAVE_IP}</value>
  </property>
  <property>
    <name>dfs.thrift.address</name>
    <value>${MASTER_IP}:10090</value>
  </property>
  <property>
    <name>jobtracker.thrift.address</name>
    <value>${MASTER_IP}:9290</value>
  </property>
</configuration>
END
  cat > "$TARGET_DIR/hdfs-site.xml" <<END
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>dfs.datanode.address</name>
    <value>${SLAVE_IP}:0</value>
  </property>
  <property>
    <name>dfs.datanode.ipc.address</name>
    <value>${SLAVE_IP}:0</value>
  </property>
  <property>
    <name>dfs.datanode.http.address</name>
    <value>${SLAVE_IP}:0</value>
  </property>
  <property>
    <name>dfs.http.address</name>
    <value>${MASTER_IP}:50070</value>
  </property>
  <property>
    <name>dfs.namenode.plugins</name>
    <value>org.apache.hadoop.thriftfs.NamenodePlugin</value>
    <description>Comma-separated list of namenode plug-ins to be activated.
    </description>
  </property>
  <property>
    <name>dfs.datanode.plugins</name>
    <value>org.apache.hadoop.thriftfs.DatanodePlugin</value>
    <description>Comma-separated list of datanode plug-ins to be activated.
    </description>
  </property>
  <!-- we don't really care about being super safe -->
  <property>
    <name>dfs.safemode.min.datanodes</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.safemode.extension</name>
    <value>5000</value>
    <description>
      Determines extension of safe mode in milliseconds
      after the threshold level is reached.
    </description>
  </property>
</configuration>
END
  cat > "$TARGET_DIR/mapred-site.xml" <<END
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>${MASTER_IP}:8021</value>
  </property>
  <property>
    <name>mapred.job.tracker.http.address</name>
    <value>${MASTER_IP}:50030</value>
  </property>
  <property>
    <name>mapred.task.tracker.http.address</name>
    <value>${SLAVE_IP}:0</value>
  </property>
  <property>
    <name>mapred.jobtracker.plugins</name>
    <value>org.apache.hadoop.thriftfs.ThriftJobTrackerPlugin</value>
    <description>Comma-separated list of jobtracker plug-ins to be activated.
    </description>
  </property>
  <property>
    <name>mapred.system.dir</name>
    <value>/hadoop/mapred/system</value>
  </property>
  <property>
    <name>mapred.local.dir</name>
    <value>$TMP_DIR/mapred/local</value>
  </property>
</configuration>
END
}

# Configure
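# Writes the master's conf under $HADOOP_HOME/conf, plus one conf-slave-N / data-slave-N
# pair per entry in SLAVE_IPS (N starting at 1). For instance, with
# SLAVE_IPS="10.0.0.2 10.0.0.3" (illustrative), this produces conf-slave-1 and
# conf-slave-2 under $HADOOP_HOME.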
function configure() {
  perl -p -i -e "s,localhost,$BIND_IP,g" desktop/conf/pseudo-distributed.ini

  mkdir -p "$HADOOP_TMP_DIR"
  write_hadoop_config $BIND_IP $BIND_IP $HADOOP_HOME/conf $HADOOP_TMP_DIR

  idx=0
  for slave in $SLAVE_IPS ; do
    idx=$((idx + 1))
    datadir=$HADOOP_TMP_DIR-slave-$idx
    write_hadoop_config $slave $BIND_IP $HADOOP_HOME/conf-slave-$idx $datadir
  done
}

function start() {
  pushd "$HADOOP_HOME"
  export HADOOP_CLASSPATH=$GIT_ROOT/desktop/libs/hadoop/java-lib/\*
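  # Presumably this puts the jars providing the org.apache.hadoop.thriftfs plugin
  # classes named in the generated configs onto the daemons' classpath.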

  if [ ! -d "$HADOOP_TMP_DIR/dfs/name" ]; then
    bin/hadoop namenode -format
  fi

  # Pass HADOOP_OPTS=$JDB_ON to any hadoop-daemon.sh to enable jdb
  JDB_ON="-Xdebug -Xrunjdwp:transport=dt_socket,address=8901,server=y,suspend=n"
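  # For example (illustrative, not enabled by default):
  #   HADOOP_OPTS="$JDB_ON" HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-master" bin/hadoop-daemon.sh start namenode
  # would start the namenode listening for a debugger on port 8901.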

  HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-master" \
    bin/hadoop-daemon.sh start namenode

  idx=0
  for ip in $SLAVE_IPS ; do
    idx=$((idx + 1))
    HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-slave-$idx" \
      HADOOP_CONF_DIR="$HADOOP_HOME/conf-slave-$idx" \
      HADOOP_LOG_DIR="$HADOOP_HOME/logs-slave-$idx" \
      bin/hadoop-daemon.sh start datanode || true
  done
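
  # Block until HDFS leaves safe mode, so the remaining daemons start against a
  # writable filesystem.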
  bin/hadoop dfsadmin -safemode wait

  HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-master" \
    bin/hadoop-daemon.sh start secondarynamenode
  HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-master" \
    bin/hadoop-daemon.sh start jobtracker

  idx=0
  for ip in $SLAVE_IPS ; do
    idx=$((idx + 1))
    HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-slave-$idx" \
      HADOOP_CONF_DIR="$HADOOP_HOME/conf-slave-$idx" \
      HADOOP_LOG_DIR="$HADOOP_HOME/logs-slave-$idx" \
      bin/hadoop-daemon.sh start tasktracker || true
  done

  popd
}

function stop() {
  pushd "$HADOOP_HOME"

  for daemon in namenode secondarynamenode jobtracker; do
    HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-master" \
      bin/hadoop-daemon.sh stop $daemon
  done

  idx=0
  for ip in $SLAVE_IPS ; do
    idx=$((idx + 1))
    HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-slave-$idx" \
      HADOOP_CONF_DIR="$HADOOP_HOME/conf-slave-$idx" \
      bin/hadoop-daemon.sh --config $HADOOP_HOME/conf-slave-$idx stop tasktracker || true
    HADOOP_PID_DIR="$HADOOP_TMP_DIR-pids-slave-$idx" \
      HADOOP_CONF_DIR="$HADOOP_HOME/conf-slave-$idx" \
      bin/hadoop-daemon.sh --config $HADOOP_HOME/conf-slave-$idx stop datanode || true
  done

  popd
}

pushd "$GIT_ROOT"
if [ ! -d .git ]; then
  echo "Expected $GIT_ROOT to be the git root, but it does not contain a .git directory."
  exit 1
fi

if [ $# -eq 0 ]; then
  echo "Usage: $0 (all|configure|start|stop|write_hadoop_config)"
  exit 1
fi

ARG=$1
shift 1

case $ARG in
  configure|start|write_hadoop_config|stop)
    $ARG "$@"
    ;;
  all)
    configure
    start
    ;;
  *)
    echo "Unrecognized: $ARG"
    exit 1
    ;;
esac