Hadoop Installation


Install JDK
Install Hadoop
Configure environment variables
Configure core-site.xml
Configure hdfs-site.xml
Configure mapred-site.xml
Configure yarn-site.xml
Configure slaves

Install JDK

cd /usr/local/src
wget http://download.oracle.com/otn-pub/java/jdk/8u73-b02/jdk-8u73-linux-x64.tar.gz?AuthParam=1458008151_64a44ef61864b914ee2cb5adb5a1ffb4
tar -zxf jdk-8u73-linux-x64.tar.gz
mv jdk1.8.0_73/ /usr/local/
vim /etc/profile.d/java.sh

Add the following:

JAVA_HOME=/usr/local/jdk1.8.0_73/
JAVA_BIN=/usr/local/jdk1.8.0_73/bin
JRE_HOME=/usr/local/jdk1.8.0_73/jre
PATH=$PATH:/usr/local/jdk1.8.0_73/bin:/usr/local/jdk1.8.0_73/jre/bin
CLASSPATH=/usr/local/jdk1.8.0_73/jre/lib:/usr/local/jdk1.8.0_73/lib:/usr/local/jdk1.8.0_73/jre/lib/charsets.jar
export JAVA_HOME JAVA_BIN JRE_HOME PATH CLASSPATH
source /etc/profile.d/java.sh
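
A quick sanity check (not part of the original steps) that the shell now picks up the new JDK:

java -version    # should report version 1.8.0_73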

Set the hostname on all three servers via /etc/hosts and /etc/sysconfig/network (a sketch follows the table below):

IP            Hostname  Roles
172.16.1.212  h1        NameNode, HMaster, SecondaryNameNode, ResourceManager, zookeeper
172.16.1.213  h2        DataNode, HRegionServer, NodeManager, ResourceManager, zookeeper
172.16.1.214  h3        DataNode, HRegionServer, NodeManager, ResourceManager, zookeeper
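
A minimal sketch of this step on h1 (repeat on h2 and h3 with their own names); the IPs and hostnames come from the table above, and the /etc/sysconfig/network edit assumes a CentOS 6 style system:

cat >> /etc/hosts <<'EOF'
172.16.1.212 h1
172.16.1.213 h2
172.16.1.214 h3
EOF
hostname h1                                                  # takes effect immediately
sed -i 's/^HOSTNAME=.*/HOSTNAME=h1/' /etc/sysconfig/network  # persists across reboots
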
Generate an SSH key pair on each of h1, h2, and h3 with `ssh-keygen -t rsa`, then collect the public keys on h1 and distribute the merged authorized_keys:
[root@h2 ~]# scp /root/.ssh/id_rsa.pub root@h1:~/h2pub
[root@h3 ~]# scp /root/.ssh/id_rsa.pub root@h1:~/h3pub

[root@h1 ~]# cat ~/.ssh/id_rsa.pub ~/h2pub ~/h3pub > ~/.ssh/authorized_keys
[root@h1 ~]# scp ~/.ssh/authorized_keys root@h2:~/.ssh/authorized_keys
[root@h1 ~]# scp ~/.ssh/authorized_keys root@h3:~/.ssh/authorized_keys
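
Passwordless SSH can then be verified from h1; each command should print the remote hostname without asking for a password (a verification sketch, not in the original post):

[root@h1 ~]# for host in h1 h2 h3; do ssh root@$host hostname; done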

Install Hadoop

mkdir /home/hadoop
cd !$
wget http://apache.opencas.org/hadoop/common/hadoop-2.6.3/hadoop-2.6.3.tar.gz
tar -zxvf hadoop-2.6.3.tar.gz
mv hadoop-2.6.3 hadoop
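
A quick check that the unpacked distribution runs against the JDK installed earlier (assumes the paths used above):

/home/hadoop/hadoop/bin/hadoop version    # should report Hadoop 2.6.3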

Configure environment variables

vim ~/.bashrc
Add:

#Hadoop Environment Variables
export HADOOP_HOME=/home/hadoop/hadoop
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

source ~/.bashrc
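
Depending on how the daemons are launched, JAVA_HOME may also need to be hard-coded in etc/hadoop/hadoop-env.sh, because its default `export JAVA_HOME=${JAVA_HOME}` does not always resolve when Hadoop starts remote daemons over SSH. A sketch using the JDK path from above:

sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk1.8.0_73|' \
    /home/hadoop/hadoop/etc/hadoop/hadoop-env.sh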

Configure core-site.xml

vim /home/hadoop/hadoop/etc/hadoop/core-site.xml
 <configuration>
 <property>
    <name>fs.defaultFS</name>
    <value>hdfs://h1:9000</value>
 </property>
 <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
 </property>
 <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/home/hadoop/hadoop/tmp</value>
 </property>
 <property>
    <name>hadoop.proxyuser.hduser.hosts</name>
    <value>*</value>
 </property>
 <property>
    <name>hadoop.proxyuser.hduser.groups</name>
    <value>*</value>
 </property>
 <property>
    <name>io.native.lib.available</name>
    <value>true</value>
 </property>
</configuration>
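
A quick way to confirm that the NameNode URI is what the cluster will actually resolve (`hdfs getconf` reads core-site.xml):

/home/hadoop/hadoop/bin/hdfs getconf -confKey fs.defaultFS    # expected: hdfs://h1:9000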

Configure hdfs-site.xml

vim /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
 <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>h1:9011</value>
 </property>
 <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hadoop/dfs/name</value>
 </property>
 <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hadoop/dfs/data</value>
 </property>
<property>
 <name>hadoop.tmp.dir</name>
 <value>/home/hadoop/hadoop/tmp</value>
 <description>A base for other temporary directories.</description>
</property>
 <property>
    <name>dfs.replication</name>
    <value>2</value>
 </property>
</configuration>
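
Optionally pre-create the local directories referenced above; the NameNode format and DataNode startup would otherwise create them on demand (paths taken from the configuration):

# dfs/name is used on the NameNode (h1), dfs/data on the DataNodes (h2, h3)
mkdir -p /home/hadoop/hadoop/dfs/name /home/hadoop/hadoop/dfs/data /home/hadoop/hadoop/tmp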

Configure mapred-site.xml

cd /home/hadoop/hadoop/etc/hadoop
mv mapred-site.xml.template mapred-site.xml
vim mapred-site.xml
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>h1:9001</value>
</property>
</configuration>
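
Note that `mapred.job.tracker` is a MapReduce v1 (JobTracker) property; on Hadoop 2.x, jobs are submitted to the YARN cluster configured below only if `mapreduce.framework.name` is set to `yarn`. A sketch of a mapred-site.xml that adds this (an assumption about the intended setup, not part of the original post):

cat > /home/hadoop/hadoop/etc/hadoop/mapred-site.xml <<'EOF'
<configuration>
 <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
 </property>
 <property>
    <name>mapred.job.tracker</name>
    <value>h1:9001</value>
 </property>
</configuration>
EOF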

Configure yarn-site.xml

vim /home/hadoop/hadoop/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>h1</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>h1:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>h1:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>h1:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>h1:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>h1:8088</value>
</property>
</configuration>
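
If MapReduce jobs are to run on this YARN cluster, the NodeManagers normally also need the shuffle auxiliary service (`yarn.nodemanager.aux-services` set to `mapreduce_shuffle`). A sketch that inserts the property before the closing tag (an addition to the original configuration, assuming GNU sed):

sed -i 's|</configuration>| <property>\n    <name>yarn.nodemanager.aux-services</name>\n    <value>mapreduce_shuffle</value>\n </property>\n</configuration>|' \
    /home/hadoop/hadoop/etc/hadoop/yarn-site.xml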

Configure slaves

Add the DataNode hostnames to this file:

[root@h1 hadoop]# vim slaves
h2
h3

Copy core-site.xml to h2 and h3 (see the note after these commands for the remaining files):

[root@h1 hadoop]# scp core-site.xml root@h2:/home/hadoop/hadoop/etc/hadoop
[root@h1 hadoop]# scp core-site.xml root@h3:/home/hadoop/hadoop/etc/hadoop
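
To keep all three nodes identical, the other modified files (hdfs-site.xml, mapred-site.xml, yarn-site.xml, hadoop-env.sh, slaves) can be synced in one go, for example:

[root@h1 hadoop]# for host in h2 h3; do scp -r /home/hadoop/hadoop/etc/hadoop/* root@$host:/home/hadoop/hadoop/etc/hadoop/; done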

Format the NameNode and start Hadoop:

[root@h1 hadoop]# /home/hadoop/hadoop/bin/hdfs namenode -format
[root@h1 hadoop]# /home/hadoop/hadoop/sbin/start-all.sh

Run `jps` to check which daemons are running.
Browse to 172.16.1.212:50070 (the NameNode web UI) to check the HDFS status.
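
A few additional checks after startup, run on h1 (the 8088 address comes from yarn.resourcemanager.webapp.address configured above):

[root@h1 hadoop]# /home/hadoop/hadoop/bin/hdfs dfsadmin -report    # both DataNodes (h2, h3) should be listed
[root@h1 hadoop]# /home/hadoop/hadoop/bin/yarn node -list          # both NodeManagers should show RUNNING
# The ResourceManager web UI is at http://172.16.1.212:8088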
