HADOOP + ZOOKEEPER + HBASE Test Cluster Setup

Environment variables on all three nodes (appended to each node's shell profile)


export PATH
export JAVA_HOME=/usr/local/jdk1.8.0_191
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export HADOOP_HOME=/usr/local/hadoop-3.1.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

Passwordless SSH between the nodes

[root@master ~]# ssh-keygen -t dsa
[root@master ~]# cd .ssh
[root@master .ssh]# ls
id_dsa  id_dsa.pub  known_hosts
[root@master .ssh]# cat *.pub > authorized_keys
[root@master .ssh]# ssh 192.168.157.22 cat ~/.ssh/*.pub >> authorized_keys
[root@master .ssh]# ssh 192.168.157.33 cat ~/.ssh/*.pub >> authorized_keys
[root@master .ssh]# scp authorized_keys 192.168.157.22:~/.ssh/.   
[root@master .ssh]# scp authorized_keys 192.168.157.33:~/.ssh/.  
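A quick check that the trust works (hostnames assume the /etc/hosts mapping above); each command should print the remote hostname without asking for a password:

[root@master ~]# ssh slave1 hostname
[root@master ~]# ssh slave2 hostname
[root@slave1 ~]# ssh master hostname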

Extract the JDK

tar -xvf jdk-8u191-linux-x64.tar -C /usr/local

Extract Hadoop

tar -zxvf hadoop-3.1.1.tar.gz -C /usr/local/

Create the HDFS and temp directories on all three nodes

mkdir -p /data/hadoop/hdfs/name /data/hadoop/hdfs/data /var/log/hadoop/tmp

vi /usr/local/hadoop-3.1.1/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/usr/local/jdk1.8.0_191
export HADOOP_HOME=/usr/local/hadoop-3.1.1
# run the daemons as root (the Hadoop 3 start scripts require these when starting as root)
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

vi /usr/local/hadoop-3.1.1/etc/hadoop/yarn-env.sh

export JAVA_HOME=/usr/local/jdk1.8.0_191

vi /usr/local/hadoop-3.1.1/etc/hadoop/workers

slave1
slave2

vi /usr/local/hadoop-3.1.1/etc/hadoop/core-site.xml

<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://192.168.157.11:9000</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/var/log/hadoop/tmp</value>
        </property>
</configuration>

vi /usr/local/hadoop-3.1.1/etc/hadoop/hdfs-site.xml

<configuration>
    <property>
        <name>dfs.name.dir</name>
        <value>/data/hadoop/hdfs/name</value>
        <description>
            Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.
        </description>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/data/hadoop/hdfs/data</value>
        <description>
            Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.
        </description>
    </property>
    <property>
        <name>dfs.namenode.http-address</name>
        <value>192.168.157.11:50070</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>192.168.157.11:50090</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
        <description>Disable HDFS permission checking (test cluster only).</description>
    </property>
</configuration>  

Print the Hadoop classpath; the output is pasted into yarn.application.classpath in yarn-site.xml below.

[root@master hadoop]# hadoop classpath

/usr/local/hadoop-3.1.1/etc/hadoop:/usr/local/hadoop-3.1.1/share/hadoop/common/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/common/*:/usr/local/hadoop-3.1.1/share/hadoop/hdfs:/usr/local/hadoop-3.1.1/share/hadoop/hdfs/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/hdfs/*:/usr/local/hadoop-3.1.1/share/hadoop/mapreduce/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/mapreduce/*:/usr/local/hadoop-3.1.1/share/hadoop/yarn:/usr/local/hadoop-3.1.1/share/hadoop/yarn/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/yarn/*

[root@master hadoop]# vi /usr/local/hadoop-3.1.1/etc/hadoop/yarn-site.xml

<configuration>
    <!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.application.classpath</name>
        <value>/usr/local/hadoop-3.1.1/etc/hadoop:/usr/local/hadoop-3.1.1/share/hadoop/common/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/common/*:/usr/local/hadoop-3.1.1/share/hadoop/hdfs:/usr/local/hadoop-3.1.1/share/hadoop/hdfs/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/hdfs/*:/usr/local/hadoop-3.1.1/share/hadoop/mapreduce/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/mapreduce/*:/usr/local/hadoop-3.1.1/share/hadoop/yarn:/usr/local/hadoop-3.1.1/share/hadoop/yarn/lib/*:/usr/local/hadoop-3.1.1/share/hadoop/yarn/*</value>
    </property>
</configuration>


vi /usr/local/hadoop-3.1.1/etc/hadoop/mapred-site.xml

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>
            $HADOOP_HOME/etc/hadoop,
            $HADOOP_HOME/share/hadoop/common/*,
            $HADOOP_HOME/share/hadoop/common/lib/*,
            $HADOOP_HOME/share/hadoop/hdfs/*,
            $HADOOP_HOME/share/hadoop/hdfs/lib/*,
            $HADOOP_HOME/share/hadoop/mapreduce/*,
            $HADOOP_HOME/share/hadoop/mapreduce/lib/*,
            $HADOOP_HOME/share/hadoop/yarn/*,
            $HADOOP_HOME/share/hadoop/yarn/lib/*
        </value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>192.168.157.11:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>192.168.157.11:19888</value>
    </property>
</configuration>
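Note that start-all.sh does not start the MapReduce JobHistory server, so the two jobhistory addresses above only respond once it is started separately (Hadoop 3 syntax, run on the master after the cluster is up):

[root@master ~]# mapred --daemon start historyserver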

Copy the configured Hadoop directory to the two slave nodes

[root@master hadoop]# scp -r /usr/local/hadoop-3.1.1  slave1:/usr/local/
[root@master hadoop]# scp -r /usr/local/hadoop-3.1.1  slave2:/usr/local/

Format the HDFS NameNode and start the cluster

[root@master hadoop]# hdfs namenode -format
[root@master hadoop]# start-all.sh
Check the processes on the DataNodes:
[root@slave1 hadoop]# jps
37393 NodeManager
37555 Jps
37194 DataNode

[root@slave2 local]# jps
5137 Jps
4777 DataNode
4973 NodeManager

Verification

http://192.168.157.11:8088/cluster
http://192.168.157.11:50070/dfshealth.html#tab-overview
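Beyond the web UIs, a few command-line checks confirm that HDFS and YARN are working; the test path and the pi job arguments below are only illustrative:

[root@master ~]# hdfs dfsadmin -report        # should report 2 live DataNodes
[root@master ~]# yarn node -list              # should list 2 running NodeManagers
[root@master ~]# hdfs dfs -mkdir -p /test
[root@master ~]# hdfs dfs -put /etc/hosts /test/
[root@master ~]# hdfs dfs -ls /test
[root@master ~]# hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar pi 2 10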

Install ZooKeeper

Extract

tar -zxvf zookeeper-3.4.13.tar.gz -C /usr/local/

Create the data and log directories

[root@master zookeeper-3.4.13]# mkdir -p /usr/local/zookeeper-3.4.13/data
[root@master zookeeper-3.4.13]# mkdir -p /usr/local/zookeeper-3.4.13/logs

Configure zoo.cfg (clientPort 2181 is what HBase will connect to; in each server.N entry the first port carries quorum traffic between peers and the second is used for leader election)

[root@master conf]# cp /usr/local/zookeeper-3.4.13/conf/zoo_sample.cfg /usr/local/zookeeper-3.4.13/conf/zoo.cfg
[root@master conf]# vi zoo.cfg 
clientPort=2181
dataDir=/usr/local/zookeeper-3.4.13/data
dataLogDir=/usr/local/zookeeper-3.4.13/logs
server.1=master:2881:3881
server.2=slave1:2881:3881
server.3=slave2:2881:3881

Copy ZooKeeper to the slave nodes

scp -r /usr/local/zookeeper-3.4.13/ slave1:/usr/local/
scp -r /usr/local/zookeeper-3.4.13/ slave2:/usr/local/

On each node, create a myid file under /usr/local/zookeeper-3.4.13/data; the value must match the server.N id in zoo.cfg

[root@master data]# vi myid
1
[root@slave1 data]# vi myid
2
[root@slave2 data]# vi myid
3
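Equivalently, the files can be created non-interactively, one command per node:

[root@master ~]# echo 1 > /usr/local/zookeeper-3.4.13/data/myid
[root@slave1 ~]# echo 2 > /usr/local/zookeeper-3.4.13/data/myid
[root@slave2 ~]# echo 3 > /usr/local/zookeeper-3.4.13/data/myid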

Update the environment variables (all three nodes)

export PATH
export JAVA_HOME=/usr/local/jdk1.8.0_191
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export HADOOP_HOME=/usr/local/hadoop-3.1.1
export ZOO_HOME=/usr/local/zookeeper-3.4.13
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOO_HOME/bin

Start ZooKeeper on each node

[root@master ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.13/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@master ~]# jps
5360 NameNode
7766 Jps
5704 SecondaryNameNode
7741 QuorumPeerMain
5999 ResourceManager

slave1
[root@slave1 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.13/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@slave1 ~]# jps
37393 NodeManager
39271 Jps
37194 DataNode
39231 QuorumPeerMain

slave2
[root@slave2 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.13/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@slave2 ~]# jps
6674 Jps
4777 DataNode
4973 NodeManager
6638 QuorumPeerMain
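With all three peers up, check the ensemble: zkServer.sh status on each node should report one leader and two followers, and the quorum can be inspected with the ZooKeeper CLI (inside the CLI, ls / shows at least the /zookeeper znode, and later /hbase once HBase is running):

[root@master ~]# zkServer.sh status
[root@master ~]# zkCli.sh -server master:2181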

Install HBase

Extract

tar -zxvf hbase-1.3.1-bin.tar.gz -C /usr/local/

Create the log and tmp directories

mkdir -p /usr/local/hbase-1.3.1/log
mkdir -p /usr/local/hbase-1.3.1/tmp

[root@master conf]# vi /usr/local/hbase-1.3.1/conf/hbase-env.sh

export JAVA_HOME=/usr/local/jdk1.8.0_191
export HBASE_MANAGES_ZK=false
export HADOOP_HOME=/usr/local/hadoop-3.1.1
export ZOO_HOME=/usr/local/zookeeper-3.4.13
export HBASE_LOG_DIR=/usr/local/hbase-1.3.1/log

[root@master conf]# vi /usr/local/hbase-1.3.1/conf/hbase-site.xml

<configuration>
        <property>
            <name>hbase.rootdir</name>
            <value>hdfs://192.168.157.11:9000/hbase</value>
        </property>

        <property>
            <name>hbase.cluster.distributed</name>
            <value>true</value>
        </property>

        <property>
            <name>hbase.master.port</name>
            <value>60000</value>
        </property>

        <property>
            <name>hbase.tmp.dir</name>
            <value>/usr/local/hbase-1.3.1/tmp</value>
        </property>

        <property>
            <name>hbase.zookeeper.quorum</name>
            <value>master,slave1,slave2</value>
        </property>

        <property>
            <name>hbase.zookeeper.property.dataDir</name>
            <value>/usr/local/zookeeper-3.4.13/data</value>
        </property>

        <property>
            <name>hbase.zookeeper.property.clientPort</name>
            <value>2181</value>
        </property>

        <property>
            <name>zookeeper.session.timeout</name>
            <value>120000</value>
        </property>

        <property>
            <name>hbase.regionserver.restart.on.zk.expire</name>
            <value>true</value>
        </property>
</configuration>

Configure regionservers

[root@master conf]# vi regionservers 
slave1
slave2

Copy HBase to the slave nodes

[root@master conf]# scp -r /usr/local/hbase-1.3.1/ slave1:/usr/local/
[root@master conf]# scp -r /usr/local/hbase-1.3.1/ slave2:/usr/local/

Environment variables (all three nodes)

export PATH
export JAVA_HOME=/usr/local/jdk1.8.0_191
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export HADOOP_HOME=/usr/local/hadoop-3.1.1
export ZOO_HOME=/usr/local/zookeeper-3.4.13
export HBASE_HOME=/usr/local/hbase-1.3.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOO_HOME/bin:$ZOO_HOME/sbin:$HBASE_HOME/bin:$HBASE_HOME/sbin

Start HBase (on master)

[root@master ~]# start-hbase.sh
starting master, logging to /usr/local/hbase-1.3.1/log/hbase-root-master-master.out
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
slave2: starting regionserver, logging to /usr/local/hbase-1.3.1/log/hbase-root-regionserver-slave2.out
slave1: starting regionserver, logging to /usr/local/hbase-1.3.1/log/hbase-root-regionserver-slave1.out
slave1: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
slave1: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
slave2: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
slave2: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0

Verification

[root@master ~]# jps
5360 NameNode
5704 SecondaryNameNode
7741 QuorumPeerMain
9053 HMaster
9326 Jps
5999 ResourceManager

[root@slave1 conf]# jps
40080 Jps
37393 NodeManager
39828 HRegionServer
37194 DataNode
39231 QuorumPeerMain

[root@slave2 conf]# jps
4777 DataNode
7418 HRegionServer
7676 Jps
4973 NodeManager
6638 QuorumPeerMain

[root@master ~]# hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/hbase-1.3.1/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/hadoop-3.1.1/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.3.1, r930b9a55528fe45d8edce7af42fef2d35e77677a, Thu Apr  6 19:36:54 PDT 2017

hbase(main):001:0> 
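A minimal smoke test from the shell (the table and column-family names are arbitrary examples), followed by a check that HBase created its root directory on HDFS; the HMaster web UI should also be reachable on its default port (16010 in HBase 1.x):

hbase(main):001:0> status
hbase(main):002:0> create 'test', 'cf'
hbase(main):003:0> put 'test', 'row1', 'cf:a', 'value1'
hbase(main):004:0> scan 'test'
hbase(main):005:0> list

[root@master ~]# hdfs dfs -ls /hbase

http://192.168.157.11:16010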


 
