Big Data Cluster: HBase 2.1.0 High-Availability Installation and Configuration

I. Installation Preparation

Download: http://archive.apache.org/dist/hbase/

Reference documentation: http://hbase.apache.org/book.html

SSH passwordless login: https://blog.csdn.net/qq262593421/article/details/105325593

ZooKeeper installation: https://blog.csdn.net/qq262593421/article/details/106955485

Hadoop cluster installation: https://blog.csdn.net/qq262593421/article/details/106956480

II. Extract and Install

1. Extract the archive

cd /usr/local/hadoop
tar zxpf hbase-2.1.0-bin.tar.gz

2. Create a symbolic link

ln -s hbase-2.1.0 hbase

III. Edit the Configuration Files

regionservers

The regionservers file plays the same role as Hadoop's workers file: list exactly the hosts that run Hadoop DataNodes, and those hosts become the RegionServers (a quick check follows the host list below).

hadoop003
hadoop004
hadoop005
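As a sanity check, the two lists can be compared directly (a minimal sketch; the workers path assumes Hadoop 3 and the install locations used in this guide):

diff <(sort /usr/local/hadoop/hadoop/etc/hadoop/workers) <(sort /usr/local/hadoop/hbase/conf/regionservers)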

hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
	<!--
	<property>
	  <name>zookeeper.znode.parent</name>
	  <value>/hbase/master</value>
	</property>
	-->
	<property>
	  <name>hbase.master</name>
	  <value>60000</value>
	  <!-- In HA mode, only the HMaster port needs to be configured here -->
	</property>
	<property>
	  <name>hbase.tmp.dir</name>
	  <value>/data/cluster/hbase/tmp</value>
	</property>
	<property>
	  <name>hbase.rootdir</name>
	  <value>hdfs://ns1/hbase</value>
	</property>
	<property>
	  <name>hbase.cluster.distributed</name>
	  <value>true</value>
	</property>
	<property>
	  <name>hbase.zookeeper.property.clientPort</name>
	  <value>2181</value>
	</property>
	<property>
	  <name>hbase.zookeeper.quorum</name>
	  <value>hadoop001,hadoop002,hadoop003</value>
	  <!-- <value>hadoop001:2181,hadoop002:2181,hadoop003:2181</value> -->
	</property>
	<property>
	  <name>hbase.zookeeper.property.dataDir</name>
	  <value>/usr/local/zookeeper/data</value>
	</property>
	<property>
	  <name>dfs.datanode.max.transfer.threads</name>
	  <value>4096</value>
	</property>
	<!-- 
	<property>
	  <name>hbase.master</name>
	  <value>hadoop1</value>
	</property>
	-->
	<!-- 
	<property>
      <name>hbase.masters</name>
      <value>hadoop1,hadoop2</value>
      <description>List of master rpc end points for the hbase cluster.</description>
    </property>
    -->
	<property>
	  <name>hbase.unsafe.stream.capability.enforce</name>
	  <value>false</value>
	</property>
	<!-- 
	<property>
	  <name>hbase.lease.recovery.dfs.timeout</name>
	  <value>23000</value>
	  <description>How much time we allow elapse between calls to recover lease.
	  Should be larger than the dfs timeout.</description>
	</property>
	<property>
	  <name>dfs.client.socket-timeout</name>
	  <value>10000</value>
	  <description>Down the DFS timeout from 60 to 10 seconds.</description>
	</property>
	<property>
	  <name>dfs.datanode.socket.write.timeout</name>
	  <value>10000</value>
	  <description>Down the DFS timeout from 8 * 60 to 10 seconds.</description>
	</property>
	<property>
	  <name>ipc.client.connect.timeout</name>
	  <value>3000</value>
	  <description>Down from 60 seconds to 3.</description>
	</property>
	-->

	<!-- 
	<property>
	  <name>ipc.client.connect.max.retries.on.timeouts</name>
	  <value>2</value>
	  <description>Down from 45 seconds to 3 (2 == 3 retries).</description>
	</property>
	<property>
	  <name>dfs.namenode.avoid.read.stale.datanode</name>
	  <value>true</value>
	  <description>Enable stale state in hdfs</description>
	</property>
	<property>
	  <name>dfs.namenode.stale.datanode.interval</name>
	  <value>20000</value>
	  <description>Down from default 30 seconds</description>
	</property>
	<property>
	  <name>dfs.namenode.avoid.write.stale.datanode</name>
	  <value>true</value>
	  <description>Enable stale state in hdfs</description>
	</property>
	-->

	<!-- 
	<property>
	  <name>hbase.security.authentication</name>
	  <value>simple</value>
	</property>
	<property>
	  <name>hbase.security.authorization</name>
	  <value>true</value>
	</property>
	<property>
	  <name>hbase.coprocessor.master.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.coprocessor.region.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.coprocessor.regionserver.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.rpc.engine</name>
	  <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
	</property>
	-->

	<!-- HFile v3 Support -->
	<property>
	  <name>hfile.format.version</name>
	  <value>3</value>
	</property>
	<!-- HBase Superuser -->
	<property>
	  <name>hbase.superuser</name>
	  <value>hbase,admin,root,hdfs,zookeeper,hive,hadoop,hue,impala,spark,kylin</value>
	</property>
	
	<!-- geomesa-hbase -->
	<property>
	  <name>hbase.coprocessor.user.region.classes</name>
	  <value>org.locationtech.geomesa.hbase.coprocessor.GeoMesaCoprocessor</value>
	</property>
	<property>
	  <name>hbase.table.sanity.checks</name>
	  <value>false</value>
	</property>
	<property>
	  <name>hbase.coprocessor.abortonerror</name>
	  <value>false</value>
	</property>

	<!-- tuning: hfile.block.cache.size is left at the default; uncomment to override
	     (an empty <value/> is not a safe way to fall back to the default).
	<property>
	  <name>hfile.block.cache.size</name>
	  <value>0.2</value>
	  <description>Fraction of the heap used for the storefile (block) read cache.
	  It directly affects read performance: if writes are far fewer than reads,
	  0.4-0.5 is fine; if reads and writes are balanced, about 0.3; if writes
	  dominate, simply keep the default.</description>
	</property>
	-->


</configuration>
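The same hbase-site.xml must be present on every node. A minimal sketch for pushing it out (hostnames from this guide; assumes SSH passwordless login is already in place):

for host in hadoop002 hadoop003 hadoop004 hadoop005; do
  scp /usr/local/hadoop/hbase/conf/hbase-site.xml $host:/usr/local/hadoop/hbase/conf/
done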

hbase-env.sh

HBase environment variable settings:

#!/usr/bin/env bash

# JVM options for the HBase daemons: CMS collector plus GC logging
export HBASE_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:/usr/local/hadoop/hbase/logs/jvm-gc-hbase.log"

export JAVA_HOME=/usr/java/jdk1.8
export HBASE_HEAPSIZE=4G

export HADOOP_HOME=/usr/local/hadoop/hadoop
export HBASE_HOME=/usr/local/hadoop/hbase
# Put Hadoop's conf dir on the classpath so HBase can resolve the ns1 nameservice
export HBASE_CLASSPATH=/usr/local/hadoop/hadoop/etc/hadoop
# Use the external ZooKeeper ensemble rather than the one bundled with HBase
export HBASE_MANAGES_ZK=false
export HBASE_PID_DIR=/var/hadoop/pids
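The local directories referenced above are not created automatically; make them on every node before the first start (a sketch using the paths from this guide; the chown is only needed if the daemons run as a dedicated user, assumed here to be hadoop):

mkdir -p /data/cluster/hbase/tmp /var/hadoop/pids /usr/local/hadoop/hbase/logs
chown -R hadoop:hadoop /data/cluster/hbase /var/hadoop/pids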

backup-masters

When HBase starts, the hosts listed in backup-masters are brought up as standby HMasters.

hadoop001
hadoop002
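Once the cluster is running, ZooKeeper shows which HMaster is active and which are standing by (a quick check assuming the default znode parent /hbase):

hbase zkcli
get /hbase/master
ls /hbase/backup-masters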

IV. Environment Variables

Edit the /etc/profile file:

vim /etc/profile

Add the following (note that PATH must include HBase's bin directory):

export HBASE_HOME=/usr/local/hadoop/hbase
export PATH=$PATH:$HBASE_HOME/bin
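Then reload the profile so the variables take effect in the current shell:

source /etc/profile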

V. Start HBase

start-hbase.sh
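The script starts the daemons over SSH according to regionservers and backup-masters. To confirm that everything came up, jps on each node should show HMaster on hadoop001/hadoop002 and HRegionServer on hadoop003 through hadoop005 (a sketch assuming passwordless SSH and jps on the remote PATH):

for host in hadoop001 hadoop002 hadoop003 hadoop004 hadoop005; do
  echo "== $host =="
  ssh $host jps | grep -E 'HMaster|HRegionServer'
done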

VI. Verify the Installation

1. Run HBase shell commands

hbase shell
create 'tb1','cmf1','cmf2','cmf3'
list
list_namespace
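A few more standard commands inside the shell give a fuller picture:

status                          # active/backup masters and region server count
describe 'tb1'                  # column family settings of the table just created
put 'tb1','r1','cmf1:q1','v1'   # write one cell
scan 'tb1'                      # read it back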

 

2. Visit the web UI

http://hadoop001:16010/master-status

http://hadoop002:16010/master-status
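The active master's page shows the full cluster view and lists the other HMaster under "Backup Masters". The same check can be run headlessly (a sketch using curl):

curl -s http://hadoop001:16010/master-status | grep -c 'Backup Master'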
