redis-cluster
# Prepare the node configuration file
cp redis.conf 6380.conf
vim 6380.conf
# Set the node port
port 6380
# Enable cluster mode on this node
cluster-enabled yes
# File where this node stores its cluster state (created and maintained automatically)
cluster-config-file nodes-6380.conf
# Milliseconds a node may be unreachable before it is considered failing
cluster-node-timeout 5000
# Enable AOF persistence
appendonly yes
# Copy the file several times to create the remaining node configuration files, editing each copy to listen on a different port
[root@k8s-node01 redis-cluster]# cp 6380.conf 6381.conf
[root@k8s-node01 redis-cluster]# cp 6380.conf 6382.conf
[root@k8s-node01 redis-cluster]# cp 6380.conf 6383.conf
[root@k8s-node01 redis-cluster]# cp 6380.conf 6384.conf
[root@k8s-node01 redis-cluster]# cp 6380.conf 6385.conf
[root@k8s-node01 redis-cluster]# cd ../redis-cluster/
[root@k8s-node01 redis-cluster]# mkdir 638{0..5}
[root@k8s-node01 redis-cluster]# ls
6380 6380.conf 6381 6381.conf 6382 6382.conf 6383 6383.conf 6384 6384.conf 6385 6385.conf redis.conf
[root@k8s-node01 redis-cluster]# mv 6380.conf 6380
[root@k8s-node01 redis-cluster]# mv 6381.conf 6381
[root@k8s-node01 redis-cluster]# mv 6382.conf 6382
[root@k8s-node01 redis-cluster]# mv 6383.conf 6383
[root@k8s-node01 redis-cluster]# mv 6384.conf 6384
[root@k8s-node01 redis-cluster]# mv 6385.conf 6385
[root@k8s-node01 redis-cluster]# ls
6380 6381 6382 6383 6384 6385 redis.conf
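The copy-and-rename steps above can also be scripted. A minimal sketch, assuming 6380 appears in 6380.conf only in the port-derived settings (port and cluster-config-file):
for port in 638{1..5}; do
  # derive each node's config from the 6380 template, rewriting the port number
  sed "s/6380/$port/g" 6380/6380.conf > $port/$port.conf
done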
# Use the redis-server binary from the compiled Redis source tree to start each of the six nodes in turn
[root@k8s-node01 ~]# cd /usr/local/src/redis-5.0.8
[root@k8s-node01 redis-5.0.8]# ./src/redis-server ../redis-cluster/6380/6380.conf
24861:C 16 Jun 2020 13:09:20.641 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
24861:C 16 Jun 2020 13:09:20.642 # Redis version=5.0.8, bits=64, commit=00000000, modified=0, pid=24861, just started
24861:C 16 Jun 2020 13:09:20.642 # Configuration loaded
24861:M 16 Jun 2020 13:09:20.642 * Increased maximum number of open files to 10032 (it was originally set to 1024).
24861:M 16 Jun 2020 13:09:20.643 * No cluster configuration found, I'm 467be73fbfb59aef9a78ab6734084e4e4a12d96d
_._
_.-``__ ''-._
_.-`` `. `_. ''-._ Redis 5.0.8 (00000000/0) 64 bit
.-`` .-```. ```\/ _.,_ ''-._
( ' , .-` | `, ) Running in cluster mode
|`-._`-...-` __...-.``-._|'` _.-'| Port: 6380
| `-._ `._ / _.-' | PID: 24861
`-._ `-._ `-./ _.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' | http://redis.io
`-._ `-._`-.__.-'_.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' |
`-._ `-._`-.__.-'_.-' _.-'
`-._ `-.__.-' _.-'
`-._ _.-'
`-.__.-'
24861:M 16 Jun 2020 13:09:22.220 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
24861:M 16 Jun 2020 13:09:22.220 # Server initialized
24861:M 16 Jun 2020 13:09:22.220 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
24861:M 16 Jun 2020 13:09:22.221 * Ready to accept connections
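The startup log flags two kernel settings. Both warnings can be cleared with the commands below (run as root; the THP command is taken verbatim from the warning itself, the somaxconn value from the backlog hint):
# allow the TCP backlog of 511 to be honored
echo 511 > /proc/sys/net/core/somaxconn
# disable Transparent Huge Pages, as the warning suggests
echo never > /sys/kernel/mm/transparent_hugepage/enabled
# add both to /etc/rc.local to persist across reboots, then restart Redis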
# Start the remaining nodes (6381-6385) the same way:
#/usr/local/src/redis-5.0.8/src/redis-server /usr/local/src/redis-cluster/6381/6381.conf
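Alternatively, all six nodes can be launched in one loop. A sketch that relies on redis-server accepting config overrides after the config file path, so each node detaches into the background:
for port in 638{0..5}; do
  /usr/local/src/redis-5.0.8/src/redis-server /usr/local/src/redis-cluster/$port/$port.conf --daemonize yes
done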
# Verify
[root@k8s-node01 redis-5.0.8]# netstat -anlp| grep redis
tcp 0 0 127.0.0.1:16380 0.0.0.0:* LISTEN 24861/./src/redis-s
tcp 0 0 127.0.0.1:16381 0.0.0.0:* LISTEN 24902/redis-server
tcp 0 0 127.0.0.1:16382 0.0.0.0:* LISTEN 24906/redis-server
tcp 0 0 127.0.0.1:16383 0.0.0.0:* LISTEN 24911/redis-server
tcp 0 0 127.0.0.1:16384 0.0.0.0:* LISTEN 24915/redis-server
tcp 0 0 127.0.0.1:16385 0.0.0.0:* LISTEN 24939/redis-server
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 21252/./redis-serve
tcp 0 0 127.0.0.1:6380 0.0.0.0:* LISTEN 24861/./src/redis-s
tcp 0 0 127.0.0.1:6381 0.0.0.0:* LISTEN 24902/redis-server
tcp 0 0 127.0.0.1:6382 0.0.0.0:* LISTEN 24906/redis-server
tcp 0 0 127.0.0.1:6383 0.0.0.0:* LISTEN 24911/redis-server
tcp 0 0 127.0.0.1:6384 0.0.0.0:* LISTEN 24915/redis-server
tcp 0 0 127.0.0.1:6385 0.0.0.0:* LISTEN 24939/redis-server
tcp6 0 0 :::6379 :::* LISTEN 21252/./redis-serve
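Beyond netstat, each instance can be checked directly with a PING:
for port in 638{0..5}; do
  /usr/local/src/redis-5.0.8/src/redis-cli -p $port ping   # each node should reply PONG
done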
# Create the cluster
[root@k8s-node01 redis-5.0.8]# /usr/local/src/redis-5.0.8/src/redis-cli --cluster create 127.0.0.1:6381 127.0.0.1:6382 127.0.0.1:6383 127.0.0.1:6384 127.0.0.1:6385 127.0.0.1:6380 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 127.0.0.1:6385 to 127.0.0.1:6381
Adding replica 127.0.0.1:6380 to 127.0.0.1:6382
Adding replica 127.0.0.1:6384 to 127.0.0.1:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 632eba86cf41797baaf730a13ae8d29249a7de01 127.0.0.1:6381
slots:[0-5460] (5461 slots) master
M: 3597b8d02624087464b0312032d6eb447fed3b92 127.0.0.1:6382
slots:[5461-10922] (5462 slots) master
M: 7317d5b19bdc066a71187fff42070c8635fce381 127.0.0.1:6383
slots:[10923-16383] (5461 slots) master
S: 80499d1b9f025af6f691d13e5074378e26408c1e 127.0.0.1:6384
replicates 7317d5b19bdc066a71187fff42070c8635fce381
S: ada02b816691e1ea6cc4c8b0c26766f673ec507f 127.0.0.1:6385
replicates 632eba86cf41797baaf730a13ae8d29249a7de01
S: 467be73fbfb59aef9a78ab6734084e4e4a12d96d 127.0.0.1:6380
replicates 3597b8d02624087464b0312032d6eb447fed3b92
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 127.0.0.1:6381)
M: 632eba86cf41797baaf730a13ae8d29249a7de01 127.0.0.1:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: 467be73fbfb59aef9a78ab6734084e4e4a12d96d 127.0.0.1:6380
slots: (0 slots) slave
replicates 3597b8d02624087464b0312032d6eb447fed3b92
M: 7317d5b19bdc066a71187fff42070c8635fce381 127.0.0.1:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 3597b8d02624087464b0312032d6eb447fed3b92 127.0.0.1:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 80499d1b9f025af6f691d13e5074378e26408c1e 127.0.0.1:6384
slots: (0 slots) slave
replicates 7317d5b19bdc066a71187fff42070c8635fce381
S: ada02b816691e1ea6cc4c8b0c26766f673ec507f 127.0.0.1:6385
slots: (0 slots) slave
replicates 632eba86cf41797baaf730a13ae8d29249a7de01
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
# Verify (connect with redis-cli -c -p 6381; -c makes the client follow MOVED/ASK redirections between nodes)
127.0.0.1:6381> set dfq duanfuqiang
OK
127.0.0.1:6381> get dfq
"duanfuqiang"
127.0.0.1:6381> CLUSTER INFO
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:561
cluster_stats_messages_pong_sent:568
cluster_stats_messages_sent:1129
cluster_stats_messages_ping_received:563
cluster_stats_messages_pong_received:561
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:1129
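For a per-node view of roles, slot ranges, and replication links, the full node table can be printed with:
127.0.0.1:6381> CLUSTER NODES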
Data Persistence
Reference:
http://www.chenxm.cc/article/38.html
RDB
RDB persistence produces point-in-time snapshots of the dataset at configured intervals.
save (not recommended)
Manual trigger (SAVE blocks the whole server until the snapshot is written, which is why it is not recommended in production):
[root@k8s-node01 6386]# /usr/local/src/redis-5.0.8/src/redis-cli -c -p 6386
127.0.0.1:6386> set dfq duanfuqiang1
OK
127.0.0.1:6386> get dfq
"duanfuqiang1"
127.0.0.1:6386> save
OK
127.0.0.1:6386> quit
[root@k8s-node01 6386]# ls
6386.conf dump.rdb
[root@k8s-node01 6386]# cat dump.rdb
REDIS0009 redis-ver5.0.8
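dump.rdb is a binary file, so cat only shows its leading header. The Unix timestamp of the last successful snapshot can be read with LASTSAVE:
127.0.0.1:6386> LASTSAVE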
Automatic trigger:
vim 6386.conf
# snapshot if >=1 key changed within 900s, >=10 keys within 300s, or >=10000 keys within 60s
save 900 1
save 300 10
save 60 10000
dbfilename dump.rdb
dir /usr/local/src/redis-cluster/6386/
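The snapshot rules actually in effect can be confirmed at runtime; with the three save lines above, CONFIG GET should reply with the flattened rule list:
127.0.0.1:6386> CONFIG GET save
1) "save"
2) "900 1 300 10 60 10000"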
bgsave
Manual trigger (BGSAVE forks a child process and writes the snapshot in the background, so the server keeps serving requests):
[root@k8s-node01 6386]# /usr/local/src/redis-5.0.8/src/redis-cli -c -p 6386
127.0.0.1:6386> BGSAVE
Background saving started
127.0.0.1:6386> quit
[root@k8s-node01 6386]# ls
6386.conf dump.rdb
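Whether the background save has completed can be checked in the persistence section of INFO: rdb_bgsave_in_progress returns to 0 and rdb_last_bgsave_status reports ok once the forked child finishes:
127.0.0.1:6386> INFO persistence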
AOF
AOF persistence logs every write command the server executes; on startup, the server replays these commands to rebuild the dataset.
vim 6386.conf
# enable the append-only file
appendonly yes
appendfilename "appendonly.aof"
# fsync the AOF once per second, a common durability/performance trade-off
appendfsync everysec
dir /usr/local/src/redis-cluster/6386/
# These settings take effect after the server is restarted with this config file
[root@k8s-node01 6386]# ls
6386.conf appendonly.aof dump.rdb
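As writes accumulate, the AOF keeps growing; it can be compacted with a background rewrite. Redis also triggers this automatically based on auto-aof-rewrite-percentage and auto-aof-rewrite-min-size:
127.0.0.1:6386> BGREWRITEAOF
Background append only file rewriting started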
Sentinel Mode
Reference:
https://www.cnblogs.com/L-Test/p/11626124.html
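For orientation, a minimal sentinel configuration sketch; the master name mymaster, the monitored address, and the timeout values are illustrative assumptions (see the reference above for a full walkthrough):
# sentinel.conf
port 26379
# watch the master at 127.0.0.1:6379; 2 sentinels must agree it is down before failover
sentinel monitor mymaster 127.0.0.1 6379 2
sentinel down-after-milliseconds mymaster 5000
sentinel failover-timeout mymaster 60000
# start the sentinel with the compiled binary
/usr/local/src/redis-5.0.8/src/redis-sentinel sentinel.conf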