centos7 部署redis 哨兵模式测试

centos7 部署redis 哨兵模式测试,第1张

centos7 部署redis 哨兵模式测试

redis哨兵模式 (本测试用的是redis-6.2.5.tar.gz)

要求:

三台机器
192.168.1.41  (主)
192.168.1.42  (从)
192.168.1.43  (从)
# Disable firewalld and SELinux on ALL THREE nodes (lab/test setup only —
# in production open ports 6379/26379 instead of disabling the firewall).
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
# One sed per file handles both 'enforcing' and 'permissive' in a single pass.
# On CentOS 7 /etc/sysconfig/selinux is a symlink to /etc/selinux/config,
# but we edit both paths defensively.
sed -i 's/^SELINUX=\(enforcing\|permissive\)/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i 's/^SELINUX=\(enforcing\|permissive\)/SELINUX=disabled/' /etc/selinux/config

1.首先配置redis的主从配置

安装依赖(主节点安装)
# Build Redis once on the master node, then copy the built tree to the replicas.
yum -y install gcc gcc-c++
tar -zxvf redis-6.2.5.tar.gz -C /usr/local/
cd /usr/local/redis-6.2.5
# Build with libc malloc and install binaries into /usr/local/bin.
# (The original ran 'make MALLOC=libc' and then a second bare 'make' —
# the second build was redundant.)
make MALLOC=libc && make install
cd ..
# Ship the already-built tree to both replicas; binaries inside src/ work
# in place, so no rebuild is needed there.
scp -r redis-6.2.5/ root@192.168.1.42:/usr/local/
scp -r redis-6.2.5/ root@192.168.1.43:/usr/local/

配置文件redis.conf (主库一台)

#vi /usr/local/redis-6.2.5/redis.conf
#删除里面配置文件,填入下面配置文件在主节点上
# redis.conf — MASTER node (192.168.1.41).
# NOTE: Redis config files only recognize '#' comments at the START of a line.
# Inline trailing comments (e.g. "masterauth 123456 #...") are parsed as extra
# arguments and make the server ABORT at startup — keep comments on their own lines.
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
loglevel notice
databases 16
always-show-logo no
set-proc-title yes
proc-title-template "{title} {listen-addr} {server-mode}"
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
rdb-del-sync-files no
dir "/usr/local/redis-6.2.5"
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
replica-priority 100
acllog-max-len 128
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no
lazyfree-lazy-user-flush no
oom-score-adj no
oom-score-adj-values 0 200 800
disable-thp yes
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
bind 0.0.0.0
port 6379
# The original file set 'daemonize no' near the top AND 'daemonize yes' here;
# the last occurrence wins, so keep only the effective one.
daemonize yes
pidfile /var/run/redis_6379.pid
# Password used by clients (requirepass) and, symmetrically, by this node
# when it is demoted to replica during a failover (masterauth).
# All three nodes must share the same password for Sentinel failover to work.
requirepass 123456
masterauth 123456
logfile "/var/log/redis-6379.log"

从库配置文件redis.conf (从库两台)

# redis.conf — REPLICA nodes (192.168.1.42 and 192.168.1.43).
# Identical to the master config except for the trailing 'slaveof' line.
# NOTE: Redis config files only recognize '#' comments at the START of a line.
# Inline trailing comments (e.g. "slaveof 192.168.1.41 6379 #...") are parsed
# as extra arguments and make the server ABORT at startup.
protected-mode yes
tcp-backlog 511
timeout 0
tcp-keepalive 300
loglevel notice
databases 16
always-show-logo no
set-proc-title yes
proc-title-template "{title} {listen-addr} {server-mode}"
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
rdb-del-sync-files no
dir "/usr/local/redis-6.2.5"
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
replica-priority 100
acllog-max-len 128
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no
lazyfree-lazy-user-flush no
oom-score-adj no
oom-score-adj-values 0 200 800
disable-thp yes
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
bind 0.0.0.0
port 6379
# Keep a single daemonize directive (the original had both 'no' and 'yes';
# the last occurrence wins).
daemonize yes
pidfile /var/run/redis_6379.pid
requirepass 123456
masterauth 123456
logfile "/var/log/redis-6379.log"
# Replicate from the master node — use YOUR master's IP here.
# (On Redis 6.x 'replicaof' is the preferred spelling; 'slaveof' still works.)
slaveof 192.168.1.41 6379

依次启动redis

先启动master 节点 然后依次启动slave节点

可以把redis的命令
# Copy redis-cli onto the PATH so it can be invoked from any directory.
cd /usr/local/redis-6.2.5/src
cp redis-cli /usr/local/bin/
这样子就可以全局调用命令了
# Start the server with the prepared config (daemonize yes → returns
# immediately), then confirm it is listening on 6379.
cd /usr/local/redis-6.2.5/src
./redis-server /usr/local/redis-6.2.5/redis.conf
ss -anlp |grep 6379

可以在 tail -100f /var/log/redis-6379.log中 看到3台机子连接成功的信息


测试 主从
(1)主节点创建key(192.168.1.41)

redis-cli -p 6379

auth 123456

set name test

exit

(2)从节点查询key(192.168.1.42 或 192.168.1.43)

# Every node in this walkthrough listens on 6379 (see redis.conf 'port 6379');
# the original text said 8000, which is not configured anywhere.
redis-cli -p 6379

auth 123456

get name

exit

能获取到 name 的值 'test' 就代表主从复制成功,成功如下图:

测试完后删除测试数据 (主节点上删除就可以)

2. 配置sentinel 哨兵
三台配置基本一致,但 announce-ip 必须填写各自机器自己的 IP

#cd /usr/local/redis-6.2.5
#vi sentinel.conf 删除原有配置
# sentinel.conf — deployed on all three nodes; identical EXCEPT announce-ip.
protected-mode no
port 26379
dir "/usr/local/redis-6.2.5"
sentinel deny-scripts-reconfig yes
# Monitor the MASTER (192.168.1.41) — the original pointed at replica .42,
# which contradicts the topology above and the INFO output below
# (master0:...address=192.168.1.41:6379). Quorum is 2 of 3 sentinels.
sentinel monitor mymaster 192.168.1.41 6379 2
sentinel down-after-milliseconds mymaster 5000
sentinel failover-timeout mymaster 15000
# Password of the monitored master/replicas (matches requirepass).
sentinel auth-pass mymaster 123456
# The epoch/known-slave entries below are state that Sentinel rewrites into
# this file at runtime; a fresh deployment may omit them entirely.
sentinel config-epoch mymaster 2
sentinel leader-epoch mymaster 2
sentinel known-slave mymaster 192.168.1.42 6379
sentinel known-slave mymaster 192.168.1.43 6379
sentinel current-epoch 2
# announce-ip must be EACH NODE'S OWN address — set .41 / .42 / .43
# respectively; do NOT copy this line verbatim to the other two machines.
sentinel announce-ip "192.168.1.41"

全部启动Sentinel
启动顺序 sentinel 1/2/3

#cd /usr/local/redis-6.2.5/src
# Backgrounding with '&' dies with the login shell; for anything beyond a
# quick test prefer 'daemonize yes' in sentinel.conf (or a systemd unit).
./redis-sentinel /usr/local/redis-6.2.5/sentinel.conf &
ss -anlp|grep 26379
[root@redis1 src]# ./redis-cli -p 26379 INFO Sentinel
# Sentinel
sentinel_masters:1
sentinel_tilt:0
sentinel_running_scripts:0
sentinel_scripts_queue_length:0
sentinel_simulate_failure_flags:0
master0:name=mymaster,status=ok,address=192.168.1.41:6379,slaves=2,sentinels=3

测试主从切换
192.168.1.41上面 (主上面)

日志输出 其他节点上看日志
tail -100f /var/log/redis-6379.log

欢迎分享,转载请注明来源:内存溢出

原文地址:https://54852.com/zaji/5659958.html

(0)
打赏 微信扫一扫微信扫一扫 支付宝扫一扫支付宝扫一扫
上一篇 2022-12-16
下一篇2022-12-16

发表评论

登录后才能评论

评论列表(0条)

    保存