saltstack自动安装配置redis-3.2.8

一、准备redis自动化配置的文件

即安装一遍redis,然后获取相关文件和配置在salt中执行上线

1、 源码安装redis3.2.8并注册为系统服务

安装依赖

yum install -y tcl

1.1 下载安装包Redis-3.2.8.tar.gz

# cd /usr/local/src
# wget http://download.redis.io/releases/redis-3.2.8.tar.gz

1.2 解压及安装

[root@node2 src]# tar zxf redis-3.2.8.tar.gz
[root@node2 src]# cd redis-3.2.8/src/
[root@node2 redis-3.2.8]# make PREFIX=/usr/local/redis install #指定安装路径

1.3 创建配置文档,修改配置

创建配置文档路径

# mkdir /etc/redis
[root@node2 src]# cp ../redis.conf /etc/redis/redis_6350.conf

以下几个参数常用到

daemonize yes # 后台运行
bind 127.0.0.1 # 绑定ip,需要外网访问时将其注释掉
protected-mode yes # 保护模式,默认是开启的,需要其他客户端链接时,改为no关闭
requirepass redispass # 其他客户端链接时的密码
appendonly yes # 每次更新后记录日志
pidfile /var/run/redis_6350.pid # 如果不是默认的6379端口需要修改该行

1.4 注册系统服务,开机自启

创建启动脚本

# cp ../utils/redis_init_script /etc/rc.d/init.d/redis3

修改redis启动脚本,要修改的地方有

添加 chkconfig 注释

redis-server 、redis-cli 、pidfile、redis.conf路径

如果需要配置密码,还要在停止命令增加参数 -a 指定密码

[root@node2 src]# cat /etc/init.d/redis3 

#!/bin/sh
# chkconfig:   2345 90 10
# description: init script for a single redis instance on port $REDISPORT.
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.

REDISPORT=6350
EXEC=/usr/local/redis/bin/redis-server
CLIEXEC=/usr/local/redis/bin/redis-cli

PIDFILE="/var/run/redis_${REDISPORT}.pid"
CONF="/etc/redis/redis_${REDISPORT}.conf"

# Print the supported sub-commands.
usage(){
    echo "usage: $0 [start|stop|status|restart]"
}

# Start the server unless a pid file says it is already running.
redis_start(){
    if [ -f "$PIDFILE" ]; then
        echo "$PIDFILE exists, process is already running or crashed"
    else
        echo "Starting Redis server..."
        "$EXEC" "$CONF"
    fi
}

# Ask redis to shut down, then wait until its /proc entry disappears.
redis_stop(){
    if [ ! -f "$PIDFILE" ]; then
        echo "$PIDFILE does not exist, process is not running"
    else
        PID=$(cat "$PIDFILE")
        echo "Stopping ..."
        # -a must match the requirepass value set in $CONF.
        "$CLIEXEC" -p "$REDISPORT" -a redispass shutdown
        # /proc/<pid> is a directory while the process lives, so test -d.
        while [ -d "/proc/${PID}" ]; do
            echo "Waiting for Redis to shutdown ..."
            sleep 1
        done
        echo "Redis stopped"
    fi
}

redis_restart(){
    redis_stop
    sleep 1
    redis_start
}

# List redis processes, filtering out this status invocation itself.
redis_status(){
    ps -ef | grep redis | grep -v grep | grep -v status
}

main(){
    case "$1" in
        start)
            redis_start;;
        stop)
            redis_stop;;
        status)
            redis_status;;
        restart)
            redis_restart;;
        *)
            usage;;
    esac
}

main "$1"

保存后执行注册成系统服务:

chkconfig --add redis3
chkconfig redis3 on

完成后,可以使用 service redis3 start|stop 启动关闭redis服务

1.5 添加环境变量:

vim /etc/profile

在最后添加:

PATH=$PATH:/usr/local/redis/bin
export PATH

# 使配置生效
source /etc/profile

# service redis3 start
[root@node2 src]# ps -ef|grep redis
root     20818     1  0 09:57 ?        00:00:00 /usr/local/redis/bin/redis-server 127.0.0.1:6350
root     20827 14125  0 09:58 pts/0    00:00:00 grep --color=auto redis

用redis-cli 链接,set,get正常

[root@node2 src]# redis-cli -p 6350 -a redispass

127.0.0.1:6350> set name 2
OK
127.0.0.1:6350> get name
"2"
127.0.0.1:6350> set jack 18
OK
127.0.0.1:6350> get jack
"18"
127.0.0.1:6350> quit

二、salt相关的配置处理

# mkdir -p /srv/salt/prod/pkg /srv/salt/prod/redis /srv/salt/prod/redis/files 
# cd /srv/salt/prod/pkg

1、 初始化redis相关配置文件

①下载redis-3.2.8.tar.gz上传到/srv/salt/prod/redis/files目录

②配置文件

[root@test7_chat_api_im files]# grep '^[a-Z]' redis_6350.conf 
bind 127.0.0.1
protected-mode yes
port 6350
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6350.pid
loglevel notice
logfile ""
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
requirepass redispass
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

③服务管理脚本

[root@test7_chat_api_im files]# cat redis3 
#!/bin/sh
# chkconfig:   2345 90 10
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.

REDISPORT=6350
EXEC=/usr/local/redis/bin/redis-server
CLIEXEC=/usr/local/redis/bin/redis-cli

PIDFILE="/var/run/redis_${REDISPORT}.pid"
CONF="/etc/redis/redis_${REDISPORT}.conf"

case "$1" in
    start)
        # Do not start a second instance if a pid file is present.
        if [ -f "$PIDFILE" ]; then
            echo "$PIDFILE exists, process is already running or crashed"
        else
            echo "Starting Redis server..."
            "$EXEC" "$CONF"
        fi
        ;;
    stop)
        if [ ! -f "$PIDFILE" ]; then
            echo "$PIDFILE does not exist, process is not running"
        else
            PID=$(cat "$PIDFILE")
            echo "Stopping ..."
            # -a must match the requirepass value set in $CONF.
            "$CLIEXEC" -p "$REDISPORT" -a redispass shutdown
            # /proc/<pid> is a directory while the process lives, so test -d.
            while [ -d "/proc/${PID}" ]; do
                echo "Waiting for Redis to shutdown ..."
                sleep 1
            done
            echo "Redis stopped"
        fi
        ;;
    *)
        echo "Please use start or stop as first argument"
        ;;
esac

2、 编写依赖包安装

vim /srv/salt/prod/pkg/pkg-init.sls

# Tool-chain and library packages needed to compile redis (and other
# C software) from source on the minion.
pkg-init:
  pkg.installed:
    - names:
      - gcc
      - gcc-c++
      - glibc
      - make
      - autoconf
      - openssl
      - openssl-devel
      - pcre
      - pcre-devel
      - glib
      - glib-devel
      - tcl

3、 用户添加模块

# mkdir /srv/salt/prod/user
# vim /srv/salt/prod/user/redis.sls 
# Create the redis group first, then the redis system user in that group.
# The explicit require guarantees gid 6350 exists before user.present runs.
redis-user-group:
  group.present:
    - name: redis
    - gid: 6350

  user.present:
    - name: redis
    - fullname: redis
    - shell: /sbin/nologin
    - uid: 6350
    - gid: 6350
    - require:
      - group: redis-user-group

4、 编写redis状态模块

# cd /srv/salt/prod/redis
vim /srv/salt/prod/redis/install.sls

include:
  - pkg.pkg-init
  - user.redis

# Ship the source tarball, then unpack/compile it and hand ownership
# of the install tree to the redis user. Guarded by "unless" so the
# build only runs when /usr/local/redis is absent.
redis-source-install:
  file.managed:
    - name: /usr/local/src/redis-3.2.8.tar.gz
    - source: salt://redis/files/redis-3.2.8.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    # NOTE: fixed the duplicated "cd cd" typo and use mkdir -p so a
    # pre-existing /etc/redis does not fail the whole command chain.
    - name: cd /usr/local/src && tar zxf redis-3.2.8.tar.gz && cd redis-3.2.8/src/ && make PREFIX=/usr/local/redis install && chown -R redis:redis /usr/local/redis && mkdir -p /etc/redis
    - unless: test -d /usr/local/redis
    - require:
      - user: redis-user-group
      - file: redis-source-install
      - pkg: pkg-init

服务模块

# vim /srv/salt/prod/redis/service.sls 
include:
  - redis.install

# Install the init script and register it with chkconfig exactly once.
redis-init:
  file.managed:
    - name: /etc/init.d/redis3
    - source: salt://redis/files/redis3
    - mode: 755
    - user: root
    - group: root
    - require:
      - cmd: redis-source-install
  cmd.run:
    - name: chkconfig --add redis3
    # grep for the exact service name so an unrelated "redis" service
    # cannot skip the registration.
    - unless: chkconfig --list | grep redis3
    - require:
      - file: redis-init

# Deploy the instance config; makedirs creates /etc/redis if missing
# (idempotent, unlike a bare "mkdir" cmd.run that fails on reruns).
/etc/redis/redis_6350.conf:
  file.managed:
    - source: salt://redis/files/redis_6350.conf
    - user: redis
    - group: redis
    - mode: 644
    - makedirs: True

# Keep redis3 running and enabled; the watch on the config file
# already triggers a restart when it changes ("restart" is not a
# valid service.running argument and was removed).
redis-service:
  service.running:
    - name: redis3
    - enable: True
    - require:
      - cmd: redis-init
    - watch:
      - file: /etc/redis/redis_6350.conf

执行配置测试,没有问题再安装,至此salt安装redis服务已完毕:

# 先测试先处理一些简单的错误

salt 'test4_haili_dev' state.sls redis.service env=prod test=True

salt 'test4_haili_dev' state.sls redis.service env=prod

k8s(kubernetes)部署三个节点的redis cluster

目的

redis cluster 需要6台服务器才能正常运行,由于种种原因,开发或者某些特别的需求,只能在3台服务器上运行redis cluster。在不使用哨兵模式情况下,而使用最新的cluster模式运行redis。

本文仅作为redis部署方式的研究及理解

准备工作

制作redis docker.latest镜像其中包含以下组件:

  1. redis-cli
  2. ruby
  3. redis-trib

打包到镜像上传到阿里镜像服务器中cluster-redis:latest

创建集群操作

3台服务器上各自运行两个redis容器

使用以下编写好的redis-cluster部署文件,可在一台部署出两个不同端口,不同角色的redis容器。

redis-cluster.yaml

# Deployment "redis-blue": 3 replicas, host network, redis on 6379.
# (Reconstructed: the original paste had lost all YAML indentation and
# carried two stray list items left over from an earlier command-form
# spec, which conflicted with the command/args pair.)
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-blue
  labels:
    app: redis
    member: redis-blue
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: redis
        member: redis-blue
    spec:
      hostNetwork: true
      containers:
      - name: redis
        image: registry.cn-hangzhou.aliyuncs.com/wise2c/cluster-redis:latest
        command: ["/bin/sh", "-c"]
        # Persist data under /tmp/data, then start redis in the foreground.
        args: ["echo 'dir /tmp/data' >> /root/redis.conf && /usr/local/bin/redis-server /root/redis.conf"]
        ports:
        - name: redis-port
          containerPort: 6379
        - name: cluster-port
          containerPort: 16379
        volumeMounts:
        - mountPath: /tmp/data
          name: data
      volumes:
      - name: data
        hostPath:
          path: /tmp/redis-blue

# Deployment "redis-green": 3 replicas, host network, redis on 6380.
# (Reconstructed: indentation was lost and the args line was truncated
# mid-path, fusing into the ports key; restored the full redis-server
# invocation. Leading "---" separates this second YAML document.)
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-green
  labels:
    app: redis
    member: redis-green
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: redis
        member: redis-green
    spec:
      hostNetwork: true
      containers:
      - name: redis
        image: registry.cn-hangzhou.aliyuncs.com/wise2c/cluster-redis:latest
        command: ["/bin/sh", "-c"]
        # Rewrite the port to 6380 so both pods can share the host network.
        args: ["sed -i 's/6379/6380/g' /root/redis.conf && echo 'dir /tmp/data' >> /root/redis.conf && /usr/local/bin/redis-server /root/redis.conf"]
        ports:
        - name: redis-port
          containerPort: 6380
        - name: cluster-port
          containerPort: 16380
        volumeMounts:
        - mountPath: /tmp/data
          name: data
      volumes:
      - name: data
        hostPath:
          path: /tmp/redis-green

kubectl create -f redis-cluster.yaml

执行以下脚本,创建出整个redis集群。redis会自动分配哈希槽,使得master与其对应的slave不会出现在同一台服务器上。

create_cluster.sh

#!/usr/bin/env bash
# Collect the endpoints of the 6 running redis pods (3 blue on 6379,
# 3 green on 6380) and feed them to cluster creation.
# Fixes: shebang was missing '#', and three command substitutions were
# never closed; all rewritten as $( ... ).

redis_count=$(kubectl get pod -o wide -l app=redis | grep -c Running)
echo "redis_count:$redis_count"

if [ "$redis_count" -ne 6 ]; then
    echo "the running redis count: ${redis_count} is error" >&2
    exit 1
fi

redis_blue_ips=$(kubectl get pod -o wide -l app=redis -l member=redis-blue | awk 'NR>1{printf $6":6379 "}')
redis_green_ips=$(kubectl get pod -o wide -l app=redis -l member=redis-green | awk 'NR>1{printf $6":6380 "}')
redis_ips="$redis_blue_ips $redis_green_ips"
echo "redis_ips:$redis_ips"

redis_blue_name=$(kubectl get pod -o wide -l app=redis -l member=redis-blue | grep Running | awk '{printf $1" "}')

# One endpoint per line, suitable for redis-trib / cluster create input.
echo "$redis_ips" | awk -F' ' '{for(i=1; i<NF; i++) print $i}'
kubectl create -f redis-cluster.yaml

bash create_cluster.sh

关掉其中一台机器

由于master和slave 不在同一台机器上,当我们直接关掉其中一台vm,比如vm2

这时vm3上,由redis cluster自动恢复vm3slave2(6380) —> master2(6380) 提升为master,集群工作正常,业务不中断。

恢复关掉的的机器

当我们重新启动vm2, 这时候集群的状态:

这时集群工作正常,此时vm3有2个master。如果我们关掉vm3,会让集群中2个master同时下线,导致集群无法自动恢复。

重点:执行以下脚本,提升slave2到master2,恢复集群的自动修复功能。

failover.sh

#!/usr/bin/env bash
# Run CLUSTER FAILOVER inside each running redis-blue pod so the blue
# replicas are promoted back to master, restoring one master per host.
# Fixes: shebang was missing '#', and the original first assignment was
# two fused lines with unterminated backticks/quotes; reconstructed as
# a single comma-separated pod-name list.

redis_names=$(kubectl get pod -o wide -l app=redis -l member=redis-blue | grep Running | awk '{printf $1","}')
# Turn "a,b,c," into "a b c" for the loop below.
redis_array=${redis_names//,/ }

for redis_name in $redis_array; do
    kubectl exec -it "${redis_name}" -- redis-cli cluster failover
done
bash failover.sh

集群自动恢复,变成下面的状态。

集群工作正常,业务不中断。

作者后话

以上的操作,目的是让各台虚拟机上不出现有2个master。当其中一台虚拟机出现宕机,集群就不能正常工作。如果是3台master和3台slave分别在不同的机器刚好错开,redis cluster能自动恢复。最好的解决方案,依然是有6台虚拟机、3台master、3台slave的redis cluster。

To do

我们可以进一步将

1.create-redis.sh
2.failover.sh
3.kubectl

制作为镜像, k8s deployment运行此镜像。实现:

1.创建集群
2.恢复集群的自动修复功能(任意关掉其中一台服务器,业务不中断。每过一秒,检查集群中是否有slave需要提升权限,集群都能正常工作.业务不中断。)

Redis消息队列实现

消息队列

某次在某乎上看到有人提到消息队列的问题,然后有人在回答里提到了Redis,接着便有人在评论里指出:Redis是缓存,不是消息队列。

但不幸的是,Redis的确提供一个简易的消息队列机制,可以用于一些要求不那么高的场合。

方法就是利用Redis的列表类型的push和pop操作。

我对前文所介绍的Redis Cache作了一点简单的扩展,增加了消息队列功能。

实现

代码基本就这么点:

class RedisMQ(RedisCache):
    """Minimal message-queue layer on top of RedisCache.

    Each channel maps to a Redis list: LPUSH enqueues serialized
    messages, BRPOP dequeues them (blocking with a timeout).
    """

    def __init__(self, dbname, host='localhost', port=6379, db=0):
        super(RedisMQ, self).__init__(dbname, host, port, db)

    def push(self, channel, data):
        """Serialize ``data`` and enqueue it on ``channel``."""
        key = self._getkey("channel", channel)
        payload = self.SERIALIZER.dumps(data)
        self.db.lpush(key, payload)

    def pop(self, channel, timeout=5):
        """Blocking dequeue from ``channel``.

        Returns the deserialized message, or None when ``timeout``
        seconds elapse with nothing to read.
        """
        key = self._getkey("channel", channel)
        item = self.db.brpop(key, timeout)
        if not item:
            return None
        return self.SERIALIZER.loads(item[1])


class Channel(object):
    """Thin wrapper binding one channel name to the shared RedisMQ."""

    # Single queue instance shared by every Channel object.
    MQ = RedisMQ("msgqueue")

    def __init__(self, channel):
        self.channel = channel

    def push(self, **kwargs):
        """Publish the keyword arguments as a single message dict."""
        self.MQ.push(self.channel, kwargs)

    def pop(self):
        """Fetch the next message dict, or None on timeout."""
        return self.MQ.pop(self.channel)

用法

消息生产者

ch = Channel("test")
ch.push(a=123,b="hello")

消息消费者,可能是另一个线程,甚至是另一个进程,甚至是另外一台主机——只要它们共用同一个redis即可。

ch = Channel("test")
msg = ch.pop()
while msg:
    # msg: {"a": 123, "b": "hello"}
    msg = ch.pop()
# ch is empty

CentOS7配置redis主从复制方法

1. 准备好4台机器

192.168.42.150 redis-node1 #主
192.168.42.151 redis-node2 #从 
192.168.42.152 redis-node3 #从
192.168.42.153 redis-node4 #从

将主机解析写入hosts文件,分发至每台机器

2. 安装redis,配置好基本配置

(1) 4台机器,分别安装redis

cd /usr/local/src
wget http://192.168.42.26/install_package/down/redis-3.2.3-1.el7.x86_64.rpm
yum install redis-3.2.3-1.el7.x86_64.rpm -y

(2) 4台机分别配置好,配置文件,做好备份

cp /etc/redis.conf{,.back}
vim redis.conf
daemonize yes
bind 192.168.42.150  #改为各个节点的IP

(3) 依照上面设定的从主机,在从主机配置文件中开启从配置(需要配置3台机器)

# slaveof <masterip> <masterport>
slaveof  192.168.42.150 6379

(4) 启动redis-server(4台同时启动)

redis-server /etc/redis.conf

(5) 在主机器上登录redis

[root@redis-node1 ~]# redis-cli -h 192.168.42.150
192.168.42.150:6379> 
192.168.42.150:6379> keys *
1) "magedu"
192.168.42.150:6379> 
192.168.42.150:6379> set ok "verygood!!!"
OK
192.168.42.150:6379> get ok
"verygood!!!"
192.168.42.150:6379>

登录其他3台从服务器

redis-cli -h 192.168.42.151
redis-cli -h 192.168.42.152
redis-cli -h 192.168.42.153
拿153做示例:
[root@redis-node4 src]# redis-cli -h 192.168.42.153
192.168.42.153:6379> keys *
1) "magedu"
192.168.42.153:6379> 
192.168.42.153:6379> get ok
"verygood!!!"
192.168.42.153:6379>

至此为至redis主/从已经实现,在主节点上查看信息

192.168.42.150:6379> INFO Replication
# Replication
role:master
connected_slaves:3
slave0:ip=192.168.42.151,port=6379,state=online,offset=22806,lag=1
slave1:ip=192.168.42.152,port=6379,state=online,offset=22806,lag=1
slave2:ip=192.168.42.153,port=6379,state=online,offset=22806,lag=1
master_repl_offset:22806
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:22805

接下来在四个节点上配置sentinel,实现故障转移。此处实现类似于MariaDB的MHA 在上面我们已经配置好主从复制集群,现在我们需要添加一台机器[192.168.42.154]来做高可用 同样我们需要设置好,IP,主机名,下载安装redis

192.168.42.154 redis-sentinel  #将主机解析追加至其他的4台的hosts文件中,本机也需要一份
cd /usr/local/src
wget http://192.168.42.26/install_package/down/redis-3.2.3-1.el7.x86_64.rpm
yum install redis-3.2.3-1.el7.x86_64.rpm -y

(1)配置sentinel

# sentinel monitor <master-name> <ip> <port> <quorum>    # 其中quorum即法定人数
cp /etc/redis-sentinel.conf{,.back}
vim /etc/redis-sentinel.conf

daemonize yes
sentinel monitor mymaster 192.168.42.150 6379 1
sentinel down-after-milliseconds mymaster 5000
sentinel failover-timeout mymaster 18000
sentinel auth-pass mymaster centos.123 #这是认证选项,我们这里的主节点并没有开启认证

(2)启动sentinel

redis-sentinel /etc/redis-sentinel.conf

查看端口是否已经启动

[root@redis-sentinel src]# ss -tnl
State      Recv-Q Send-Q  Local Address:Port                 Peer Address:Port              
LISTEN     0      128                 *:22                              *:*                  
LISTEN     0      100         127.0.0.1:25                              *:*                  
LISTEN     0      511                 *:26379                           *:*                  
LISTEN     0      128                :::22                             :::*                  
LISTEN     0      100               ::1:25                             :::*                  
LISTEN     0      511                :::26379                          :::*

(3)模拟故障:

192.168.42.150:6379
pkill redis
#查看
[root@redis-node1 ~]# ss -tnl
State      Recv-Q Send-Q                             Local Address:Port                                            Peer Address:Port              
LISTEN     0      128                                            *:22                                                         *:*                  
LISTEN     0      100                                    127.0.0.1:25                                                         *:*                  
LISTEN     0      128                                           :::22                                                        :::*                  
LISTEN     0      100                                          ::1:25

(4)查看故障是否转移

登录192.168.42.151:6379 #巧了,恰好151变成主了
192.168.42.151:6379> 
192.168.42.151:6379> INFO Replication
# Replication
role:master
connected_slaves:2
slave0:ip=192.168.42.153,port=6379,state=online,offset=5413,lag=1
slave1:ip=192.168.42.152,port=6379,state=online,offset=5413,lag=2
master_repl_offset:5413
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:5412
192.168.42.151:6379> 

登录192.168.42.152:6379查看,的确是151变成主了
192.168.42.152:6379> INFO Replication
# Replication
role:slave
master_host:192.168.42.151
master_port:6379
master_link_status:up
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:11402
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
192.168.42.152:6379>

(4)我们再把151的进程杀了,再来看一次,可以看到只有一主一从了

192.168.42.152:6379> INFO Replication
# Replication
role:master
connected_slaves:1
slave0:ip=192.168.42.153,port=6379,state=online,offset=1625,lag=1
master_repl_offset:1768
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:1767
192.168.42.152:6379>

(5)最后我们将杀死的两台redis恢复,再来查看

192.168.42.152:6379> INFO Replication
# Replication
role:master
connected_slaves:3
slave0:ip=192.168.42.153,port=6379,state=online,offset=12763,lag=1
slave1:ip=192.168.42.151,port=6379,state=online,offset=12763,lag=1
slave2:ip=192.168.42.150,port=6379,state=online,offset=12763,lag=0
master_repl_offset:12763
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2
repl_backlog_histlen:12762
192.168.42.152:6379> 

可以看出此时的1主3从又回来了,不过此时主节点,是152

回到150,和151的节点查看效果
192.168.42.150:6379> INFO Replication
# Replication
role:slave
master_host:192.168.42.152
master_port:6379
master_link_status:up
master_last_io_seconds_ago:2
master_sync_in_progress:0
slave_repl_offset:23555
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
192.168.42.150:6379> 



192.168.42.151:6379> INFO Replication
# Replication
role:slave
master_host:192.168.42.152
master_port:6379
master_link_status:up
master_last_io_seconds_ago:1
master_sync_in_progress:0
slave_repl_offset:20782
slave_priority:100
slave_read_only:1
connected_slaves:0
master_repl_offset:0
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0

接下来我们配置有密码认证的主从和高可用

我们之前在配置sentinel,时还记得 #法定人数吗,判断为失效至少需要2个 Sentinel进程的同意,只要同意Sentinel的数量不达标,自动failover就不会执行

    sentinel monitor <master-name> <ip> <port> <quorum>    # quorum即法定人数

我们现在将154这台干脆也做成redis的从服务器,而sentinel,是这5台的集合(150,151,152,153,154)

(1).将153的配置文件推到154一份

scp /etc/redis.conf{,.back} root@192.168.42.154:/etc/
 到154这边稍微修改一下
 vim /etc/redis.conf
 bind 192.168.42.154

因此这次我们是要做密码认证的,因此5台机器都需要加上密码

(2).我们现在查看之前做了 sentinel后,配置文件系统自动帮我们改了,现在我们要恢复到初始状态,重新来过

所有的配置文件还需要设置我们的密码(为了方便管理,我们这里统一设置成magedu) requirepass magedu

所有从节点加上

# masterauth <master-password>
masterauth magedu
slaveof 192.168.42.150 6379

(3).密码设置好之后,启动所有服务

redis-server /etc/redis.conf
ss -tnl

登录150,需要认证了

[root@redis-node1 ~]# redis-cli -h 192.168.42.150
192.168.42.150:6379&gt; keys *
(error) NOAUTH Authentication required.

[root@redis-node1 ~]# redis-cli -h 192.168.42.150
192.168.42.150:6379&gt; keys *
(error) NOAUTH Authentication required.
192.168.42.150:6379&gt; keys *
(error) NOAUTH Authentication required.
192.168.42.150:6379&gt; AUTH magedu
OK

同样的,我们需要登录认证其他机器

在150机器上设置key
192.168.42.150:6379> set zlyc "zai lai yi ci"
OK
192.168.42.150:6379> get zlyc
"zai lai yi ci"
192.168.42.150:6379> 

其他机器读取OK
192.168.42.151:6379> get zlyc
"zai lai yi ci"
192.168.42.151:6379>