Table of Contents
- Creating the image
- File structure
- dockerfile
- entrypoint.sh
- function
- generatefile
- Building the image
- Running the image
- Summary
- Appendix figures
The previous article walked through setting up repmgr and achieving automatic failover. This article shows how to build a Patroni cluster in containers. As an out-of-the-box high-availability tool for PostgreSQL, Patroni is being adopted by more and more vendors for cloud environments.
The basic Patroni architecture is shown in the figure:
etcd acts as the distributed registry and handles leader election for the cluster; vip-manager assigns a floating IP (VIP) to the primary node; Patroni bootstraps, runs, and manages the cluster and can be accessed from the command line with patronictl.
The overall flow:
1. Start the etcd cluster first; this example uses three etcd members.
2. Once the etcd cluster is healthy, start Patroni, which runs leader election; the follower nodes then synchronize data from the leader.
3. Start vip-manager. It reads the value stored under the /${service_name}/${cluster_name}/leader key in etcd to determine whether the current node is the primary; if so, it assigns the VIP to this node, which then serves external read/write traffic (see the example after this list).
Note: in a production environment, etcd should be deployed in dedicated containers and exposed as an independent service.
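For example, with service_name=service and cluster_name=patronicluster (the values passed to docker run later in this article), the leader key can be inspected from any node roughly as shown below; whether the v2 or v3 etcdctl mode is needed depends on which etcd API Patroni and vip-manager are configured to use:
etcdctl get /service/patronicluster/leader
# expected value: the host name of the current primary, e.g. patroni1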
Creating the image
File structure
dockerfile is the main image definition; the Docker engine builds the image into the local repository from it. entrypoint.sh is the container entry point and drives the overall logic. function is the entry file for the actual work: it starts etcd, monitors the health of the etcd cluster, and starts Patroni and vip-manager. generatefile produces the configuration files for the container, covering etcd, Patroni, and vip-manager.
The directory layout looks roughly like this:
Note: build the database and Patroni installation packages yourself.
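A sketch of the build context, reconstructed from the COPY instructions in the dockerfile and the files sourced by the scripts (the exact contents of lib/, bin/, share/, include/ and patroni/ depend on the packages you build yourself):
.
├── dockerfile
├── entrypoint.sh
├── runtime/
│   ├── function
│   ├── generatefile
│   └── env-defaults
├── etcd/
│   ├── etcd
│   └── etcdctl
├── vip-manager/
│   └── vip-manager
├── patroni/        (offline pip packages: 1/, 2/, 3/)
├── bin/            (PostgreSQL binaries)
├── lib/
├── include/
└── share/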
dockerfile
FROM centos:7
MAINTAINER wangzhibin <wangzhibin>
ENV user="postgresql" \
    password=123456 \
    group=postgresql
RUN useradd ${user} \
    && chown -R ${user}:${group} /home/${user} \
    && yum -y update && yum install -y iptables sudo net-tools iproute openssh-server openssh-clients which vim crontabs
# Install etcd
COPY etcd/etcd /usr/sbin
COPY etcd/etcdctl /usr/sbin
# Install the database
COPY lib/ /home/${user}/lib
COPY include/ /home/${user}/include
COPY share/ /home/${user}/share
COPY bin/ /home/${user}/bin/
COPY patroni/ /home/${user}/patroni
# Install vip-manager
COPY vip-manager/vip-manager /usr/sbin
# Install the startup scripts
COPY runtime/ /home/${user}/runtime
COPY entrypoint.sh /sbin/entrypoint.sh
# Set environment variables
ENV LD_LIBRARY_PATH /home/${user}/lib
ENV PATH /home/${user}/bin:$PATH
ENV ETCDCTL_API=3
# Install patroni
RUN yum -y install epel-release python-devel && yum -y install python-pip \
    && pip install /home/${user}/patroni/1/pip-20.3.3.tar.gz \
    && pip install /home/${user}/patroni/1/psycopg2-2.8.6-cp27-cp27mu-linux_x86_64.whl \
    && pip install --no-index --find-links=/home/${user}/patroni/2/ -r /home/${user}/patroni/2/requirements.txt \
    && pip install /home/${user}/patroni/3/patroni-2.0.1-py2-none-any.whl
# Fix permissions
RUN chmod 755 /sbin/entrypoint.sh \
    && mkdir /home/${user}/etcddata \
    && chown -R ${user}:${group} /home/${user} \
    && echo 'root:root123456' | chpasswd \
    && chmod 755 /usr/sbin/etcd \
    && chmod 755 /usr/sbin/etcdctl \
    && chmod 755 /usr/sbin/vip-manager
# Configure sudo
RUN chmod 777 /etc/sudoers \
    && sed -i '/## Allow root to run any commands anywhere/a '${user}' ALL=(ALL) NOPASSWD:ALL' /etc/sudoers \
    && chmod 440 /etc/sudoers
# Switch to the unprivileged user
USER ${user}
# Switch the working directory
WORKDIR /home/${user}
# Start the entry point
CMD ["/bin/bash", "/sbin/entrypoint.sh"]
entrypoint.sh
#!/bin/bash
set -e
# shellcheck source=runtime/function
source "/home/${user}/runtime/function"
configure_patroni
function
#!/bin/bash
set -e
source /home/${user}/runtime/env-defaults
source /home/${user}/runtime/generatefile
pg_datadir=/home/${user}/pgdata
pg_bindir=/home/${user}/bin
configure_patroni()
{
# Generate the configuration files
generate_etcd_conf
generate_patroni_conf
generate_vip_conf
# Start etcd and wait until the cluster is healthy
etcdcount=${etcd_count}
count=0
ip_temp=""
array=(${hostlist//,/ })
for host in ${array[@]}
do
# use the etcd client port (2379) for etcdctl health checks
ip_temp+="http://${host}:2379,"
done
etcd --config-file=/home/${user}/etcd.yml >/home/${user}/etcddata/etcd.log 2>&1 &
while [ $count -lt $etcdcount ]
do
# "|| true" keeps set -e from aborting while members are still starting up
line=$(etcdctl --endpoints=${ip_temp%?} endpoint health -w json || true)
# count how many members report "health":true
count=$(echo "$line" | awk -F"\"health\":true" '{print NF-1}')
echo "waiting for etcd cluster"
sleep 5
done
# Start patroni
patroni /home/${user}/postgresql.yml > /home/${user}/patroni/patroni.log 2>&1 &
# Start vip-manager (sudo is needed to assign the VIP to the interface)
sudo vip-manager --config /home/${user}/vip.yml
}
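The health check above counts how many members report "health":true in the JSON that etcdctl prints (one object per endpoint). A self-contained illustration of the awk counting, using a hand-written sample string:
line='[{"endpoint":"http://patroni1:2379","health":true},{"endpoint":"http://patroni2:2379","health":true},{"endpoint":"http://patroni3:2379","health":true}]'
echo "$line" | awk -F"\"health\":true" '{print NF-1}'
# prints 3, i.e. all three members are healthy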
generatefile
#!/bin/bash
set -e
hostname="`hostname`"
hostip=`ping ${hostname} -c 1 -w 1 | sed '1{s/[^(]*(//;s/).*//;q}'`
#generate etcd
generate_etcd_conf()
{
echo "name : ${hostname}" >> /home/${user}/etcd.yml
echo "data-dir: /home/${user}/etcddata" >> /home/${user}/etcd.yml
echo "listen-client-urls: http://0.0.0.0:2379" >> /home/${user}/etcd.yml
echo "advertise-client-urls: http://${hostip}:2379" >> /home/${user}/etcd.yml
echo "listen-peer-urls: http://0.0.0.0:2380" >> /home/${user}/etcd.yml
echo "initial-advertise-peer-urls: http://${hostip}:2380" >> /home/${user}/etcd.yml
ip_temp="initial-cluster: "
array=(${hostlist//,/ })
for host in ${array[@]}
do
ip_temp+="${host}=http://${host}:2380,"
done
echo ${ip_temp%?} >> /home/${user}/etcd.yml
echo "initial-cluster-token: etcd-cluster-token" >> /home/${user}/etcd.yml
echo "initial-cluster-state: new" >> /home/${user}/etcd.yml
}
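# Example: on a node named patroni1 with hostlist=patroni1,patroni2,patroni3, the file
# generated above looks roughly like this (172.22.1.11 stands in for whatever IP the
# ping lookup returned for this container):
#
#   name : patroni1
#   data-dir: /home/postgresql/etcddata
#   listen-client-urls: http://0.0.0.0:2379
#   advertise-client-urls: http://172.22.1.11:2379
#   listen-peer-urls: http://0.0.0.0:2380
#   initial-advertise-peer-urls: http://172.22.1.11:2380
#   initial-cluster: patroni1=http://patroni1:2380,patroni2=http://patroni2:2380,patroni3=http://patroni3:2380
#   initial-cluster-token: etcd-cluster-token
#   initial-cluster-state: new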
#generate patroni
generate_patroni_conf()
{
echo "scope: ${cluster_name}" >> /home/${user}/postgresql.yml
echo "namespace: /${service_name}/ " >> /home/${user}/postgresql.yml
echo "name: ${hostname} " >> /home/${user}/postgresql.yml
echo "restapi: " >> /home/${user}/postgresql.yml
echo " listen: ${hostip}:8008 " >> /home/${user}/postgresql.yml
echo " connect_address: ${hostip}:8008 " >> /home/${user}/postgresql.yml
echo "etcd: " >> /home/${user}/postgresql.yml
echo " host: ${hostip}:2379 " >> /home/${user}/postgresql.yml
echo " username: ${etcd_user} " >> /home/${user}/postgresql.yml
echo " password: ${etcd_passwd} " >> /home/${user}/postgresql.yml
echo "bootstrap: " >> /home/${user}/postgresql.yml
echo " dcs: " >> /home/${user}/postgresql.yml
echo " ttl: 30 " >> /home/${user}/postgresql.yml
echo " loop_wait: 10 " >> /home/${user}/postgresql.yml
echo " retry_timeout: 10 " >> /home/${user}/postgresql.yml
echo " maximum_lag_on_failover: 1048576 " >> /home/${user}/postgresql.yml
echo " postgresql: " >> /home/${user}/postgresql.yml
echo " use_pg_rewind: true " >> /home/${user}/postgresql.yml
echo " use_slots: true " >> /home/${user}/postgresql.yml
echo " parameters: " >> /home/${user}/postgresql.yml
echo " initdb: " >> /home/${user}/postgresql.yml
echo " - encoding: utf8 " >> /home/${user}/postgresql.yml
echo " - data-checksums " >> /home/${user}/postgresql.yml
echo " pg_hba: " >> /home/${user}/postgresql.yml
echo " - host replication ${user} 0.0.0.0/0 md5 " >> /home/${user}/postgresql.yml
echo " - host all all 0.0.0.0/0 md5 " >> /home/${user}/postgresql.yml
echo "postgresql: " >> /home/${user}/postgresql.yml
echo " listen: 0.0.0.0:5432 " >> /home/${user}/postgresql.yml
echo " connect_address: ${hostip}:5432 " >> /home/${user}/postgresql.yml
echo " data_dir: ${pg_datadir} " >> /home/${user}/postgresql.yml
echo " bin_dir: ${pg_bindir} " >> /home/${user}/postgresql.yml
echo " pgpass: /tmp/pgpass " >> /home/${user}/postgresql.yml
echo " authentication: " >> /home/${user}/postgresql.yml
echo " replication: " >> /home/${user}/postgresql.yml
echo " username: ${user} " >> /home/${user}/postgresql.yml
echo " password: ${passwd} " >> /home/${user}/postgresql.yml
echo " superuser: " >> /home/${user}/postgresql.yml
echo " username: ${user} " >> /home/${user}/postgresql.yml
echo " password: ${passwd} " >> /home/${user}/postgresql.yml
echo " rewind: " >> /home/${user}/postgresql.yml
echo " username: ${user} " >> /home/${user}/postgresql.yml
echo " password: ${passwd} " >> /home/${user}/postgresql.yml
echo " parameters: " >> /home/${user}/postgresql.yml
echo " unix_socket_directories: '.' " >> /home/${user}/postgresql.yml
echo " wal_level: hot_standby " >> /home/${user}/postgresql.yml
echo " max_wal_senders: 10 " >> /home/${user}/postgresql.yml
echo " max_replication_slots: 10 " >> /home/${user}/postgresql.yml
echo "tags: " >> /home/${user}/postgresql.yml
echo " nofailover: false " >> /home/${user}/postgresql.yml
echo " noloadbalance: false " >> /home/${user}/postgresql.yml
echo " clonefrom: false " >> /home/${user}/postgresql.yml
echo " nosync: false " >> /home/${user}/postgresql.yml
}
#........ remaining functions omitted
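The omitted generate_vip_conf is not reproduced here. Purely as an illustration of what it might contain, below is a hypothetical sketch that writes vip.yml using 1.0-style vip-manager keys; the key names and the /24 netmask are assumptions and may not match the vip-manager binary bundled in this image, while vip and net_device come from the docker run environment shown later:
generate_vip_conf()
{
# Hypothetical sketch only -- key names follow vip-manager 1.0 and may differ for other releases
echo "interval: 1000" >> /home/${user}/vip.yml
echo "trigger-key: /${service_name}/${cluster_name}/leader" >> /home/${user}/vip.yml
echo "trigger-value: ${hostname}" >> /home/${user}/vip.yml
echo "ip: ${vip}" >> /home/${user}/vip.yml
echo "netmask: 24" >> /home/${user}/vip.yml
echo "interface: ${net_device}" >> /home/${user}/vip.yml
echo "dcs-type: etcd" >> /home/${user}/vip.yml
echo "dcs-endpoints:" >> /home/${user}/vip.yml
echo "  - http://${hostip}:2379" >> /home/${user}/vip.yml
echo "etcd-user: ${etcd_user}" >> /home/${user}/vip.yml
echo "etcd-password: ${etcd_passwd}" >> /home/${user}/vip.yml
}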
Building the image
docker build -t patroni .
Running the image
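The containers below attach to a user-defined bridge network named my_net3; if it does not exist yet, create it first (the subnet is an assumption, chosen so that it contains the example VIP 172.22.1.88):
docker network create --subnet=172.22.1.0/24 my_net3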
Run container node 1:
docker run --privileged --name patroni1 -itd --hostname patroni1 --net my_net3 --restart always --env 'cluster_name=patronicluster' --env 'service_name=service' --env 'etcd_user=etcduser' --env 'etcd_passwd=etcdpasswd' --env 'passwd=zalando' --env 'hostlist=patroni1,patroni2,patroni3' --env 'vip=172.22.1.88' --env 'net_device=eth0' --env 'etcd_count=3' patroni
Run container node 2:
docker run --privileged --name patroni2 -itd --hostname patroni2 --net my_net3 --restart always --env 'cluster_name=patronicluster' --env 'service_name=service' --env 'etcd_user=etcduser' --env 'etcd_passwd=etcdpasswd' --env 'passwd=zalando' --env 'hostlist=patroni1,patroni2,patroni3' --env 'vip=172.22.1.88' --env 'net_device=eth0' --env 'etcd_count=3' patroni
Run container node 3:
docker run --privileged --name patroni3 -itd --hostname patroni3 --net my_net3 --restart always --env 'cluster_name=patronicluster' --env 'service_name=service' --env 'etcd_user=etcduser' --env 'etcd_passwd=etcdpasswd' --env 'passwd=zalando' --env 'hostlist=patroni1,patroni2,patroni3' --env 'vip=172.22.1.88' --env 'net_device=eth0' --env 'etcd_count=3' patroni
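Once all three containers are up, the cluster and the VIP can be verified from any node; the config path follows from the scripts above, and patronictl is installed together with patroni:
docker exec -it patroni1 patronictl -c /home/postgresql/postgresql.yml list
docker exec -it patroni1 sudo ip addr show eth0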
Summary
This walkthrough is intended for a test environment only, to demonstrate a fully containerized etcd + Patroni + vip-manager setup. In a real deployment, etcd should run in separate containers as an independent distributed cluster, the PostgreSQL data directory should be mapped to local or network storage, and the containers should be managed with an orchestration tool such as docker-compose, Docker Swarm, or Kubernetes.
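For instance, the three docker run commands above could be expressed as a docker-compose file along these lines (a sketch only, reusing the image name, network, and environment variables from this article):
version: "3"
services:
  patroni1:
    image: patroni
    hostname: patroni1
    privileged: true
    restart: always
    networks: [my_net3]
    environment:
      cluster_name: patronicluster
      service_name: service
      etcd_user: etcduser
      etcd_passwd: etcdpasswd
      passwd: zalando
      hostlist: patroni1,patroni2,patroni3
      vip: 172.22.1.88
      net_device: eth0
      etcd_count: "3"
  # patroni2 and patroni3 are declared the same way, each with its own name and hostname
networks:
  my_net3:
    external: true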
Appendix figures
etcd cluster status, as shown in the figure:
Patroni cluster status, as shown in the figure:
vip-manager status, as shown in the figure: