Add rendered example

This commit is contained in:
Daniel Berteaud 2024-01-08 13:05:47 +01:00
parent 00561f2ccf
commit 7d36543184
25 changed files with 1286 additions and 0 deletions

View File

@ -0,0 +1,12 @@
Kind = "service-intentions"
Name = "postgres"
Sources = [
{
Name = "postgres-manage"
Action = "allow"
},
{
Name = "traefik"
Action = "allow"
}
]

View File

@ -0,0 +1,6 @@
Kind = "service-resolver"
Name = "postgres-master"
Redirect {
Service = "postgres"
ServiceSubset = "master"
}

View File

@ -0,0 +1,6 @@
Kind = "service-resolver"
Name = "postgres-replica"
Redirect {
Service = "postgres"
ServiceSubset = "replica"
}

View File

@ -0,0 +1,11 @@
Kind = "service-resolver"
Name = "postgres"
DefaultSubset = "master"
Subsets = {
"master" = {
filter = "\"master\" in Service.Tags"
}
"replica" = {
filter = "\"replica\" in Service.Tags"
}
}

View File

@ -0,0 +1,15 @@
node_prefix "" {
policy = "read"
}
service "postgres" {
policy = "write"
}
service "postgres-sidecar-proxy" {
policy = "write"
}
key_prefix "service/postgres" {
policy = "write"
}
session_prefix "" {
policy = "write"
}

View File

@ -0,0 +1,32 @@
# Build stage: fetch and checksum-verify the ldap2pg static binary.
# Pin the base tag instead of the implicit :latest (hadolint DL3007)
FROM alpine:3.19 AS ldap2pg
ARG LDAP2PG_VERSION=6.0
RUN set -eux &&\
    cd /tmp &&\
    apk --no-cache add ca-certificates curl &&\
    curl -sSLO https://github.com/dalibo/ldap2pg/releases/download/v${LDAP2PG_VERSION}/ldap2pg_${LDAP2PG_VERSION}_linux_amd64.tar.gz &&\
    curl -sSLO https://github.com/dalibo/ldap2pg/releases/download/v${LDAP2PG_VERSION}/ldap2pg_${LDAP2PG_VERSION}_checksums.txt &&\
    # Verify the tarball against the published sha256 before extracting
    grep ldap2pg_${LDAP2PG_VERSION}_linux_amd64.tar.gz ldap2pg_${LDAP2PG_VERSION}_checksums.txt | sha256sum -c &&\
    tar xvzf ldap2pg_${LDAP2PG_VERSION}_linux_amd64.tar.gz &&\
    chown root:root ldap2pg &&\
    chmod 755 ldap2pg

# Runtime image
FROM danielberteaud/alpine:23.12-3
# MAINTAINER is deprecated (hadolint DL4000): use a LABEL instead
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
ENV LANG=fr_FR.utf8 \
    TZ=Europe/Paris \
    PGHOST=localhost \
    PGPORT=5432 \
    PGUSER=postgres \
    LDAP2PG_MODE=dry \
    LDAP2PG_CRON=
COPY --from=ldap2pg /tmp/ldap2pg /usr/local/bin/ldap2pg
RUN set -eux &&\
    apk --no-cache upgrade &&\
    apk --no-cache add postgresql15-client ca-certificates supercronic
COPY root/ /
CMD ["run.sh"]

View File

@ -0,0 +1,42 @@
#!/bin/sh
# Create databases (and their owner roles) declared through PG_DB_<n> env vars.
# For each index n: PG_DB_<n> is the database name, with optional
# PG_DB_<n>_OWNER / _ENCODING / _TEMPLATE / _LOCALE / _EXTENSIONS settings.
# Changes are only applied when LDAP2PG_MODE=real; otherwise intent is printed.
set -euo pipefail
# Enumerate the numeric indexes of all PG_DB_<n> variables present in the env
for IDX in $(printenv | grep -E '^PG_DB_([0-9]+)=' | sed -E 's/^PG_DB_([0-9]+)=.*/\1/'); do
DB_NAME=$(printenv PG_DB_${IDX})
echo "Found DB ${DB_NAME} to create"
# Optional per-database settings with defaults (owner defaults to the DB name,
# locale to the image's LANG)
DB_OWNER=$(printenv PG_DB_${IDX}_OWNER || echo "${DB_NAME}")
DB_ENCODING=$(printenv PG_DB_${IDX}_ENCODING || echo "UTF8")
DB_TEMPLATE=$(printenv PG_DB_${IDX}_TEMPLATE || echo "")
DB_LOCALE=$(printenv PG_DB_${IDX}_LOCALE || echo "${LANG}")
DB_EXTENSIONS=$(printenv PG_DB_${IDX}_EXTENSIONS || echo "")
if [ "${LDAP2PG_MODE}" = "real" ]; then
echo "Create postgres role ${DB_OWNER} if needed"
# \gexec executes the generated CREATE ROLE statement only when the
# role doesn't already exist (idempotent create)
psql <<_EOSQL
SELECT 'CREATE ROLE "${DB_OWNER}"'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '${DB_OWNER}')\gexec
_EOSQL
TEMPLATE=""
if [ "${DB_TEMPLATE}" != "" ]; then
TEMPLATE="TEMPLATE ${DB_TEMPLATE}"
fi
echo "Create postgres database ${DB_NAME} (OWNER \"${DB_OWNER}\" ENCODING \"${DB_ENCODING}\" LOCALE \"${DB_LOCALE}\" ${TEMPLATE}) if needed"
# Same \gexec trick: CREATE DATABASE only if it doesn't exist yet
psql <<_EOSQL
SELECT 'CREATE DATABASE "${DB_NAME}" OWNER "${DB_OWNER}" ENCODING "${DB_ENCODING}" LOCALE "${DB_LOCALE}" ${TEMPLATE}'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '${DB_NAME}')\gexec
_EOSQL
# Grant function privileges to the owning role by default
psql -d ${DB_NAME} <<_EOSQL
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON FUNCTIONS TO pg_database_owner;
_EOSQL
# Create requested extensions (comma-separated list)
if [ -n "${DB_EXTENSIONS}" ]; then
for EXT in $(echo ${DB_EXTENSIONS} | sed -e 's/,/\n/g'); do
psql -d ${DB_NAME} <<_EOSQL
CREATE EXTENSION IF NOT EXISTS ${EXT};
_EOSQL
done
fi
else
echo "Would create user ${DB_OWNER} and database ${DB_NAME} (OWNER \"${DB_OWNER}\" ENCODING \"${DB_ENCODING}\" LOCALE \"${DB_LOCALE}\"), but we're running in dry mode"
fi
done

View File

@ -0,0 +1,20 @@
#!/bin/sh
# Apply PostgreSQL privileges with ldap2pg (real or dry mode), then optionally
# keep re-applying them on a schedule via supercronic.
set -euo pipefail
# Only do anything when a config file is available
if [ -e "${LDAP2PG_CONFIG}" ]; then
if [ "${LDAP2PG_MODE}" = "real" ]; then
echo "Applying privileges with ldap2pg"
ldap2pg --real --config ${LDAP2PG_CONFIG}
else
echo "Running ldap2pg in dry mode"
ldap2pg --config ${LDAP2PG_CONFIG}
fi
# Run cron if needed
# NOTE(review): the cron job always passes --real, even when LDAP2PG_MODE=dry
# — confirm this is intentional
if [ -n "${LDAP2PG_CRON}" ]; then
echo "Running ldap2pg as a cron job (${LDAP2PG_CRON})"
echo "${LDAP2PG_CRON} ldap2pg --real --config ${LDAP2PG_CONFIG}" > /dev/shm/cron
supercronic /dev/shm/cron
fi
fi

View File

@ -0,0 +1,21 @@
FROM danielberteaud/postgres:15.23.12-5
# MAINTAINER is deprecated (hadolint DL4000): use a LABEL instead
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
ARG PATRONI_VERSION=3.2.1
ENV EDITOR=nano
# Root needed for package installation only
USER root
RUN set -eux &&\
    dnf -y update &&\
    dnf -y install jq python3 python3-psycopg2 python3-pip nano zstd &&\
    # Install patroni with its consul DCS backend, pinned to a known version
    pip --no-cache-dir install patroni["consul"]==${PATRONI_VERSION} &&\
    dnf -y clean all &&\
    rm -rf /var/cache/yum/* /var/log/yum/* /tmp/pre.txt /tmp/post.txt /var/lib/dnf/history* &&\
    # Remove pg-init and pg-conf from base image as patroni handle this \
    rm -f /entrypoint.d/50-initdb.sh /entrypoint.d/60-pg-conf.sh
COPY root/ /
# Drop privileges back to the postgres user for runtime
USER postgres
CMD ["patroni"]

View File

@ -0,0 +1,8 @@
#!/bin/sh
# Drop every PG_CONF_* variable inherited from the parent postgres image:
# those settings are not used when patroni manages the configuration.
set -eu
for CONF_VAR in $(printenv | grep -E '^PG_CONF_' | sed -E 's/^(PG_CONF_[^=]+)=.*/\1/'); do
  unset ${CONF_VAR}
done

View File

@ -0,0 +1,10 @@
#!/bin/sh
# Wrapper: exec the given command only when the local patroni instance is the
# master; otherwise print why the command is skipped and exit 0.
set -euo pipefail
# -s: keep curl's progress meter out of the output
# -k: patroni's REST API uses a certificate from a private CA
ROLE=$(curl -sk https://localhost:8080/health | jq -r .role)
if [ "${ROLE}" = "master" ]; then
exec "$@"
else
# "$*" (quoted) instead of bare $@: avoids word-splitting/globbing of args
echo "Not running $* as our instance is ${ROLE}"
fi

View File

@ -0,0 +1,27 @@
FROM danielberteaud/alpine:23.12-3
# MAINTAINER is deprecated (hadolint DL4000): use a LABEL instead
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
# Source / destination PostgreSQL major versions, overridable at build time
ARG PG_FROM= \
    PG_TO=
ENV LANG=fr_FR.utf8 \
    TZ=Europe/Paris
# NOTE(review): the "walg" stage/image referenced here is not defined in this
# Dockerfile — confirm the build provides it (named build context or earlier stage)
COPY --from=walg /usr/local/bin/wal-g /usr/local/bin/wal-g
RUN set -eux &&\
    apk --no-cache upgrade &&\
    # Install every supported major version so pg_upgrade can read the old
    # cluster and write the new one.
    # Fixes vs. original: the loop variable is VER (PG_VERSION was undefined),
    # each iteration ends with ';' before 'done', and 'done' chains with '&&'
    # so the following apk command stays inside this RUN.
    for VER in 12 13 14 15; do \
        apk --no-cache add postgresql${VER} \
            postgresql${VER}-client \
            postgresql${VER}-contrib ;\
    done &&\
    apk --no-cache add icu-data-full \
        tzdata &&\
    mkdir -p /run/postgresql &&\
    chown -R postgres:postgres /run/postgresql
COPY root/ /
USER postgres
CMD ["pg-major-upgrade"]

View File

@ -0,0 +1,56 @@
#!/bin/sh
# Upgrade a PostgreSQL data directory to a new major version with pg_upgrade
# (Alpine /usr/libexec/postgresqlNN layout).
# Requires: PG_FROM, PG_TO (major versions), PG_BASE_DATA, DO_PG_UPGRADE=1.
set -euo pipefail
# Sanity checks: both versions must be set and the source data dir must exist.
# ${VAR:-} keeps 'set -u' from aborting before we can print a useful message.
if [ -z "${PG_FROM:-}" ]; then
echo "You must set PG_FROM env var to the source version"
exit 1
elif [ -z "${PG_TO:-}" ]; then
echo "You must set PG_TO env var to the destination version"
exit 1
elif [ ! -d "${PG_BASE_DATA}/${PG_FROM}" ]; then
echo "Source data dir ${PG_BASE_DATA}/${PG_FROM} must already exist"
exit 1
fi
# Safety switch: do nothing unless explicitly requested.
# Fix: the original printed the warning but then ran the upgrade anyway —
# it must stop here.
if [ "${DO_PG_UPGRADE:-}" != "1" ]; then
echo "Not running the upgrade. Please set DO_PG_UPGRADE=1"
exit 0
fi
cd ${PG_BASE_DATA}
echo "Creating new data dir for version ${PG_TO}"
mkdir -p ${PG_BASE_DATA}/${PG_TO}
chmod 700 ${PG_BASE_DATA}/${PG_TO}
# SSL certs are not mounted in the upgrade job, so postgres would fail to start
# with ssl directives enabled: comment them out temporarily (restored below)
echo "Commenting SSL directives (SSL cert not available, nor needed in the upgrade context)"
cp ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf.old
sed -i -r 's/^(ssl.*)/#\1/g' ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf
# Minimal pg_hba: only local peer auth is possible during the upgrade
echo "Replacing pg_hba with a custom one"
cp ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf.old
cat <<_EOF > ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf
local all postgres peer
_EOF
echo "Initializing new PG cluster"
/usr/libexec/postgresql${PG_TO}/bin/initdb --pgdata=${PG_BASE_DATA}/${PG_TO} --auth-host=scram-sha-256 --auth-local=peer --icu-locale=${LANG} --data-checksums --encoding=UTF8 --locale-provider=icu
echo "Upgrading PG data from ${PG_BASE_DATA}/${PG_FROM} to ${PG_BASE_DATA}/${PG_TO}"
# --clone uses filesystem cloning when available (fast, needs same filesystem)
/usr/libexec/postgresql${PG_TO}/bin/pg_upgrade \
--clone \
--old-datadir ${PG_BASE_DATA}/${PG_FROM} \
--new-datadir ${PG_BASE_DATA}/${PG_TO} \
--old-bindir /usr/libexec/postgresql${PG_FROM}/bin \
--new-bindir /usr/libexec/postgresql${PG_TO}/bin
echo "Keep old patroni.dynamic.json config"
if [ -e "${PG_BASE_DATA}/${PG_FROM}/patroni.dynamic.json" ]; then
cp ${PG_BASE_DATA}/${PG_FROM}/patroni.dynamic.json ${PG_BASE_DATA}/${PG_TO}/
fi
# Undo the temporary SSL/pg_hba edits on the old dir, then seed the new dir
# with the restored configuration
echo "Restoring configuration"
cp -f ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf.old ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf
cp -f ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf.old ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf
cp -f ${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf ${PG_BASE_DATA}/${PG_TO}/pg_hba.conf
cp -f ${PG_BASE_DATA}/${PG_FROM}/postgresql.conf ${PG_BASE_DATA}/${PG_TO}/postgresql.conf

View File

@ -0,0 +1,18 @@
FROM danielberteaud/alma:9.23.12-3
# MAINTAINER is deprecated (hadolint DL4000): use a LABEL instead
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
ENV PG_BASE_DATA=/data/db/
# Fix: the original 'RUN set -eux \' was missing '&&', so "dnf update -y"
# became positional arguments of 'set' and the update never ran
RUN set -eux &&\
    dnf -y update &&\
    rpm -i https://download.postgresql.org/pub/repos/yum/reporpms/EL-9-x86_64/pgdg-redhat-repo-latest.noarch.rpm &&\
    # Disable the AppStream postgresql module so pgdg packages take precedence
    dnf module -y disable postgresql &&\
    dnf install -y --setopt=install_weak_deps=0 glibc-langpack-fr glibc-langpack-en &&\
    # Install every supported major version so pg_upgrade can handle any pair
    for VER in 11 12 13 14 15 16; do dnf install -y postgresql${VER} postgresql${VER}-server postgresql${VER}-contrib; done &&\
    dnf clean all &&\
    rm -rf /var/cache/yum/* /var/log/yum/* /var/lib/yum/history*
COPY root/ /
USER postgres
CMD ["pg-upgrade.sh"]

View File

@ -0,0 +1,54 @@
#!/bin/sh
# Upgrade a PostgreSQL data directory to a new major version with pg_upgrade
# (RHEL/pgdg /usr/pgsql-NN layout).
# Requires: PG_FROM, PG_TO (major versions), PG_BASE_DATA, DO_PG_UPGRADE=1.
set -euxo pipefail
# Sanity checks: both versions must be set and the source data dir must exist.
# ${VAR:-} keeps 'set -u' from aborting before we can print a useful message.
if [ -z "${PG_FROM:-}" ]; then
echo "You must set PG_FROM env var to the source version"
exit 1
elif [ -z "${PG_TO:-}" ]; then
echo "You must set PG_TO env var to the destination version"
exit 1
elif [ ! -d "${PG_BASE_DATA}/${PG_FROM}" ]; then
echo "Source data dir ${PG_BASE_DATA}/${PG_FROM} must already exist"
exit 1
fi
# Safety switch: do nothing unless explicitly requested.
# Fix: the original printed the warning but then ran the upgrade anyway —
# it must stop here.
if [ "${DO_PG_UPGRADE:-}" != "1" ]; then
echo "Not running the upgrade. Please set DO_PG_UPGRADE=1"
exit 0
fi
cd "${PG_BASE_DATA}"
echo "Creating new data dir for version ${PG_TO}"
mkdir -p "${PG_BASE_DATA}/${PG_TO}"
chmod 700 "${PG_BASE_DATA}/${PG_TO}"
# SSL certs are not mounted in the upgrade job, so postgres would fail to start
# with ssl directives enabled: comment them out temporarily (restored below)
echo "Commenting SSL directives (SSL cert not available, nor needed in the upgrade context)"
cp "${PG_BASE_DATA}/${PG_FROM}/postgresql.conf" "${PG_BASE_DATA}/${PG_FROM}/postgresql.conf.old"
sed -i -r 's/^(ssl.*)/#\1/g' "${PG_BASE_DATA}/${PG_FROM}/postgresql.conf"
# Minimal pg_hba: only local peer auth is possible during the upgrade
echo "Replacing pg_hba with a custom one"
cp "${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf" "${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf.old"
cat <<_EOF > "${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf"
local all postgres peer
_EOF
echo "Initializing new PG cluster"
/usr/pgsql-${PG_TO}/bin/initdb --pgdata=${PG_BASE_DATA}/${PG_TO} --data-checksums --encoding UTF-8
echo "Upgrading PG data from ${PG_BASE_DATA}/${PG_FROM} to ${PG_BASE_DATA}/${PG_TO}"
# --clone uses filesystem cloning when available (fast, needs same filesystem)
/usr/pgsql-${PG_TO}/bin/pg_upgrade \
--clone \
--old-datadir "${PG_BASE_DATA}/${PG_FROM}" \
--new-datadir "${PG_BASE_DATA}/${PG_TO}" \
--old-bindir /usr/pgsql-${PG_FROM}/bin \
--new-bindir /usr/pgsql-${PG_TO}/bin
if [ -e "${PG_BASE_DATA}/${PG_FROM}/patroni.dynamic.json" ]; then
echo "Keep old patroni.dynamic.json config"
cp ${PG_BASE_DATA}/${PG_FROM}/patroni.dynamic.json ${PG_BASE_DATA}/${PG_TO}/
fi
# Undo the temporary SSL/pg_hba edits on the old data dir
echo "Restoring configuration"
cp -f "${PG_BASE_DATA}/${PG_FROM}/postgresql.conf.old" "${PG_BASE_DATA}/${PG_FROM}/postgresql.conf"
cp -f "${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf.old" "${PG_BASE_DATA}/${PG_FROM}/pg_hba.conf"

4
example/init/consul Executable file
View File

@ -0,0 +1,4 @@
#!/bin/sh
# vim: syntax=sh
# Create a Vault Consul-secrets-engine role so jobs can obtain Consul tokens
# bound to the "postgres" Consul policy (30-day TTL)
vault write consul/roles/postgres ttl=720h max_ttl=720h consul_policies="postgres"

11
example/init/passwords Executable file
View File

@ -0,0 +1,11 @@
#!/bin/sh
# Seed per-component password fields in the Vault KV secret kv/service/postgres.
# NOTE(review): relies on vault CLI exit codes — presumably 1 = field missing
# in an existing secret (so patch) and 2 = secret missing entirely (so put);
# confirm against the vault version in use.
for USER in pg monitor replicator rewind api vault_initial; do
vault kv get -field ${USER}_pwd kv/service/postgres > /dev/null 2>&1
RES=$?
if [ "${RES}" = "1" ]; then
# pwgen: one 50-char secure password with symbols, excluding characters
# that are painful in shell/config contexts (\ ` ' " # ^ |)
vault kv patch kv/service/postgres ${USER}_pwd=$(pwgen -s -y -r\\\`\'\"\#\^\| -n 50 1)
elif [ "${RES}" = "2" ]; then
vault kv put kv/service/postgres ${USER}_pwd=$(pwgen -s -y -r\\\`\'\"\#\^\| -n 50 1)
fi
done

101
example/init/pki Executable file
View File

@ -0,0 +1,101 @@
#!/bin/sh
# vim: syntax=sh
set -euo pipefail
TMP=$(mktemp -d)
INITIAL_SETUP=false
if [ "$(vault secrets list -format json | jq -r '.["pki/postgres/"].type')" != "pki" ]; then
INITIAL_SETUP=true
fi
if [ "${INITIAL_SETUP}" = "true" ]; then
# Enable the secret engine
echo "Mounting new PKI secret engine at pki/postgres"
vault secrets enable -path=pki/postgres pki
else
echo "Secret engine already mounted at pki/postgres"
fi
# Configure max-lease-ttl
echo "Tune PKI secret engine"
vault secrets tune -max-lease-ttl=131400h pki/postgres
# Configure PKI URLs
echo "Configure URL endpoints"
vault write pki/postgres/config/urls \
issuing_certificates="${VAULT_ADDR}/v1/pki/postgres/ca" \
crl_distribution_points="${VAULT_ADDR}/v1/pki/postgres/crl" \
ocsp_servers="${VAULT_ADDR}/v1/pki/postgres/ocsp"
vault write pki/postgres/config/cluster \
path="${VAULT_ADDR}/v1/pki/postgres"
vault write pki/postgres/config/crl \
auto_rebuild=true \
enable_delta=true
# Configure tidy
echo "Configure auto tidy for the PKI"
vault write pki/postgres/config/auto-tidy \
enabled=true \
tidy_cert_store=true \
tidy_expired_issuers=true \
tidy_revocation_queue=true \
tidy_revoked_cert_issuer_associations=true \
tidy_revoked_certs=true \
tidy_acme=true \
tidy_cross_cluster_revoked_certs=true \
tidy_move_legacy_ca_bundle=true \
maintain_stored_certificate_counts=true
if [ "${INITIAL_SETUP}" = "true" ]; then
# Generate an internal CA
echo "Generating an internal CA"
vault write -format=json pki/postgres/intermediate/generate/internal \
common_name="postgres Certificate Authority " \
ttl="131400h" \
organization="Ehtrace" \
ou="PostgreSQL" \
locality="Pessac" \
key_type=rsa \
key_bits=4096 \
| jq -r '.data.csr' > ${TMP}/postgres.csr
# Sign this PKI with a root PKI
echo "Signing the new CA with the authority from pki/root"
vault write -format=json pki/root/root/sign-intermediate \
csr=@${TMP}/postgres.csr \
format=pem_bundle \
ttl="131400h" \
| jq -r '.data.certificate' > ${TMP}/postgres.crt
# Update the intermediate CA with the signed one
echo "Update the new CA with the signed version"
vault write pki/postgres/intermediate/set-signed \
certificate=@${TMP}/postgres.crt
fi
# Remove temp files
echo "Cleaning temp files"
rm -rf ${TMP}
vault write pki/postgres/roles/postgres-server \
allowed_domains="postgres.service.consul" \
allow_bare_domains=true \
allow_subdomains=true \
allow_localhost=false \
allow_ip_sans=true \
allow_wildcard_certificates=false \
max_ttl=72h

13
example/init/vault-database Executable file
View File

@ -0,0 +1,13 @@
#!/bin/sh
# Configure the Vault database secrets engine role used to issue on-demand
# DBA credentials for the postgres cluster.
# NOTE(review): the next line prints an error unconditionally — it looks like a
# leftover from template rendering (a required .pg.server.public_url value was
# missing); confirm whether this script should abort here instead.
echo "Required .pg.server.public_url is missing"
echo "Creating dba role in vault"
# Dynamic credentials: create a login role, grant it dba and make dba its
# default role; leases last 12h, renewable up to 720h
vault write database/roles/postgres-admin \
db_name="postgres" \
creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \
GRANT \"dba\" TO \"{{name}}\"; \
ALTER ROLE \"{{name}}\" SET role = \"dba\"" \
default_ttl="12h" \
max_ttl="720h"

230
example/manage.nomad.hcl Normal file
View File

@ -0,0 +1,230 @@
job "postgres-manage" {
type = "batch"
meta {
# Force job to run each time
run = "${uuidv4()}"
}
datacenters = ["dc1"]
group "manage" {
network {
mode = "bridge"
}
ephemeral_disk {
size = 101
}
service {
name = "postgres-manage"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
sidecar_task {
resources {
cpu = 50
memory = 64
}
}
}
}
# Wait for required services to be ready before starting the main task
task "wait-for" {
driver = "docker"
user = 1053
config {
image = "danielberteaud/wait-for:23.12-2"
readonly_rootfs = true
pids_limit = 20
}
lifecycle {
hook = "prestart"
}
env {
SERVICE_0 = "master.postgres.service.consul"
}
resources {
cpu = 10
memory = 10
memory_max = 30
}
}
task "postgres-manage" {
driver = "docker"
config {
image = "danielberteaud/ldap2pg:6.0-6"
readonly_rootfs = true
pids_limit = 20
}
vault {
policies = ["postgres"]
env = false
disable_file = true
}
env {
LDAP2PG_CONFIG = "/secrets/ldap2pg.yml"
LDAP2PG_MODE = "dry"
}
# Use a template block instead of env {} so we can fetch values from vault
template {
data = <<_EOT
LANG=fr_FR.utf8
TZ=Europe/Paris
_EOT
destination = "secrets/.env"
perms = 400
env = true
}
template {
data = <<_EOF
PGHOST=localhost
PGPORT=5432
PGUSER=postgres
PGPASSWORD={{ with secret "kv/service/postgres" }}{{ .Data.data.pg_pwd | sprig_squote }}{{ end }}
_EOF
destination = "secrets/pg-manage.env"
uid = 100000
gid = 100000
perms = 0400
env = true
}
template {
data = <<_EOF
postgres:
managed_roles_query: |
VALUES
('public'),
('managed_roles')
UNION
SELECT DISTINCT role.rolname
FROM pg_roles AS role
JOIN pg_auth_members AS ms ON ms.member = role.oid
JOIN pg_roles AS parent
ON parent.rolname = 'managed_roles' AND parent.oid = ms.roleid
ORDER BY 1;
privileges:
owner:
- writer
- __create_on_schemas__
- __truncate_on_tables__
reader:
- user
- __select_on_tables__
- __select_on_sequences__
- __usage_on_sequences__
rewinder:
- __connect__
- __execute_on_functions__
user:
- __connect__
- __usage_on_schema__
writer:
- reader
- __temporary__
- __insert_on_tables__
- __update_on_tables__
- __delete_on_tables__
- __update_on_sequences__
- __execute_on_functions__
- __trigger_on_tables__
version: 6
rules:
- roles:
- comment: Parent role for all ldap2pg managed roles
name: managed_roles
- comment: Parent role for LDAP synced roles
name: ldap_roles
options: NOLOGIN
parents:
- managed_roles
- comment: DB backup
name: backup
options: LOGIN REPLICATION
parents:
- pg_read_all_data
- managed_roles
- comment: Databases admins
name: dba
options: SUPERUSER NOLOGIN
parents:
- managed_roles
- comment: Databases rewinder
name: rewind
options: LOGIN
parents:
- managed_roles
- comment: Databases monitor
name: monitor
options: LOGIN
parents:
- managed_roles
- pg_monitor
- comment: Hashicorp Vault
name: vault
options: SUPERUSER LOGIN
parents:
- managed_roles
- grant:
databases: postgres
privileges: reader
role: vault
- grant:
privileges: user
role: monitor
- grant:
databases: postgres
privileges: rewinder
role: rewind
- grant:
privileges: owner
role: dba
_EOF
destination = "secrets/ldap2pg.yml"
uid = 100000
gid = 100000
perms = 0400
}
resources {
cpu = 50
memory = 32
}
}
}
}

469
example/postgres.nomad.hcl Normal file
View File

@ -0,0 +1,469 @@
job "postgres" {
datacenters = ["dc1"]
priority = 100
group "server" {
ephemeral_disk {
# Use minimal ephemeral disk
size = 101
}
# Force different instances to run on distinct nodes
constraint {
operator = "distinct_hosts"
value = "true"
}
count = 1
network {
mode = "bridge"
# Patroni API for node to check each others
port "patroni" {
to = 8080
}
# When running with patroni, nodes must reach each others postgres service
port "postgres" {
to = 5432
}
}
update {
# Set super high deadlines as recovery can take lots of time
healthy_deadline = "48h"
progress_deadline = "72h"
}
service {
name = "postgres"
port = 5432
connect {
sidecar_service {
}
sidecar_task {
resources {
cpu = 50
memory = 64
}
}
}
tags = [
"postgres-${NOMAD_ALLOC_INDEX}",
# Note : we don't add traefik.enable=true
# This will be done dynamically only on the current master node using the update_tags.sh script
"traefik.tcp.routers.postgres.rule=HostSNI(`*`)",
"traefik.tcp.routers.postgres.tls=true",
"traefik.tcp.routers.postgres.entrypoints=postgres",
]
# Use patroni health endpoint to verify postgres status
check {
name = "healthy"
type = "http"
port = "patroni"
path = "/health"
protocol = "https"
interval = "20s"
timeout = "10s"
# Patroni REST API is using a cert from a private CA
tls_skip_verify = true
}
# This check will ensure the current role is published in Consul tags (if the callback during a role change failed for example)
check {
name = "tags"
type = "script"
command = "/bin/sh"
args = [
"-c",
"ROLE=$(curl -k https://localhost:${NOMAD_PORT_patroni}/health | jq -r .role) && /local/update_tags.sh ensure_tags ${ROLE}"
]
task = "postgres"
interval = "60s"
timeout = "10s"
}
check {
name = "ready"
type = "script"
interval = "30s"
timeout = "10s"
task = "postgres"
command = "pg_isready"
}
# Patroni will run a script to update the tags (master / replica)
enable_tag_override = true
}
volume "data" {
type = "csi"
source = "postgres-data"
access_mode = "single-node-writer"
attachment_mode = "file-system"
per_alloc = true
}
volume "backup" {
type = "csi"
source = "postgres-backup"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
task "postgres" {
driver = "docker"
leader = true
kill_timeout = "10m"
config {
image = "danielberteaud/patroni:15.23.12-3"
# Set shm_size to half of the total size
shm_size = 536870912
volumes = ["local/mkdir-socket.sh:/entrypoint.d/70-mkdir-socket.sh"]
command = "patroni"
args = ["/secrets/patroni.yml"]
pids_limit = 700
}
vault {
policies = ["postgres"]
env = false
disable_file = true
}
# Use a template block instead of env {} so we can fetch values from vault
template {
data = <<_EOT
LANG=fr_FR.utf8
TZ=Europe/Paris
_EOT
destination = "secrets/.env"
perms = 400
env = true
}
template {
data = <<_EOT
# Get a Consul token from vault, so we're able to update the tags in Consul from the containers
CONSUL_HTTP_TOKEN={{ with secret "consul/creds/postgres" }}{{ .Data.token }}{{ end }}
PATRONICTL_CONFIG_FILE=/secrets/patroni.yml
PGBACKREST_STANZA=postgres
_EOT
destination = "secrets/pg.env"
uid = 100000
gid = 100026
perms = 440
change_mode = "noop"
env = true
}
# Scripts to update tags attached to the service in consul catalog
# with either master or replica
template {
data = <<_EOT
def nest_service_params:
# Select all objects with keys that start with the the name 'Service'
# Remove 'Service' prefix from key names
with_entries(select(.key | startswith("Service")) | .key = (.key | sub("^Service"; "")))
;
def create_svc_txn:
# Select our own instance
select(.ServiceTags[] | contains($mytag))
# Add needed tags
| .ServiceTags -= ["master", "replica", "uninitialized"]
| .ServiceTags += [$role]
| if $role == "master" then .ServiceTags += ["traefik.enable=true"] else .ServiceTags -= ["traefik.enable=true"] end
# Rewrite keys to remove the Service prefix
| nest_service_params as $nested_params
| $nested_params
;
. | map(create_svc_txn) | .[]
_EOT
destination = "local/serviceformat.jq"
change_mode = "noop"
}
# Renders the update_tags.sh callback used by patroni to publish the current
# role (master/replica) into Consul service tags.
template {
data = <<_EOT
#!/bin/sh
# vim: syntax=sh
set -eo pipefail
EVENT=$1
NEW_ROLE=$2
source /secrets/pg.env
# translate promoted = master and demoted = replica
if [ "${NEW_ROLE}" = "promoted" ]; then
NEW_ROLE="master"
elif [ "${NEW_ROLE}" = "demoted" ]; then
NEW_ROLE="replica"
fi
CURL_OPTS="--connect-timeout 5 --max-time 10 --retry 5 --retry-delay 1 --retry-max-time 40 --retry-connrefused"
# Update tags on the main service
# (fix: the second curl used ${CORL_OPTS}, a typo expanding empty, which
# silently dropped all the timeout/retry options)
curl ${CURL_OPTS} -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" http://{{ sockaddr "GetInterfaceIP \"nomad\"" }}:8500/v1/catalog/service/postgres |\
jq --from-file /local/serviceformat.jq --arg role ${NEW_ROLE} --arg mytag postgres-{{ env "NOMAD_ALLOC_INDEX" }} |\
curl ${CURL_OPTS} -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" -X PUT -d @- http://{{ sockaddr "GetInterfaceIP \"nomad\"" }}:8500/v1/agent/service/register
# Update tags on the sidecar service (connect-proxy)
curl ${CURL_OPTS} -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" http://{{ sockaddr "GetInterfaceIP \"nomad\"" }}:8500/v1/catalog/service/postgres-sidecar-proxy |\
jq --from-file /local/serviceformat.jq --arg role ${NEW_ROLE} --arg mytag postgres-{{ env "NOMAD_ALLOC_INDEX" }} |\
curl ${CURL_OPTS} -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" -X PUT -d @- http://{{ sockaddr "GetInterfaceIP \"nomad\"" }}:8500/v1/agent/service/register
_EOT
destination = "local/update_tags.sh"
perms = 755
change_mode = "noop"
}
# A small entrypoint scriptlet to ensure /alloc/data/postgres dir exists
template {
data = <<_EOT
#!/bin/sh
set -eu
mkdir -p /alloc/data/postgres
_EOT
destination = "local/mkdir-socket.sh"
perms = 755
}
# Patroni main configuration file
template {
data = <<_EOT
name: postgres-{{ env "NOMAD_ALLOC_INDEX" }}
scope: postgres
consul:
url: http://{{ sockaddr "GetInterfaceIP \"nomad\"" }}:8500
token: {{ with secret "consul/creds/postgres" }}{{ .Data.token }}{{ end }}
bootstrap:
dcs:
synchronous_mode: False
initdb:
- data-checksum
- encoding: UTF-8
#- locale-provider: icu
#- icu-locale: fr_FR.utf8
post_bootstrap: /local/create_users.sh
postgresql:
create_replica_methods:
- basebackup
callbacks:
on_role_change: /local/update_tags.sh
on_start: /local/update_tags.sh
connect_address: {{ env "NOMAD_HOST_ADDR_postgres" }}
bin_dir: /usr/pgsql-15/bin
data_dir: /data/db/15
listen: 0.0.0.0:{{ env "NOMAD_ALLOC_PORT_postgres" }}
use_pg_rewind: True
#remove_data_directory_on_rewind_failure: True
pg_hba:
- local all postgres peer
- local replication postgres peer
- local all postgres scram-sha-256
- host all all 127.0.0.0/8 scram-sha-256
- host replication backup 127.0.0.0/8 scram-sha-256
- hostssl replication replicator 0.0.0.0/0 cert clientcert=verify-full map=patroni-map
- hostssl postgres rewind 0.0.0.0/0 cert clientcert=verify-full map=patroni-map
- hostssl all all 0.0.0.0/0 cert clientcert=verify-full
pg_ident:
- patroni-map postgres.service.consul postgres
- patroni-map postgres.service.consul replicator
- patroni-map postgres.service.consul rewind
parameters:
ssl: on
ssl_cert_file: /secrets/postgres.bundle.pem
ssl_key_file: /secrets/postgres.bundle.pem
ssl_ca_file: /local/postgres.ca.pem
#ssl_crl_file: /local/postgres.crl.pem
# Add a socket in /alloc/data/postgres
# so other tasks in the same group can reach it
unix_socket_directories: /run/postgresql, /alloc/data/postgres
autovacuum_analyze_scale_factor: 0.05
autovacuum_analyze_threshold: 500
autovacuum_vacuum_scale_factor: 0.1
autovacuum_vacuum_threshold: 500
datestyle: ISO, DMY
log_connections: on
log_destination: stderr
log_directory: /proc/1/fd
log_disconnections: on
log_filename: 1
log_line_prefix: '[%m] u=%u,d=%d,a=%a,c=%h,xid=%x '
log_min_duration_statement: 2000
log_statement: ddl
log_timezone: {{ env "TZ" }}
maintenance_work_mem: 51MB
shared_buffers: 512MB
timezone: {{ env "TZ" }}
wal_compression: zstd
wal_keep_size: 512
work_mem: 10MB
recovery_conf:
authentication:
superuser:
username: postgres
password: '{{ with secret "kv/service/postgres" }}{{ .Data.data.pg_pwd }}{{ end }}'
sslmode: verify-ca
sslrootcert: /local/postgres.ca.pem
replication:
username: replicator
sslmode: verify-ca
sslrootcert: /local/postgres.ca.pem
sslcert: /secrets/postgres.bundle.pem
sslkey: /secrets/postgres.bundle.pem
rewind:
username: rewind
sslmode: verify-ca
sslrootcert: /local/postgres.ca.pem
sslcert: /secrets/postgres.bundle.pem
sslkey: /secrets/postgres.bundle.pem
restapi:
connect_address: {{ env "NOMAD_HOST_ADDR_patroni" }}
listen: 0.0.0.0:{{ env "NOMAD_ALLOC_PORT_patroni" }}
keyfile: /secrets/postgres.bundle.pem
certfile: /secrets/postgres.bundle.pem
cafile: /local/postgres.ca.pem
verify_client: optional
authentication:
username: patroni
password: '{{ with secret "kv/service/postgres" }}{{ .Data.data.api_pwd }}{{ end }}'
ctl:
insecure: False
keyfile: /secrets/postgres.bundle.pem
certfile: /secrets/postgres.bundle.pem
cafile: /local/postgres.ca.pem
watchdog:
mode: off
_EOT
destination = "secrets/patroni.yml"
perms = "0400"
uid = 100026
gid = 100026
change_mode = "signal"
change_signal = "SIGHUP"
}
# Post bootstrap script, to create users
template {
data = <<_EOT
#!/bin/sh
set -euo pipefail
# Create roles needed for patroni
{{ with secret "kv/service/postgres" }}
psql <<'_EOSQL'
ALTER ROLE postgres WITH SUPERUSER LOGIN PASSWORD '{{ .Data.data.pg_pwd }}';
CREATE ROLE replicator WITH LOGIN REPLICATION PASSWORD '{{ .Data.data.replicator_pwd }}';
CREATE ROLE rewind WITH LOGIN PASSWORD '{{ .Data.data.rewind_pwd }}';
CREATE ROLE vault WITH LOGIN SUPERUSER PASSWORD '{{ .Data.data.vault_initial_pwd }}';
CREATE ROLE monitor WITH LOGIN PASSWORD '{{ .Data.data.monitor_pwd }}';
GRANT "pg_monitor" TO "monitor";
_EOSQL
{{ end }}
_EOT
destination = "secrets/create_users.sh"
perms = "0750"
uid = 100026
gid = 100026
change_mode = "noop"
}
# Post bootstrap wrapper, as /secrets is mounted with noexec
template {
data = <<_EOT
#!/bin/sh
set -euo pipefail
sh /secrets/create_users.sh
_EOT
destination = "local/create_users.sh"
perms = "0750"
uid = 100026
gid = 100026
change_mode = "noop"
}
# Obtain a certificate from Vault
template {
data = <<_EOT
{{ with pkiCert
"pki/postgres/issue/postgres-server"
"common_name=postgres.service.consul"
(printf "ip_sans=%s" (env "NOMAD_IP_patroni")) "ttl=72h" }}
{{ .Cert }}
{{ .Key }}
{{ end }}
_EOT
destination = "secrets/postgres.bundle.pem"
perms = "0400"
uid = 100026
gid = 100026
change_mode = "signal"
change_signal = "SIGHUP"
}
# CA certificate chains
template {
data = <<_EOT
{{ with secret "pki/postgres/cert/ca_chain" }}{{ .Data.ca_chain }}{{ end }}
_EOT
destination = "local/postgres.ca.pem"
change_mode = "signal"
change_signal = "SIGHUP"
}
# Mount the persistent volume in /data
volume_mount {
volume = "data"
destination = "/data"
}
# Mount the backup volume (which can be used for PITR with pgbackrest)
volume_mount {
volume = "backup"
destination = "/backup"
}
resources {
cpu = 1000
memory = 1024
}
}
}
}

22
example/prep.d/10-rand-pwd.sh Executable file
View File

@ -0,0 +1,22 @@
#!/bin/sh
# Seed the kv/service/postgres Vault secret with random passwords for every
# internal account of the postgres stack, then backfill any missing field.
set -euo pipefail
# Initialize random passwords if needed
if ! vault kv list kv/service 2>/dev/null | grep -q -E '^postgres$'; then
vault kv put kv/service/postgres \
pg_pwd=$(pwgen -s -n 50 1) \
api_pwd=$(pwgen -s -n 50 1) \
monitor_pwd=$(pwgen -s -n 50 1) \
replicator_pwd=$(pwgen -s -n 50 1) \
rewind_pwd=$(pwgen -s -n 50 1) \
vault_initial_pwd=$(pwgen -s -n 50 1)
fi
# Backfill: add any individual field that is still missing from the secret
# (e.g. after a field was added to the list above)
for PWD in pg_pwd api_pwd monitor_pwd replicator_pwd rewind_pwd vault_initial_pwd; do
if ! vault kv get -field ${PWD} kv/service/postgres >/dev/null 2>&1; then
vault kv patch kv/service/postgres \
${PWD}=$(pwgen -s -n 50 1)
fi
done

19
example/prep.d/mv_conf.sh Executable file
View File

@ -0,0 +1,19 @@
#!/bin/sh
# Rename generated vault/consul/nomad config files when the deployed service
# name differs from the default.
# NOTE(review): this is a rendered example — both names rendered to "postgres",
# so the condition is always false and the sed is a no-op here; in the source
# template these are two different variables.
set -eu
if [ "postgres" != "postgres" ]; then
for DIR in vault consul nomad; do
if [ -d output/${DIR} ]; then
for FILE in $(find output/${DIR} -name "*postgres*.hcl" -type f); do
NEW_FILE=$(echo "${FILE}" | sed -E "s/postgres/postgres/g")
mv "${FILE}" "${NEW_FILE}"
done
fi
done
fi

65
example/upgrade.nomad.hcl Normal file
View File

@ -0,0 +1,65 @@
job "postgres-upgrade" {
datacenters = ["dc1"]
type = "batch"
meta {
# Force job to be different for each execution
run_uuid = "${uuidv4()}"
}
group "upgrade" {
volume "data" {
type = "csi"
source = "postgres-data"
access_mode = "single-node-writer"
attachment_mode = "file-system"
per_alloc = true
}
task "postgres-upgrade" {
driver = "docker"
config {
image = "danielberteaud/pg-major-upgrade:latest"
readonly_rootfs = true
}
# Upgrade parameters, to be filled in before dispatching the job
env {
PG_FROM = ""
PG_TO = ""
# Fix: the upgrade scripts test DO_PG_UPGRADE (not PG_DO_UPGRADE), so the
# safety flag must use that exact name to have any effect
DO_PG_UPGRADE = false
}
# Use a template block instead of env {} so we can fetch values from vault
template {
data = <<_EOT
LANG=fr_FR.utf8
TZ=Europe/Paris
_EOT
destination = "secrets/.env"
perms = 400
env = true
}
volume_mount {
volume = "data"
destination = "/data"
}
resources {
cpu = 1000
memory = 1024
}
}
}
}

View File

@ -0,0 +1,14 @@
# Read secrets from vault KV
path "kv/data/service/postgres" {
capabilities = ["read"]
}
# Get a consul token to access the kv store, where patroni will manage the leader lock
path "consul/creds/postgres" {
capabilities = ["read"]
}
# Get a certificate for patroni REST API and Postgres
path "pki/postgres/issue/postgres-server" {
capabilities = ["update"]
}