# postgres/variables.yml
---
# Name of the job to generate
# Also used to control service names
instance: postgres

pg:
  nomad:
    # Set higher priority for the postgres job
    priority: 80

  vault:
    pki:
      ou: Postgres
    # List of vault policies to attach to the task
    policies:
      - '[[ .instance ]][[ .consul.suffix ]]'
    database:
      role: '[[ .instance ]]-admin'
      pgrole: dba
    # Random secrets to generate if missing, and store in vault KV
    rand_secrets:
      fields:
        - pg_pwd
        - api_pwd
        - monitor_pwd
        - replicator_pwd
        - rewind_pwd
        - vault_initial_pwd

  # Postgres server settings
  server:
    # Major version of postgres to use
    pg_version: 15
    # Version of patroni (quoted so YAML keeps it a string)
    patroni_version: '3.3.0'
    # The image to use
    image: '[[ .docker.repo ]]patroni:[[ .pg.server.pg_version ]]-[[ .pg.server.patroni_version ]]-2'
    # Number of postgres instances. Patroni will handle leader election and replication
    count: 1
    # Optional env vars to pass to the container
    # You should set PGBACKREST_XXX variables if you intend to use the backup service
    # Eg:
    # env:
    #   PGBACKREST_PROCESS_MAX: 4
    #   PGBACKREST_REPO1_RETENTION_FULL: 1
    #   PGBACKREST_REPO1_RETENTION_DIFF: 7
    #   PGBACKREST_REPO1_TYPE: sftp
    #   PGBACKREST_REPO1_SFTP_HOST: pbs.lapiole.org
    #   PGBACKREST_REPO1_SFTP_HOST_USER: pitr
    #   PGBACKREST_REPO1_PATH: /postgres/pgbackrest
    #   PGBACKREST_REPO1_SFTP_HOST_KEY_HASH_TYPE: sha256
    #   # awk '{print $2}' ssh_host_ecdsa_key.pub | base64 -d | sha256sum
    #   PGBACKREST_REPO1_SFTP_HOST_FINGERPRINT: ce6eb1c79ce6596d7580f3b08021b48e39e5a30f2fd751a7fa82b480d821eb99
    #   PGBACKREST_REPO1_SFTP_HOST_KEY_CHECK_TYPE: fingerprint
    env:
      PGBACKREST_STANZA: '[[ .instance ]]'
    nomad:
      # Enforce running on distinct hosts
      constraints:
        - operator: distinct_hosts
          value: true
      update:
        # When running in recovery mode, use huge deadlines as it can take a lot of time
        healthy_deadline: '[[ .pg.server.recovery | ternary "48h" "2h" ]]'
        progress_deadline: '[[ .pg.server.recovery | ternary "72h" "3h" ]]'
    # In recovery mode, neither patroni nor postgres will be started. The container will start and wait for manual recovery
    recovery: false
    # Recovery configuration to pass to patroni config
    recovery_conf: {}
    # How many nodes should use synchronous replication. No effect unless count > 1
    synchronous_node_count: 0
    # Options to pass to initdb when initializing the cluster
    initdb:
      - "data-checksum"
      - "encoding: UTF-8"
    # Postgres parameters
    # The following memory related settings can be expressed as a percentage, and will be computed based on the memory allocation of the container
    # shared_buffers effective_cache_size maintenance_work_mem wal_buffers work_mem
    parameters:
      log_line_prefix: "'[%m] u=%u,d=%d,a=%a,c=%h,xid=%x '"
      wal_compression: zstd
      log_min_duration_statement: 2000
      log_timezone: '{{ env "TZ" }}'
      timezone: '{{ env "TZ" }}'
      log_destination: stderr
      # Logs are written to the stdout of PID 1, so the container runtime captures them
      log_directory: /proc/1/fd
      log_filename: 1
      # logging_collector: 'on'
      # log_truncate_on_rotation: 'on'
      # log_rotation_size: 0
      # log_rotation_age: 1440
      log_statement: ddl
      # NOTE: quoted on purpose — a bare `on` is loaded as boolean true by YAML 1.1 parsers,
      # while postgres expects the literal value on/off
      log_connections: 'on'
      log_disconnections: 'on'
      datestyle: 'ISO, DMY'
      autovacuum_vacuum_threshold: 500
      autovacuum_analyze_threshold: 500
      autovacuum_vacuum_scale_factor: 0.1
      autovacuum_analyze_scale_factor: 0.05
      shared_buffers: 50%
      maintenance_work_mem: 5%
      work_mem: 1%
      wal_keep_size: 512
    # You can configure ldap auth for postgres
    # users needing ldap auth should be members of the ldap_roles role in postgres
    ldap_auth:
      # Is the auth enabled
      enabled: false
      # List of servers to try to connect to
      servers:
        - localhost
      # Port
      port: 389
      # Should StartTLS be used to connect
      starttls: true
      # The base DN where postgres will start looking for users
      base_dn: OU=People,DC=example,DC=org
      # Search filter to find matching users
      search_filter: "(&(objectClass=user)(!(useraccountcontrol:1.2.840.113556.1.4.803:=2))(memberOf:1.2.840.113556.1.4.1941:=CN=Role_DBA,OU=Roles,DC=example,DC=org)(sAMAccountName=$username))"
      # Optional bind DN and password to do the search operation
      # If undefined, the search will be done anonymously
      # bind_dn: CN=Postgres,OU=Apps,DC=example,DC=org
      bind_password: '{{ with secret "[[ .vault.root ]]kv/service/[[ .instance ]]" }}{{ .Data.data.ldap_pwd }}{{ end }}'
    # Resource allocation for the container
    resources:
      cpu: 1000
      memory: 1024
    # URL of the service as seen from the outside
    # This is used to initialize connection from vault to handle database secrets
    # public_url: postgres://postgres.example.org:5432
    traefik:
      proto: tcp
      # List of entrypoints the service will listen to
      entrypoints:
        - postgres
    prometheus:
      enabled: '[[ .prometheus.available ]]'
      metrics_url: 'http://localhost:9187'

  # Backup service uses pgbackrest to perform
  # WAL archive, and regular full/incr/diff backups
  backup:
    # Additional env vars.
    # Note that pg.server.env will be inherited so PGBACKREST config only needs to be set there
    env: {}
    # pgbackrest based backups
    pgbackrest:
      enabled: false
      # Schedules for backups. Empty string to disable
      cron:
        full: '15 02 * * sun'
        diff: '15 02 * * mon,tue,wed,thu,fri,sat'
        incr: ''
    # pg_dump based backups
    dumps:
      enabled: false
      format: custom
      compression: 6
      cron: '39 03 * * *'
    # Resource allocations
    resources:
      cpu: 300
      memory: 50
      memory_max: 256

  # Postgres exporter for prometheus
  # Only used if prometheus.enabled is true
  exporter:
    version: '0.15.0'
    # Image to use
    image: '[[ .docker.repo ]]postgres-exporter:[[ .pg.exporter.version ]]-1'
    # Additional env vars
    env: {}
    # Resources
    resources:
      cpu: 50
      memory: 32

  # manage can create databases, users and sync permissions from LDAP (using ldap2pg)
  manage:
    # Image to use
    image: '[[ .docker.repo ]]ldap2pg:6.0-10'
    # Resource allocation
    resources:
      cpu: 50
      memory: 32
    # Services to wait for before running
    wait_for:
      - service: 'master.[[ .instance ]]'
    # Additional env vars
    env:
      WAIT_FOR_TARGETS: 'localhost:5432'
    # Connection to postgres through the service mesh
    consul:
      connect:
        upstreams:
          - destination_name: "[[ .instance ]]"
            local_bind_port: 5432
    # List of databases to create (so permissions can be applied)
    # For each database, a role with the same name (and NOLOGIN) will be created and be owner of the database
    # databases:
    #   - name: vaultwarden
    #   - name: odoo
    #     owner: erp
    #     encoding: UTF-8
    #     locale: fr_FR.utf8
    #     template: template1
    #     extensions:
    #       - uuid-ossp
    databases: []
    # Schedule to run ldap2pg regularly, to ensure permissions are up to date
    # This is especially useful when syncing roles from LDAP
    # An empty string disables running the job as a cron
    cron: ""
    # mode can be dry (no change will be made) or real
    mode: dry
    # Default config for ldap2pg (except for rules which are handled separately)
    default_config:
      version: 6
      postgres:
        managed_roles_query: |
          VALUES
            ('public'),
            ('managed_roles')
          UNION
          SELECT DISTINCT role.rolname
          FROM pg_roles AS role
          JOIN pg_auth_members AS ms ON ms.member = role.oid
          JOIN pg_roles AS parent
            ON parent.rolname = 'managed_roles' AND parent.oid = ms.roleid
          ORDER BY 1;
      privileges:
        user:
          - __connect__
          - __usage_on_schema__
        reader:
          - user
          - __select_on_tables__
          - __select_on_sequences__
          - __usage_on_sequences__
        writer:
          - reader
          - __temporary__
          - __insert_on_tables__
          - __update_on_tables__
          - __delete_on_tables__
          - __update_on_sequences__
          - __execute_on_functions__
          - __trigger_on_tables__
        owner:
          - writer
          - __create_on_schemas__
          - __truncate_on_tables__
        rewinder:
          - __connect__
          - __execute_on_functions__
    # Custom config : will be merged on top of default_config
    config: {}
    # A set of default rules to apply
    default_rules:
      - roles:
          - name: managed_roles
            comment: Parent role for all ldap2pg managed roles
          - name: ldap_roles
            comment: "Parent role for LDAP synced roles"
            options: NOLOGIN
            parents:
              - managed_roles
          - name: backup
            comment: "DB backup"
            options: LOGIN REPLICATION
            parents:
              - pg_read_all_data
              - managed_roles
          - name: dba
            comment: "Databases admins"
            options: SUPERUSER NOLOGIN
            parents:
              - managed_roles
          - name: rewind
            comment: "Databases rewinder"
            options: LOGIN
            parents:
              - managed_roles
          - name: monitor
            comment: "Databases monitor"
            options: LOGIN
            parents:
              - managed_roles
              - pg_monitor
          - name: vault
            comment: "Hashicorp Vault"
            options: SUPERUSER LOGIN
            parents:
              - managed_roles
      - grant:
          role: vault
          privileges: reader
          databases: postgres
      - grant:
          role: monitor
          privileges: user
      - grant:
          role: rewind
          privileges: rewinder
          databases: postgres
      - grant:
          role: dba
          privileges: owner
    # Additional custom rules to apply (will be appended to default_rules)
    rules: []

  # Settings for major upgrades
  upgrade:
    # Set to true to run the upgrade
    enabled: false
    # Docker image to use
    image: '[[ .docker.repo ]]postgres-major-upgrade:latest'
    # Custom env vars to set in the container
    env: {}
    # Options to pass to pg_upgrade
    options:
      # Will only work if using XFS, ZFS or btrfs. Else, replace with link
      - clone
    # Major postgres versions, eg
    # from: 15
    # to: 16
    from: ""
    to: ""
    # Resource allocation
    resources:
      cpu: '[[ .pg.server.resources.cpu ]]'
      memory: '[[ .pg.server.resources.memory ]]'

  # Volumes
  volumes:
    # The data volume is used to store postgres data
    # It'll be opened as single-node-writer, and it's recommended to be a block based volume (eg, iSCSI)
    # The volumes are connected using per_alloc, so the alloc ID will be appended. Eg postgres-data[0], postgres-data[1] etc.
    data:
      type: csi
      source: '[[ .instance ]]-data'
      per_alloc: true
    # Backup volume (can be used for pgbackrest and dumps)
    # Will be opened as multi-node-multi-writer. Can be NFS
    backup:
      type: csi
      source: '[[ .instance ]]-backup'
      access_mode: multi-node-multi-writer