Bundlify ZooKeeper
This commit is contained in:
parent
bef89e167e
commit
af6d57e814
|
@ -0,0 +1,4 @@
|
|||
---
|
||||
|
||||
dependencies:
|
||||
- url: ../common.git
|
|
@ -0,0 +1,12 @@
|
|||
Kind = "service-intentions"
|
||||
Name = "[[ .instance ]][[ .consul.suffix ]]"
|
||||
Sources = [
|
||||
{
|
||||
Name = "exchange-broker-bootstrap[[ .consul.suffix ]]"
|
||||
Action = "allow"
|
||||
},
|
||||
{
|
||||
Name = "eventbus-broker-bootstrap[[ .consul.suffix ]]"
|
||||
Action = "allow"
|
||||
}
|
||||
]
|
|
@ -0,0 +1,37 @@
|
|||
FROM [[ .docker.repo ]][[ .docker.base_images.java17.image ]] AS builder

ARG ZK_VERSION=3.9.1

# Fetch the official ZooKeeper release, verify its GPG signature against the
# Apache release KEYS file, and unpack it under /opt/zookeeper.
# uid/gid 2181 is the unprivileged account used by the runtime stage; conf/
# must be writable because ZooKeeper rewrites its config files at runtime.
RUN set -eux &&\
    apk --no-cache add tar gzip gnupg curl ca-certificates &&\
    export GNUPGHOME="$(mktemp -d)" &&\
    cd /tmp &&\
    curl -sSLO https://dlcdn.apache.org/zookeeper/zookeeper-${ZK_VERSION}/apache-zookeeper-${ZK_VERSION}-bin.tar.gz &&\
    curl -sSLO https://dlcdn.apache.org/zookeeper/zookeeper-${ZK_VERSION}/apache-zookeeper-${ZK_VERSION}-bin.tar.gz.asc &&\
    curl -sSL https://dist.apache.org/repos/dist/release/zookeeper/KEYS | gpg --import - &&\
    gpg --batch --verify "apache-zookeeper-${ZK_VERSION}-bin.tar.gz.asc" "apache-zookeeper-${ZK_VERSION}-bin.tar.gz" &&\
    tar xzf "apache-zookeeper-${ZK_VERSION}-bin.tar.gz" &&\
    mv "apache-zookeeper-${ZK_VERSION}-bin" /opt/zookeeper &&\
    chown -R 2181:2181 /opt/zookeeper/conf/ &&\
    mkdir /opt/zookeeper/logs &&\
    chown 2181:2181 /opt/zookeeper/logs &&\
    rm -rf "${GNUPGHOME}" /tmp/apache-zookeeper-*

FROM [[ .docker.repo ]][[ .docker.base_images.java17.image ]]
# MAINTAINER is deprecated; express it as a label instead
LABEL maintainer="[[ .docker.maintainer ]]"

ENV PATH=/opt/zookeeper/bin:$PATH \
    ZK_DATA=/data

COPY --from=builder /opt/zookeeper/ /opt/zookeeper/

# Create the unprivileged zookeeper account (uid/gid 2181) and the /data
# directory where ZooKeeper stores snapshots and transaction logs
RUN set -eux &&\
    apk --no-cache upgrade &&\
    apk --no-cache add openssl bash libc6-compat gcompat &&\
    addgroup -g 2181 zookeeper &&\
    adduser --system --ingroup zookeeper --disabled-password --uid 2181 --home /opt/zookeeper --no-create-home --shell /sbin/nologin zookeeper &&\
    mkdir /data &&\
    chown zookeeper:zookeeper /data &&\
    chmod 700 /data

COPY root/ /

USER zookeeper
CMD ["zkServer.sh", "start-foreground"]
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/sh

set -euo pipefail

# ZooKeeper rewrites its config files, so on startup we copy our own
# (read-only) config from /local/conf into /opt/zookeeper/conf, where
# ZooKeeper can then modify them as it wishes.
# When /local/zoo.dyn.cfg is modified, it is pushed to ZooKeeper (using
# zkCli.sh), which in turn updates its local copy /conf/zoo.dyn.cfg on
# each node.
for cfg_file in zoo.cfg zoo.dyn.cfg; do
  if [ -f "/local/conf/${cfg_file}" ]; then
    cp "/local/conf/${cfg_file}" "/opt/zookeeper/conf/${cfg_file}"
  fi
done
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
#!/bin/sh

set -eo pipefail

# This script is a helper to convert PEM certificates and keys to the
# PKCS12 format that ZooKeeper expects.
#
# To use it, the following env vars need to be set:
# ZK_SRV_KEY_FILE  : the path of the PEM private key (must exist)
# ZK_SRV_CERT_FILE : the path of the PEM certificate (must exist)
# (both ZK_SRV_KEY_FILE and ZK_SRV_CERT_FILE can refer to the same file if it contains both)
# ZK_SRV_P12_FILE  : the path of the PKCS12 bundle to create
#
# And the same for the root (super user) cert/key:
# ZK_ROOT_KEY_FILE / ZK_ROOT_CERT_FILE / ZK_ROOT_P12_FILE

# make_p12 KEY_FILE CERT_FILE P12_FILE
# (Re)create P12_FILE from CERT_FILE/KEY_FILE, unless it already exists
# and is newer than KEY_FILE (i.e. the key has not been renewed since).
make_p12() {
  key_file=$1
  cert_file=$2
  p12_file=$3

  if [ ! -f "${p12_file}" ] || [ "${key_file}" -nt "${p12_file}" ]; then
    echo "Creating ${p12_file} (from ${cert_file}/${key_file})"
    # NOTE(review): the keystore password is a fixed, well-known value;
    # protection relies on filesystem permissions (0640) instead.
    openssl pkcs12 -export -out "${p12_file}" -in "${cert_file}" -inkey "${key_file}" -passout pass:password
    chmod 640 "${p12_file}"
  else
    echo "${p12_file} already exists and is newer than ${key_file}"
  fi
}

# Server certificate (quorum + client TLS)
if [ -n "${ZK_SRV_KEY_FILE:-}" ] && [ -f "${ZK_SRV_KEY_FILE}" ] && \
   [ -n "${ZK_SRV_CERT_FILE:-}" ] && [ -f "${ZK_SRV_CERT_FILE}" ] && \
   [ -n "${ZK_SRV_P12_FILE:-}" ]; then
  make_p12 "${ZK_SRV_KEY_FILE}" "${ZK_SRV_CERT_FILE}" "${ZK_SRV_P12_FILE}"
fi

# Root (super user) certificate
if [ -n "${ZK_ROOT_KEY_FILE:-}" ] && [ -f "${ZK_ROOT_KEY_FILE}" ] && \
   [ -n "${ZK_ROOT_CERT_FILE:-}" ] && [ -f "${ZK_ROOT_CERT_FILE}" ] && \
   [ -n "${ZK_ROOT_P12_FILE:-}" ]; then
  make_p12 "${ZK_ROOT_KEY_FILE}" "${ZK_ROOT_CERT_FILE}" "${ZK_ROOT_P12_FILE}"
fi
|
|
@ -0,0 +1,6 @@
|
|||
#!/bin/sh

set -euo pipefail

# Wrapper around zkCli.sh that pre-configures the JVM so the client
# connects over TLS with mutual authentication:
#   - Netty client socket in secure mode
#   - the root (super user) PKCS12 keystore from ZK_ROOT_P12_FILE
#   - the cluster CA as a PEM truststore from ZK_CA_CERT
# All arguments are passed through to zkCli.sh untouched.
flags="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty"
flags="${flags} -Dzookeeper.client.secure=true"
flags="${flags} -Dzookeeper.ssl.keyStore.location=${ZK_ROOT_P12_FILE}"
flags="${flags} -Dzookeeper.ssl.trustStore.location=${ZK_CA_CERT}"
flags="${flags} -Dzookeeper.ssl.keyStore.type=PKCS12"
flags="${flags} -Dzookeeper.ssl.keyStore.password=password"
flags="${flags} -Dzookeeper.ssl.trustStore.type=PEM"
flags="${flags} -Dzookeeper.ssl.hostnameVerification=false"
export CLIENT_JVMFLAGS="${flags}"

zkCli.sh "$@"
|
|
@ -0,0 +1 @@
|
|||
[[ template "common/mv_conf.sh" dict "ctx" . "services" (dict "zookeeper" .instance) ]]
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
[[ template "common/vault.rand_secrets" dict "ctx" . "task" "users" "keys" (coll.Slice "kafka-eventbus" "kafka-exchange") ]]
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
[[- $c := merge .zookeeper .]]
|
||||
[[ template "common/vault.mkpki.sh.tpl" $c ]]
|
||||
|
||||
vault write [[ $c.vault.pki.path ]]/roles/server \
|
||||
allowed_domains="[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" \
|
||||
allow_bare_domains=true \
|
||||
allow_subdomains=true \
|
||||
allow_localhost=false \
|
||||
allow_ip_sans=true \
|
||||
allow_wildcard_certificates=false \
|
||||
max_ttl=72h
|
||||
|
||||
vault write [[ $c.vault.pki.path ]]/roles/user-root \
|
||||
allowed_domains="root.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" \
|
||||
allow_bare_domains=true \
|
||||
allow_subdomains=false \
|
||||
allow_localhost=false \
|
||||
allow_ip_sans=false \
|
||||
allow_wildcard_certificates=false \
|
||||
max_ttl=72h
|
|
@ -0,0 +1,9 @@
|
|||
Server {
|
||||
org.apache.zookeeper.server.auth.DigestLoginModule required
|
||||
{{- with secret "[[ .vault.prefix ]]kv/service/[[ .instance ]]/users" }}
|
||||
{{- range $k, $v := .Data.data }}
|
||||
user_{{ $k }}="{{ $v }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
;
|
||||
};
|
|
@ -0,0 +1,47 @@
|
|||
dataDir=/data
|
||||
dataLogDir=/data/log
|
||||
|
||||
autopurge.purgeInterval=1
|
||||
snapshot.compression.method=snappy
|
||||
|
||||
initLimit=10
|
||||
syncLimit=2
|
||||
audit.enable=true
|
||||
learner.closeSocketAsync=true
|
||||
leader.closeSocketAsync=true
|
||||
quorumListenOnAllIPs=true
|
||||
standaloneEnabled=false
|
||||
reconfigEnabled=true
|
||||
dynamicConfigFile=/opt/zookeeper/conf/zoo.dyn.cfg
|
||||
4lw.commands.whitelist=*
|
||||
|
||||
sslQuorum=true
|
||||
serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
|
||||
ssl.quorum.keyStore.location=/secrets/zookeeper.p12
|
||||
ssl.quorum.keyStore.type=PKCS12
|
||||
ssl.quorum.keyStore.password=password
|
||||
ssl.quorum.trustStore.location=/secrets/zookeeper.ca.pem
|
||||
ssl.quorum.trustStore.type=PEM
|
||||
ssl.quorum.clientAuth=need
|
||||
|
||||
ssl.keyStore.location=/secrets/zookeeper.p12
|
||||
ssl.keyStore.type=PKCS12
|
||||
ssl.keyStore.password=password
|
||||
ssl.trustStore.location=/secrets/zookeeper.ca.pem
|
||||
ssl.trustStore.type=PEM
|
||||
|
||||
client.portUnification=true
|
||||
zookeeper.superUser=root
|
||||
X509AuthenticationProvider.superUser=CN=root.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]],OU=[[ .vault.pki.ou ]]
|
||||
enforce.auth.schemes=sasl,x509
|
||||
#enforce.auth.schemes=x509
|
||||
enforce.auth.enabled=true
|
||||
authProvider.sasl=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
|
||||
authProvider.x509=org.apache.zookeeper.server.auth.X509AuthenticationProvider
|
||||
enableEagerACLCheck=true
|
||||
|
||||
metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
|
||||
metricsProvider.httpPort=7000
|
||||
metricsProvider.exportJvmInfo=true
|
||||
|
||||
admin.enableServer=false
|
|
@ -0,0 +1,6 @@
|
|||
server.{{ env "NOMAD_ALLOC_INDEX" | parseInt | add 1 }}={{ env "NOMAD_IP_quorum" }}:{{ env "NOMAD_PORT_quorum" }}:{{ env "NOMAD_HOST_PORT_elect" }};127.0.0.1:2181
|
||||
{{ range $_, $instance := service "[[ .instance ]]-quorum[[ .consul.suffix ]]" }}
|
||||
{{- if not (eq (env "NOMAD_ALLOC_INDEX") (index $instance.ServiceMeta "alloc")) -}}
|
||||
server.{{ (index $instance.ServiceMeta "alloc") | parseInt | add 1 }}={{ $instance.Address }}:{{ $instance.Port }}:{{ (index $instance.ServiceMeta "elect-port") }};127.0.0.1:2181
|
||||
{{- end }}
|
||||
{{ end }}
|
|
@ -0,0 +1,45 @@
|
|||
---
|
||||
|
||||
# Name of this instance of the job
|
||||
instance: zookeeper
|
||||
|
||||
# ZooKeeper settings
|
||||
zookeeper:
|
||||
|
||||
# Docker image to use
|
||||
image: '[[ .docker.repo ]]zookeeper:latest'
|
||||
|
||||
# Number of instances to run (should be 3 or 5 for high availability)
|
||||
count: 3
|
||||
|
||||
# Env vars to set in the task
|
||||
env:
|
||||
JVMFLAGS: "-Djava.net.preferIPv4Stack=true"
|
||||
|
||||
# Resource allocation
|
||||
resources:
|
||||
cpu: 100
|
||||
memory: 512
|
||||
|
||||
# Vault settings
|
||||
vault:
|
||||
# ZooKeeper will use a PKI from vault to issue certificates
|
||||
pki:
|
||||
path: '[[ .vault.prefix ]]pki/[[ .instance ]]'
|
||||
ou: ZooKeeper
|
||||
issuer: '[[ .vault.prefix ]]pki/root'
|
||||
|
||||
# List of vault policies to attach to the task
|
||||
policies:
|
||||
- '[[ .instance ]][[ .consul.suffix ]]'
|
||||
|
||||
# Volumes for data persistence
|
||||
volumes:
|
||||
data:
|
||||
source: '[[ .instance ]]-data'
|
||||
type: csi
|
||||
per_alloc: true
|
||||
|
||||
prometheus:
|
||||
# URL (from inside the container POV) where prometheus metrics are available
|
||||
metrics_url: http://127.0.0.1:7000/metrics
|
|
@ -0,0 +1,15 @@
|
|||
# Read the KV store
|
||||
path "[[ .vault.prefix ]]kv/data/service/[[ .instance ]]" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
path "[[ .vault.prefix ]]kv/data/service/[[ .instance ]]/users" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
# Issue server cert
|
||||
path "[[ .zookeeper.vault.pki.path ]]/issue/server" {
|
||||
capabilities = ["update"]
|
||||
}
|
||||
# Issue super user cert
|
||||
path "[[ .zookeeper.vault.pki.path ]]/issue/user-root" {
|
||||
capabilities = ["update"]
|
||||
}
|
|
@ -0,0 +1,193 @@
|
|||
job "[[ .instance ]]" {
|
||||
|
||||
[[- $c := merge .zookeeper . ]]
|
||||
|
||||
[[ template "common/job_start" $c ]]
|
||||
|
||||
group "server" {
|
||||
|
||||
count = [[ $c.count ]]
|
||||
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "quorum" {}
|
||||
port "elect" {}
|
||||
[[- if $c.prometheus.enabled ]]
|
||||
port "metrics" {}
|
||||
[[- end ]]
|
||||
}
|
||||
|
||||
[[ template "common/volumes" $c ]]
|
||||
|
||||
# This is the main service clients will contact to reach ZooKeeper cluster
|
||||
service {
|
||||
name = "[[ .instance ]][[ .consul.suffix ]]"
|
||||
port = 2181
|
||||
|
||||
[[ template "common/metrics_meta" $c ]]
|
||||
[[ template "common/connect" $c ]]
|
||||
|
||||
# Check for ZooKeeper to be either in leader or follower mode
|
||||
check {
|
||||
name = "mode"
|
||||
type = "script"
|
||||
command = "bash"
|
||||
args = [
|
||||
"-c",
|
||||
"ZKMODE=$(zkServer.sh status 2>&1 | grep Mode | awk '{ print $2 }'); echo ${ZKMODE}; if [[ "[[" ]] (\"${ZKMODE}\" =~ ^(leader|follower)$ && [[ $c.count ]] -gt 1) || (\"${ZKMODE}\" = \"standalone\" && [[ $c.count ]] -eq 1) [[ "]]" ]]; then exit 0; else exit 1; fi"
|
||||
]
|
||||
on_update = "ignore"
|
||||
interval = "30s"
|
||||
timeout = "8s"
|
||||
task = "zookeeper"
|
||||
}
|
||||
}
|
||||
|
||||
# This service will be used by the different instances of ZooKeeper to find each other
|
||||
service {
|
||||
name = "[[ .instance ]]-quorum[[ .consul.suffix ]]"
|
||||
port = "quorum"
|
||||
|
||||
meta {
|
||||
elect-port = "${NOMAD_PORT_elect}"
|
||||
alloc = "${NOMAD_ALLOC_INDEX}"
|
||||
}
|
||||
}
|
||||
|
||||
[[ template "common/task.metrics_proxy" $c ]]
|
||||
|
||||
task "zookeeper" {
|
||||
driver = "[[ $c.nomad.driver ]]"
|
||||
user = 2181
|
||||
|
||||
config {
|
||||
image = "[[ $c.image ]]"
|
||||
pids_limit = 100
|
||||
# ZooKeeper needs write access to /opt/zookeeper/conf
|
||||
# So until Nomad supports passing uid/gid for tmpfs mount (see https://github.com/hashicorp/nomad/issues/12533)
|
||||
# We must keep the rootfs writable
|
||||
# readonly_rootfs = true
|
||||
volumes = ["local/conf/myid:/data/myid:ro"]
|
||||
}
|
||||
|
||||
[[ template "common/vault.policies" $c ]]
|
||||
|
||||
env {
|
||||
SERVER_JVMFLAGS = "-Djava.security.auth.login.config=/secrets/jaas.conf"
|
||||
ZK_SRV_CERT_FILE = "/secrets/zookeeper.bundle.pem"
|
||||
ZK_SRV_KEY_FILE = "/secrets/zookeeper.bundle.pem"
|
||||
ZK_SRV_P12_FILE = "/secrets/zookeeper.p12"
|
||||
ZK_ROOT_CERT_FILE = "/secrets/root.bundle.pem"
|
||||
ZK_ROOT_KEY_FILE = "/secrets/root.bundle.pem"
|
||||
ZK_ROOT_P12_FILE = "/secrets/root.p12"
|
||||
ZK_CA_CERT = "/secrets/zookeeper.ca.pem"
|
||||
}
|
||||
|
||||
[[ template "common/file_env" $c ]]
|
||||
|
||||
# This is just the myid, which corresponds to the alloc index + 1
|
||||
# (must be between 1 and 255)
|
||||
template {
|
||||
data = <<_EOT
|
||||
{{ env "NOMAD_ALLOC_INDEX" | parseInt | add 1 -}}
|
||||
_EOT
|
||||
destination = "local/conf/myid"
|
||||
}
|
||||
|
||||
# Main ZooKeeper configuration
|
||||
template {
|
||||
data = <<_EOT
|
||||
[[ template "scandm-zookeeper/zoo.cfg.tpl" $c ]]
|
||||
_EOT
|
||||
destination = "local/conf/zoo.cfg"
|
||||
uid = 102181
|
||||
gid = 102181
|
||||
}
|
||||
|
||||
# Dynamic configuration (contains nodes in the cluster)
|
||||
template {
|
||||
data = <<_EOT
|
||||
[[ template "scandm-zookeeper/zoo.dyn.cfg.tpl" $c ]]
|
||||
_EOT
|
||||
destination = "local/conf/zoo.dyn.cfg"
|
||||
# When this file changes, do not restart, but just run a script to reconfigure ZooKeeper
|
||||
change_mode = "script"
|
||||
change_script {
|
||||
command = "zkadm.sh"
|
||||
args = ["reconfig", "-file", "/local/conf/zoo.dyn.cfg"]
|
||||
}
|
||||
}
|
||||
|
||||
# SASL users
|
||||
template {
|
||||
data = <<_EOT
|
||||
[[ template "scandm-zookeeper/jaas.conf.tpl" $c ]]
|
||||
_EOT
|
||||
destination = "secrets/jaas.conf"
|
||||
uid = 100000
|
||||
gid = 102181
|
||||
perms = "0640"
|
||||
splay = "60s"
|
||||
}
|
||||
|
||||
# Server certificate
|
||||
template {
|
||||
data = <<-EOF
|
||||
{{ with pkiCert "[[ $c.vault.pki.path ]]/issue/server"
|
||||
(printf "common_name=%s.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
|
||||
"alt_names=[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]"
|
||||
(printf "ip_sans=%s" (env "NOMAD_IP_quorum"))
|
||||
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 650)) }}
|
||||
{{ .Cert }}
|
||||
{{ .Key }}
|
||||
{{ end }}
|
||||
EOF
|
||||
destination = "secrets/zookeeper.bundle.pem"
|
||||
uid = 102181
|
||||
gid = 102181
|
||||
perms = "0640"
|
||||
}
|
||||
|
||||
# Super user certificate
|
||||
template {
|
||||
data = <<_EOT
|
||||
{{- with pkiCert "[[ $c.vault.pki.path ]]/issue/user-root" "common_name=root.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" "ttl=24h" }}
|
||||
{{ .Cert }}
|
||||
{{ .Key }}
|
||||
{{- end }}
|
||||
_EOT
|
||||
destination = "secrets/root.bundle.pem"
|
||||
uid = 102181
|
||||
gid = 102181
|
||||
perms = "0640"
|
||||
change_mode = "script"
|
||||
# When certificate is renewed, no need to restart the alloc
|
||||
# just update the root p12
|
||||
change_script {
|
||||
command = "/entrypoint.d/20-cert-pkcs.sh"
|
||||
}
|
||||
}
|
||||
|
||||
# Certificate authority, used by server to verify peers in the quorum communications
|
||||
# But also for the mTLS auth during client <-> server communications
|
||||
template {
|
||||
data = <<_EOT
|
||||
{{ with secret "[[ $c.vault.pki.path ]]/cert/ca_chain" }}
|
||||
{{ .Data.ca_chain }}
|
||||
{{ end }}
|
||||
_EOT
|
||||
destination = "secrets/zookeeper.ca.pem"
|
||||
uid = 100000
|
||||
gid = 100000
|
||||
}
|
||||
|
||||
[[ template "common/resources" $c ]]
|
||||
|
||||
# Mount the persistent volume in /data, where ZooKeeper will store its data
|
||||
volume_mount {
|
||||
volume = "data"
|
||||
destination = "/data"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue