zookeeper/zookeeper.nomad.hcl

job "[[ .instance ]]" {
[[- $c := merge .zookeeper . ]]
[[ template "common/job_start" $c ]]
group "server" {
count = [[ $c.count ]]
constraint {
operator = "distinct_hosts"
value = "true"
}
network {
mode = "bridge"
port "quorum" {}
port "elect" {}
[[- if conv.ToBool $c.prometheus.enabled ]]
port "metrics" {}
[[- end ]]
}
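    # Note (assumption): "quorum" and "elect" are dynamic ports which zoo.cfg.tpl is
    # expected to wire to ZooKeeper's peer and leader-election listeners (the roles
    # usually played by 2888/3888), while clients keep connecting on 2181 through
    # the service registered below.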
[[ template "common/volumes" $c ]]
# This is the main service clients will contact to reach ZooKeeper cluster
service {
name = "[[ .instance ]][[ .consul.suffix ]]"
port = 2181
[[ template "common/metrics_meta" $c ]]
[[ template "common/connect" $c ]]
# Check for ZooKeeper to be either in leader or follower mode
check {
name = "mode"
type = "script"
command = "bash"
args = [
"-c",
"ZKMODE=$(zkServer.sh status 2>&1 | grep Mode | awk '{ print $2 }'); echo ${ZKMODE}; if [[ "[[" ]] (\"${ZKMODE}\" =~ ^(leader|follower)$ && [[ $c.count ]] -gt 1) || (\"${ZKMODE}\" = \"standalone\" && [[ $c.count ]] -eq 1) [[ "]]" ]]; then exit 0; else exit 1; fi"
]
on_update = "ignore"
interval = "30s"
timeout = "8s"
task = "zookeeper"
}
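      # For readability: the [[ "[[" ]] and [[ "]]" ]] expressions above only emit
      # literal "[[" / "]]", so with count = 3 the rendered bash test is roughly:
      #   if [[ ("${ZKMODE}" =~ ^(leader|follower)$ && 3 -gt 1) || ("${ZKMODE}" = "standalone" && 3 -eq 1) ]]; then exit 0; else exit 1; fi
      # i.e. the node must report leader or follower mode in a multi-node cluster,
      # or standalone mode when running a single instance.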
    }
    # This service will be used by the different instances of ZooKeeper to find each other
    service {
      name = "[[ .instance ]]-quorum[[ .consul.suffix ]]"
      port = "quorum"
      meta {
        elect-port = "${NOMAD_PORT_elect}"
        alloc = "${NOMAD_ALLOC_INDEX}"
      }
    }
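    # Presumably zoo.dyn.cfg.tpl (rendered below) queries this quorum service in
    # Consul, using the elect-port and alloc meta above, to build the dynamic
    # list of cluster members.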
[[ template "common/task.metrics_proxy" $c ]]
task "zookeeper" {
driver = "[[ $c.nomad.driver ]]"
user = 2181
config {
image = "[[ $c.image ]]"
pids_limit = 100
# ZooKeeper needs write access to /opt/zookeeper/conf
# So until Nomad supports passing uid/gid for tmpfs mount (see https://github.com/hashicorp/nomad/issues/12533)
# We must keep the rootfs writable
# readonly_rootfs = true
volumes = ["local/conf/myid:/data/myid:ro"]
}
[[ template "common/vault.policies" $c ]]
env {
SERVER_JVMFLAGS = "-Djava.security.auth.login.config=/secrets/jaas.conf"
ZK_SRV_CERT_FILE = "/secrets/zookeeper.bundle.pem"
ZK_SRV_KEY_FILE = "/secrets/zookeeper.bundle.pem"
ZK_SRV_P12_FILE = "/secrets/zookeeper.p12"
ZK_ROOT_CERT_FILE = "/secrets/root.bundle.pem"
ZK_ROOT_KEY_FILE = "/secrets/root.bundle.pem"
ZK_ROOT_P12_FILE = "/secrets/root.p12"
ZK_CA_CERT = "/secrets/zookeeper.ca.pem"
}
[[ template "common/file_env" $c ]]
# This is just the myid, which corresponds to the alloc index + 1
# (must be between 1 and 255)
template {
data = <<_EOT
{{ env "NOMAD_ALLOC_INDEX" | parseInt | add 1 -}}
_EOT
destination = "local/conf/myid"
}
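      # e.g. alloc index 0 renders "1", alloc index 1 renders "2", and so on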
      # Main ZooKeeper configuration
      template {
        data = <<_EOT
[[ template "zookeeper/zoo.cfg.tpl" $c ]]
_EOT
        destination = "local/conf/zoo.cfg"
        uid = 102181
        gid = 102181
      }
      # Dynamic configuration (contains the nodes in the cluster)
      template {
        data = <<_EOT
[[ template "zookeeper/zoo.dyn.cfg.tpl" $c ]]
_EOT
        destination = "local/conf/zoo.dyn.cfg"
        # When this file changes, do not restart, but just run a script to reconfigure ZooKeeper
        change_mode = "script"
        change_script {
          command = "zkadm.sh"
          args = ["reconfig", "-file", "/local/conf/zoo.dyn.cfg"]
        }
      }
      # SASL users
      template {
        data = <<_EOT
[[ template "zookeeper/jaas.conf.tpl" $c ]]
_EOT
        destination = "secrets/jaas.conf"
        uid = 100000
        gid = 102181
        perms = "0640"
        splay = "60s"
      }
      # Server certificate
      template {
        data = <<-EOF
{{ with pkiCert "[[ $c.vault.pki.path ]]/issue/server"
(printf "common_name=%s.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
"alt_names=[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]"
(printf "ip_sans=%s" (env "NOMAD_IP_quorum"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 650)) }}
{{ .Cert }}
{{ .Key }}
{{ end }}
EOF
        destination = "secrets/zookeeper.bundle.pem"
        uid = 102181
        gid = 102181
        perms = "0640"
      }
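      # Note: the server certificate above embeds the alloc index in its CN and
      # uses a TTL of (NOMAD_ALLOC_INDEX * 24 + 650) hours, giving each instance
      # a slightly different lifetime, presumably so the certificates do not all
      # come up for renewal at the same time.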
      # Super user certificate
      template {
        data = <<_EOT
{{- with pkiCert "[[ $c.vault.pki.path ]]/issue/user-root" "common_name=root.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" "ttl=24h" }}
{{ .Cert }}
{{ .Key }}
{{- end }}
_EOT
        destination = "secrets/root.bundle.pem"
        uid = 102181
        gid = 102181
        perms = "0640"
        change_mode = "script"
        # When the certificate is renewed, there is no need to restart the alloc,
        # just update the root p12
        change_script {
          command = "/entrypoint.d/20-cert-pkcs.sh"
        }
      }
      # Certificate authority, used by the server to verify peers in quorum communications,
      # but also for mTLS auth during client <-> server communications
      template {
        data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/cert/ca_chain" }}
{{ .Data.ca_chain }}
{{ end }}
_EOT
        destination = "secrets/zookeeper.ca.pem"
        uid = 100000
        gid = 100000
      }
      [[ template "common/resources" $c ]]
      # Mount the persistent volume in /data, where ZooKeeper will store its data
      volume_mount {
        volume = "data"
        destination = "/data"
      }
    }
  }
}