Update to 8.13.1 and render example

Daniel Berteaud 2024-04-02 13:15:09 +02:00
parent 86ee5be1be
commit 17c8f817ca
17 changed files with 1246 additions and 1 deletion

example/README.md Normal file

@@ -0,0 +1,21 @@
# elasticsearch
Nomad job template for an Elasticsearch cluster
# Set up the initial password
A random password is generated for the elastic system account (stored in Vault under kv/service/elasticsearch, key elastic_pwd), but it is not applied automatically. You must set up the passwords with:
```
elasticsearch-setup-passwords interactive
```
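If needed, the generated password can be read back from Vault (assuming your vault CLI is already authenticated):
```
vault kv get -field=elastic_pwd kv/service/elasticsearch
```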
# Configure replicas for indices
Once the elastic passwords are set, you can configure indices with the desired number of replicas:
```
for INDEX in inventory stock purchase-requests; do
curl --user 'elastic:XXX' \
-X PUT \
-H 'Content-Type: application/json' \
http://localhost:9200/${INDEX}/_settings \
-d '{ "index.number_of_replicas" : 2 }'
done
```
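To verify that the settings were applied, you can list each index with its replica count (the `rep` column):
```
curl --user 'elastic:XXX' 'http://localhost:9200/_cat/indices?v&h=index,rep'
```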


@@ -0,0 +1,4 @@
Kind = "service-defaults"
Name = "elasticsearch"
Protocol = "http"
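# 300000 ms = 5 minutes, matching the RequestTimeout of the service-resolver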
LocalRequestTimeoutMs = "300000"


@@ -0,0 +1,15 @@
Kind = "service-intentions"
Name = "elasticsearch"
Sources = [
{
Name = "traefik"
Permissions = [
{
Action = "allow"
HTTP {
PathPrefix = "/"
}
}
]
},
]


@@ -0,0 +1,3 @@
Kind = "service-resolver"
Name = "elasticsearch"
RequestTimeout = "5m"


@@ -0,0 +1,370 @@
job "elasticsearch" {
datacenters = ["dc1"]
region = "global"
group "server" {
count = 3
constraint {
operator = "distinct_hosts"
value = "true"
}
# Wait a few seconds between service deregistration from consul catalog and task kill
shutdown_delay = "6s"
network {
mode = "bridge"
port "transport" {}
}
volume "data" {
source = "elasticsearch-data"
type = "csi"
access_mode = "single-node-writer"
attachment_mode = "file-system"
per_alloc = true
}
# The main elasticsearch service. It'll only be available through the mesh
service {
name = "elasticsearch"
port = 9200
meta {
addr = "es-${NOMAD_ALLOC_INDEX}.elasticsearch.service.consul"
alloc = "${NOMAD_ALLOC_INDEX}"
datacenter = "${NOMAD_DC}"
group = "${NOMAD_GROUP_NAME}"
job = "${NOMAD_JOB_NAME}"
namespace = "${NOMAD_NAMESPACE}"
node = "${node.unique.name}"
region = "${NOMAD_REGION}"
}
connect {
sidecar_service {
}
sidecar_task {
config {
args = [
"-c",
"${NOMAD_SECRETS_DIR}/envoy_bootstrap.json",
"-l",
"${meta.connect.log_level}",
"--concurrency",
"${meta.connect.proxy_concurrency}",
"--disable-hot-restart"
]
}
resources {
cpu = 50
memory = 64
}
}
}
# Use a script check instead of http so we can report ok for green, warning for yellow and critical for any other state
check {
name = "health"
type = "script"
command = "sh"
task = "server"
args = [
"-c",
"set -e; STATUS=$(curl localhost:9200/_cluster/health?local=true | jq -r .status); if [ \"$STATUS\" == \"green\" ]; then exit 0; elif [ \"$STATUS\" == \"yellow\" ]; then exit 1; else exit 2; fi"
]
interval = "30s"
timeout = "8s"
}
tags = [
"es-${NOMAD_ALLOC_INDEX}",
]
}
# The transport service is used by the different instances to find each other and form the cluster
service {
name = "elasticsearch-transport"
port = "transport"
meta {
alloc = "${NOMAD_ALLOC_INDEX}"
}
}
# The main Elasticsearch task
task "server" {
driver = "docker"
leader = true
# Give ES some time to shutdown
kill_timeout = "120s"
config {
image = "danielberteaud/elasticsearch:8.13.1-1"
pids_limit = 1024
readonly_rootfs = true
volumes = [
"secrets/entrypoint.env:/entrypoint.d/94-elasticsearch-users.env"
]
mount {
type = "tmpfs"
target = "/tmp"
tmpfs_options {
size = 2000000
}
}
}
vault {
policies = ["elasticsearch"]
env = false
disable_file = true
change_mode = "noop"
}
env {
# Use /local/tmp as rootfs is read only
ES_TMPDIR = "/local/tmp"
TMPDIR = "/local/tmp"
ES_PATH_CONF = "/secrets"
}
# Use a template block instead of env {} so we can fetch values from vault
template {
data = <<_EOT
JVM_XMX_RATIO=0.4
LANG=fr_FR.utf8
TZ=Europe/Paris
_EOT
destination = "secrets/.env"
perms = "0400"
env = true
}
# The main configuration file
template {
data = <<_EOT
path:
data: /data
logs: /alloc/logs
cluster:
name: elasticsearch
initial_master_nodes:
- elasticsearch-0
- elasticsearch-1
- elasticsearch-2
node:
name: elasticsearch-{{ env "NOMAD_ALLOC_INDEX" }}
roles:
- master
- data
- ingest
- transform
http:
port: 9200
host: 127.0.0.1
transport:
port: {{ env "NOMAD_ALLOC_PORT_transport" }}
host: 0.0.0.0
publish_port: {{ env "NOMAD_HOST_PORT_transport" }}
publish_host: {{ env "NOMAD_HOST_IP_transport" }}
discovery:
seed_providers: file
xpack:
watcher:
enabled: false
security:
enabled: true
authc:
anonymous:
username: anonymous
roles: health
transport:
ssl:
enabled: True
verification_mode: full
client_authentication: required
key: /secrets/es.key
certificate: /secrets/es.crt
certificate_authorities: /secrets/ca.crt
_EOT
destination = "secrets/elasticsearch.yml"
}
# This is the list of nodes. It's updated when an instance is restarted, but elasticsearch
# already monitors and reloads it automatically, so make it a noop for Nomad
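# Each instance filters itself out of the list by comparing the alloc meta with its own NOMAD_ALLOC_INDEX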
template {
data = <<_EOT
{{ range $index, $instance := service "elasticsearch-transport" }}
{{- if not (eq (env "NOMAD_ALLOC_INDEX") (index $instance.ServiceMeta "alloc")) }}
{{ .Address }}:{{ .Port }}
{{- end }}
{{- end }}
_EOT
destination = "secrets/unicast_hosts.txt"
change_mode = "noop"
}
# Custom roles
template {
data = <<_EOT
monitor:
cluster:
- monitor
indices:
- names:
- '*'
privileges:
- monitor
health:
cluster:
- 'cluster:monitor/health'
healthdata-services:
indices:
- names:
- eht-scandmweb-surgery
privileges:
- all
stock-services:
indices:
- names:
- eht-scandmweb-purchase-requests
- eht-scandmweb-inventory
- eht-scandmweb-stock
privileges:
- all
_EOT
destination = "secrets/roles.yml"
}
# An entrypoint snippet to create users
# Note: created with a .env extension so it's sourced in the current shell,
# as it cannot be executed from /secrets, which is mounted with noexec
template {
data = <<_EOT
#!/bin/sh
# vim: syntax=sh
set -euo pipefail
# ES uses ES_JAVA_HOME; having JAVA_HOME set as well emits warnings in the logs
unset JAVA_HOME
echo "Adding elastic bootstrap password in the keystore"
[ -f config/elasticsearch.keystore ] || elasticsearch-keystore create
echo '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.es_bootstrap_pwd }}{{ end }}' | elasticsearch-keystore add -x 'bootstrap.password'
{{ with secret "kv/service/elasticsearch" -}}
echo "Creating exporter user"
(elasticsearch-users list | grep -qE '^exporter$') || elasticsearch-users useradd exporter -r monitor -p '{{ .Data.data.exporter_pwd }}'
{{- end }}
echo "Creating user healthdata-services"
(elasticsearch-users list | grep -qE '^healthdata-services$') || elasticsearch-users useradd healthdata-services -p '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.healthdata_services_pwd }}{{ end }}' -r healthdata-services
echo "Creating user stock-services"
(elasticsearch-users list | grep -qE '^stock-services$') || elasticsearch-users useradd stock-services -p '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.stock_services_pwd }}{{ end }}' -r stock-services
_EOT
destination = "secrets/entrypoint.env"
uid = 100000
gid = 109200
perms = "0440"
}
# The certificate and private key for ES.
# ES doesn't support PEM bundles with both the cert and the key in the same file, so we create two files
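# The TTL is staggered per allocation (72h + 24h * alloc index) so the certificates don't all expire at the same time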
template {
data = <<_EOT
{{ with secret "pki/elasticsearch/issue/server"
"common_name=elasticsearch.service.consul"
(printf "alt_names=es-%s.elasticsearch.service.consul" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.certificate }}
{{ end }}
_EOT
destination = "secrets/es.crt"
# ES monitors and reloads the cert every 5 sec, so make it a noop for Nomad
change_mode = "noop"
}
# The private key
template {
data = <<_EOT
{{ with secret "pki/elasticsearch/issue/server"
"common_name=elasticsearch.service.consul"
(printf "alt_names=es-%s.elasticsearch.service.consul" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.private_key }}
{{ end }}
_EOT
destination = "secrets/es.key"
# ES monitors and reloads the key every 5 sec, so make it a noop for Nomad
change_mode = "noop"
uid = 100000
gid = 109200
perms = "0440"
}
# The trusted CA to verify other nodes
template {
data = <<_EOT
{{ with secret "pki/elasticsearch/cert/ca_chain" }}{{ .Data.ca_chain }}{{ end }}
_EOT
destination = "secrets/ca.crt"
change_mode = "noop"
}
# Data is stored in /data
volume_mount {
volume = "data"
destination = "/data"
}
resources {
cpu = 300
memory = 2048
}
}
}
}


@@ -0,0 +1,25 @@
FROM danielberteaud/alpine:24.4-1 AS builder
ARG ES_EXPORTER_VERSION=1.7.0
RUN set -eux &&\
apk add curl ca-certificates &&\
cd /tmp &&\
curl -sSLO https://github.com/prometheus-community/elasticsearch_exporter/releases/download/v${ES_EXPORTER_VERSION}/elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz &&\
curl -sSLO https://github.com/prometheus-community/elasticsearch_exporter/releases/download/v${ES_EXPORTER_VERSION}/sha256sums.txt &&\
grep "elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz" sha256sums.txt | sha256sum -c &&\
tar xzf elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz &&\
mv elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64/elasticsearch_exporter /
FROM danielberteaud/alpine:24.4-1
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
RUN set -eux &&\
apk --no-cache upgrade &&\
apk add ca-certificates
COPY --from=builder /elasticsearch_exporter /usr/local/bin/
EXPOSE 9114
USER 9114
CMD ["elasticsearch_exporter"]


@@ -0,0 +1,49 @@
FROM danielberteaud/alpine:24.4-1 AS builder
ARG ES_VERSION=8.13.1
ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz /tmp
ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz.sha512 /tmp
RUN set -eux &&\
apk add tar &&\
cd /tmp &&\
sha512sum -c elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz.sha512 &&\
tar xzf elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz &&\
mv elasticsearch-${ES_VERSION} /opt/elasticsearch &&\
# Remove X-Pack ML as it's not used, and not working on Alpine \
rm -rf /opt/elasticsearch/modules/x-pack-ml/ &&\
# Remove the JDK, we have our own \
rm -rf /opt/elasticsearch/jdk &&\
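# Make elasticsearch-env report the docker distribution type instead of tar \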
sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /opt/elasticsearch/bin/elasticsearch-env &&\
mkdir -p /opt/elasticsearch/config/jvm.config.d &&\
mkdir -p /opt/elasticsearch/.aws/config &&\
chown -R 9200:9200 /opt/elasticsearch/config &&\
chown -R 9200:9200 /opt/elasticsearch/.aws
FROM danielberteaud/java:17.24.4-1
LABEL maintainer="Daniel Berteaud <dbd@ehtrace.com>"
ENV PATH=/opt/elasticsearch/bin:$PATH \
ES_JAVA_HOME=/opt/java/openjdk \
ES_PATH_CONF=/opt/elasticsearch/config \
ES_TMPDIR=/tmp
COPY --from=builder /opt/elasticsearch/ /opt/elasticsearch/
RUN set -eux &&\
apk --no-cache update &&\
apk --no-cache add openssl bash jq &&\
addgroup -g 9200 elasticsearch &&\
adduser --system --ingroup elasticsearch --disabled-password --uid 9200 elasticsearch --home /opt/elasticsearch --no-create-home --shell /sbin/nologin &&\
mkdir /data &&\
chown -R elasticsearch:elasticsearch /data
COPY root/ /
RUN set -eux &&\
chown -R elasticsearch:elasticsearch /opt/elasticsearch/config
WORKDIR /opt/elasticsearch
USER elasticsearch
EXPOSE 9200 9300
CMD ["elasticsearch"]


@@ -0,0 +1,15 @@
#!/bin/sh
set -euo pipefail
[ -d "${ES_TMPDIR}" ] || mkdir -p "${ES_TMPDIR}"
if [ -n "${ES_PATH_CONF}" ] && [ "${ES_PATH_CONF}" != "/opt/elasticsearch/config" ]; then
[ -d "${ES_PATH_CONF}" ] || mkdir -p "${ES_PATH_CONF}"
for FILE in /opt/elasticsearch/config/*; do
if [ ! -e "${ES_PATH_CONF}/$(basename "${FILE}")" ]; then
echo "Copy ${FILE} to ${ES_PATH_CONF}/$(basename "${FILE}")"
cp -r "${FILE}" "${ES_PATH_CONF}/$(basename "${FILE}")"
fi
done
fi


@@ -0,0 +1,10 @@
#!/bin/sh
set -euo pipefail
[ -d "${ES_TMPDIR}" ] || mkdir -p "${ES_TMPDIR}"
export ES_JAVA_OPTS="${JAVA_OPTS:-} -Xms${JVM_XMX} ${ES_JAVA_OPTS:-} -Des.cgroups.hierarchy.override=/"
echo "ES_JAVA_OPTS=${ES_JAVA_OPTS}"
# Elasticsearch uses ES_JAVA_OPTS, unset JAVA_OPTS to prevent warnings in logs
unset JAVA_OPTS


@@ -0,0 +1,3 @@
cluster.name: "docker-cluster"
network.host: 0.0.0.0
path.data: /data


@@ -0,0 +1,7 @@
## G1GC Configuration
14-19:-XX:+UseG1GC
## Log settings
-Xlog:disable
-Xlog:all=warning:stderr:utctime,level,tags
-Xlog:gc=debug:stderr:utctime


@@ -0,0 +1,159 @@
status = error
######## Server JSON ############################
appender.rolling.type = Console
appender.rolling.name = rolling
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
################################################
################################################
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
######## Deprecation JSON #######################
appender.deprecation_rolling.type = Console
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation.elasticsearch
appender.deprecation_rolling.layout.esmessagefields=x-opaque-id,key,category,elasticsearch.elastic_product_origin
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning
#################################################
#################################################
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.header_warning.ref = header_warning
logger.deprecation.additivity = false
######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = Console
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
#################################################
#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false
######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = Console
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source
#################################################
#################################################
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false
appender.audit_rolling.type = Console
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
"type":"audit", \
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
%varsNotEmpty{, "user.roles":%map{user.roles}}\
%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
%varsNotEmpty{, "indices":%map{indices}}\
%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
%varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
%varsNotEmpty{, "put":%map{put}}\
%varsNotEmpty{, "delete":%map{delete}}\
%varsNotEmpty{, "change":%map{change}}\
%varsNotEmpty{, "create":%map{create}}\
%varsNotEmpty{, "invalidate":%map{invalidate}}\
}%n
# "node.name" node name from the `elasticsearch.yml` settings
# "node.id" node id which should not change between cluster restarts
# "host.name" unresolved hostname of the local node
# "host.ip" the local bound ip (i.e. the ip listening for connections)
# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
# "user.name" the subject name as authenticated by a realm
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
# "request.body" the content of the request body entity, JSON escaped
# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "trace_id" an identifier conveyed by the part of "traceparent" request header
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rule if the "origin.type" is "ip_filter"
# the "put", "delete", "change", "create", "invalidate" fields are only present
# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect
logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false
logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal

example/init/vault-pki Executable file

@@ -0,0 +1,102 @@
#!/bin/sh
# vim: syntax=sh
set -euo pipefail
TMP=$(mktemp -d)
INITIAL_SETUP=false
if [ "$(vault secrets list -format json | jq -r '.["pki/elasticsearch/"].type')" != "pki" ]; then
INITIAL_SETUP=true
fi
if [ "${INITIAL_SETUP}" = "true" ]; then
# Enable the secret engine
echo "Mounting new PKI secret engine at pki/elasticsearch"
vault secrets enable -path=pki/elasticsearch pki
else
echo "Secret engine already mounted at pki/elasticsearch"
fi
# Configure max-lease-ttl
echo "Tune PKI secret engine"
vault secrets tune -max-lease-ttl=131400h pki/elasticsearch
# Configure PKI URLs
echo "Configure URL endpoints"
vault write pki/elasticsearch/config/urls \
issuing_certificates="${VAULT_ADDR}/v1/pki/elasticsearch/ca" \
crl_distribution_points="${VAULT_ADDR}/v1/pki/elasticsearch/crl" \
ocsp_servers="${VAULT_ADDR}/v1/pki/elasticsearch/ocsp"
vault write pki/elasticsearch/config/cluster \
path="${VAULT_ADDR}/v1/pki/elasticsearch"
vault write pki/elasticsearch/config/crl \
auto_rebuild=true \
enable_delta=true
# Configure tidy
echo "Configure auto tidy for the PKI"
vault write pki/elasticsearch/config/auto-tidy \
enabled=true \
tidy_cert_store=true \
tidy_expired_issuers=true \
tidy_revocation_queue=true \
tidy_revoked_cert_issuer_associations=true \
tidy_revoked_certs=true \
tidy_acme=true \
tidy_cross_cluster_revoked_certs=true \
tidy_move_legacy_ca_bundle=true \
maintain_stored_certificate_counts=true
if [ "${INITIAL_SETUP}" = "true" ]; then
# Generate an internal CA
echo "Generating an internal CA"
vault write -format=json pki/elasticsearch/intermediate/generate/internal \
common_name="elasticsearch Certificate Authority" \
ttl="131400h" \
organization="ACME Corp" \
ou="Elasticsearch Cluster" \
locality="FooBar Ville" \
key_type=rsa \
key_bits=4096 \
| jq -r '.data.csr' > ${TMP}/elasticsearch.csr
# Sign this PKI with a root PKI
echo "Signing the new CA with the authority from pki/root"
vault write -format=json pki/root/root/sign-intermediate \
csr=@${TMP}/elasticsearch.csr \
format=pem_bundle \
ttl="131400h" \
| jq -r '.data.certificate' > ${TMP}/elasticsearch.crt
# Update the intermediate CA with the signed one
echo "Update the new CA with the signed version"
vault write pki/elasticsearch/intermediate/set-signed \
certificate=@${TMP}/elasticsearch.crt
fi
# Remove temp files
echo "Cleaning temp files"
rm -rf ${TMP}
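# Create the server role used by the Nomad job to issue its certificates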
vault write pki/elasticsearch/roles/server \
allowed_domains="elasticsearch.service.consul" \
allow_bare_domains=true \
allow_subdomains=true \
allow_localhost=false \
allow_ip_sans=true \
server_flag=true \
client_flag=true \
allow_wildcard_certificates=false \
max_ttl=720h


@@ -0,0 +1,421 @@
#!/bin/bash
# vim: syntax=sh
set -euo pipefail
function build_alma8 {
if [ "${IMAGE_ALMA8_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/alma:8.24.4-1 already available"
return
fi
# Image alma8 depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/alma:8.24.4-1 > /dev/null 2>&1; then
echo "Building alma:8.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/alma:8.24.4-1 -t danielberteaud/alma8:latest -t danielberteaud/alma:8 --build-arg=ALMA=8 output/images/alma &&\
docker push danielberteaud/alma:8.24.4-1 &&\
docker push danielberteaud/alma8:latest &&\
docker push danielberteaud/alma:8 &&\
echo "danielberteaud/alma:8.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/alma:8.24.4-1 already available"
fi
IMAGE_ALMA8_AVAILABLE=1
}
function build_alma9 {
if [ "${IMAGE_ALMA9_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/alma:9.24.4-1 already available"
return
fi
# Image alma9 depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/alma:9.24.4-1 > /dev/null 2>&1; then
echo "Building alma:9.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/alma:9.24.4-1 -t danielberteaud/alma9:latest -t danielberteaud/alma:9 --build-arg=ALMA=9 output/images/alma &&\
docker push danielberteaud/alma:9.24.4-1 &&\
docker push danielberteaud/alma9:latest &&\
docker push danielberteaud/alma:9 &&\
echo "danielberteaud/alma:9.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/alma:9.24.4-1 already available"
fi
IMAGE_ALMA9_AVAILABLE=1
}
function build_alpine {
if [ "${IMAGE_ALPINE_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/alpine:24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/alpine:24.4-1 > /dev/null 2>&1; then
echo "Building alpine:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/alpine:24.4-1 -t danielberteaud/alpine:latest output/images/alpine &&\
docker push danielberteaud/alpine:24.4-1 &&\
docker push danielberteaud/alpine:latest &&\
echo "danielberteaud/alpine:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/alpine:24.4-1 already available"
fi
IMAGE_ALPINE_AVAILABLE=1
}
function build_elasticsearch7 {
if [ "${IMAGE_ELASTICSEARCH7_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/elasticsearch:7.17.19-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/elasticsearch:7.17.19-1 > /dev/null 2>&1; then
echo "Building elasticsearch:7.17.19-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/elasticsearch:7.17.19-1 -t danielberteaud/elasticsearch:7 -t danielberteaud/elasticsearch7:latest --build-arg=ES_VERSION=7.17.19 output/images/elasticsearch &&\
docker push danielberteaud/elasticsearch:7.17.19-1 &&\
docker push danielberteaud/elasticsearch:7 &&\
docker push danielberteaud/elasticsearch7:latest &&\
echo "danielberteaud/elasticsearch:7.17.19-1 pushed to remote repo"
else
echo "Image danielberteaud/elasticsearch:7.17.19-1 already available"
fi
IMAGE_ELASTICSEARCH7_AVAILABLE=1
}
function build_elasticsearch8 {
if [ "${IMAGE_ELASTICSEARCH8_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/elasticsearch:8.13.1-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/elasticsearch:8.13.1-1 > /dev/null 2>&1; then
echo "Building elasticsearch:8.13.1-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/elasticsearch:8.13.1-1 -t danielberteaud/elasticsearch:8 -t danielberteaud/elasticsearch8:latest --build-arg=ES_VERSION=8.13.1 output/images/elasticsearch &&\
docker push danielberteaud/elasticsearch:8.13.1-1 &&\
docker push danielberteaud/elasticsearch:8 &&\
docker push danielberteaud/elasticsearch8:latest &&\
echo "danielberteaud/elasticsearch:8.13.1-1 pushed to remote repo"
else
echo "Image danielberteaud/elasticsearch:8.13.1-1 already available"
fi
IMAGE_ELASTICSEARCH8_AVAILABLE=1
}
function build_java11 {
if [ "${IMAGE_JAVA11_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/java:11.24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/java:11.24.4-1 > /dev/null 2>&1; then
echo "Building java:11.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/java:11.24.4-1 -t danielberteaud/java11:latest -t danielberteaud/java:11 --build-arg=JAVA_VERSION=11 output/images/java &&\
docker push danielberteaud/java:11.24.4-1 &&\
docker push danielberteaud/java11:latest &&\
docker push danielberteaud/java:11 &&\
echo "danielberteaud/java:11.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/java:11.24.4-1 already available"
fi
IMAGE_JAVA11_AVAILABLE=1
}
function build_java17 {
if [ "${IMAGE_JAVA17_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/java:17.24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/java:17.24.4-1 > /dev/null 2>&1; then
echo "Building java:17.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/java:17.24.4-1 -t danielberteaud/java17:latest -t danielberteaud/java:17 --build-arg=JAVA_VERSION=17 output/images/java &&\
docker push danielberteaud/java:17.24.4-1 &&\
docker push danielberteaud/java17:latest &&\
docker push danielberteaud/java:17 &&\
echo "danielberteaud/java:17.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/java:17.24.4-1 already available"
fi
IMAGE_JAVA17_AVAILABLE=1
}
function build_java21 {
if [ "${IMAGE_JAVA21_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/java:21.24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/java:21.24.4-1 > /dev/null 2>&1; then
echo "Building java:21.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/java:21.24.4-1 -t danielberteaud/java21:latest -t danielberteaud/java:21 --build-arg=JAVA_VERSION=21 output/images/java &&\
docker push danielberteaud/java:21.24.4-1 &&\
docker push danielberteaud/java21:latest &&\
docker push danielberteaud/java:21 &&\
echo "danielberteaud/java:21.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/java:21.24.4-1 already available"
fi
IMAGE_JAVA21_AVAILABLE=1
}
function build_java8 {
if [ "${IMAGE_JAVA8_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/java:8.24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/java:8.24.4-1 > /dev/null 2>&1; then
echo "Building java:8.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/java:8.24.4-1 -t danielberteaud/java8:latest -t danielberteaud/java:8 --build-arg=JAVA_VERSION=8 output/images/java &&\
docker push danielberteaud/java:8.24.4-1 &&\
docker push danielberteaud/java8:latest &&\
docker push danielberteaud/java:8 &&\
echo "danielberteaud/java:8.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/java:8.24.4-1 already available"
fi
IMAGE_JAVA8_AVAILABLE=1
}
function build_mariadb {
if [ "${IMAGE_MARIADB_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/mariadb:24.4-1 already available"
return
fi
# Image mariadb depends on mariadb_client
build_mariadb_client
if ! docker manifest inspect danielberteaud/mariadb:24.4-1 > /dev/null 2>&1; then
echo "Building mariadb:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/mariadb:24.4-1 -t danielberteaud/mariadb:latest output/images/mariadb &&\
docker push danielberteaud/mariadb:24.4-1 &&\
docker push danielberteaud/mariadb:latest &&\
echo "danielberteaud/mariadb:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/mariadb:24.4-1 already available"
fi
IMAGE_MARIADB_AVAILABLE=1
}
function build_mariadb_client {
if [ "${IMAGE_MARIADB_CLIENT_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/mariadb-client:24.4-1 already available"
return
fi
# Image mariadb_client depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/mariadb-client:24.4-1 > /dev/null 2>&1; then
echo "Building mariadb-client:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/mariadb-client:24.4-1 -t danielberteaud/mariadb-client:latest output/images/mariadb-client &&\
docker push danielberteaud/mariadb-client:24.4-1 &&\
docker push danielberteaud/mariadb-client:latest &&\
echo "danielberteaud/mariadb-client:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/mariadb-client:24.4-1 already available"
fi
IMAGE_MARIADB_CLIENT_AVAILABLE=1
}
function build_mongo50 {
if [ "${IMAGE_MONGO50_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/mongo:5.0.24.4-1 already available"
return
fi
# Image mongo50 depends on alma8
build_alma8
if ! docker manifest inspect danielberteaud/mongo:5.0.24.4-1 > /dev/null 2>&1; then
echo "Building mongo:5.0.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/mongo:5.0.24.4-1 -t danielberteaud/mongo:5.0 --build-arg=MONGO_MAJOR=5.0 output/images/mongo &&\
docker push danielberteaud/mongo:5.0.24.4-1 &&\
docker push danielberteaud/mongo:5.0 &&\
echo "danielberteaud/mongo:5.0.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/mongo:5.0.24.4-1 already available"
fi
IMAGE_MONGO50_AVAILABLE=1
}
function build_pgbouncer {
if [ "${IMAGE_PGBOUNCER_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/pgbouncer:24.4-1 already available"
return
fi
# Image pgbouncer depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/pgbouncer:24.4-1 > /dev/null 2>&1; then
echo "Building pgbouncer:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/pgbouncer:24.4-1 -t danielberteaud/pgbouncer:latest output/images/pgbouncer &&\
docker push danielberteaud/pgbouncer:24.4-1 &&\
docker push danielberteaud/pgbouncer:latest &&\
echo "danielberteaud/pgbouncer:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/pgbouncer:24.4-1 already available"
fi
IMAGE_PGBOUNCER_AVAILABLE=1
}
function build_pgcat {
if [ "${IMAGE_PGCAT_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/pgcat:1.1.1-1 already available"
return
fi
# Image pgcat depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/pgcat:1.1.1-1 > /dev/null 2>&1; then
echo "Building pgcat:1.1.1-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/pgcat:1.1.1-1 -t danielberteaud/pgcat:latest output/images/pgcat &&\
docker push danielberteaud/pgcat:1.1.1-1 &&\
docker push danielberteaud/pgcat:latest &&\
echo "danielberteaud/pgcat:1.1.1-1 pushed to remote repo"
else
echo "Image danielberteaud/pgcat:1.1.1-1 already available"
fi
IMAGE_PGCAT_AVAILABLE=1
}
function build_php82 {
if [ "${IMAGE_PHP82_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/php:82.24.4-1 already available"
return
fi
# Image php82 depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/php:82.24.4-1 > /dev/null 2>&1; then
echo "Building php:82.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/php:82.24.4-1 -t danielberteaud/php:82 -t danielberteaud/php82:latest --build-arg=PHP_VERSION=82 output/images/php &&\
docker push danielberteaud/php:82.24.4-1 &&\
docker push danielberteaud/php:82 &&\
docker push danielberteaud/php82:latest &&\
echo "danielberteaud/php:82.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/php:82.24.4-1 already available"
fi
IMAGE_PHP82_AVAILABLE=1
}
function build_php83 {
if [ "${IMAGE_PHP83_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/php:83.24.4-1 already available"
return
fi
# Image php83 depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/php:83.24.4-1 > /dev/null 2>&1; then
echo "Building php:83.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/php:83.24.4-1 -t danielberteaud/php:83 -t danielberteaud/php83:latest --build-arg=PHP_VERSION=83 output/images/php &&\
docker push danielberteaud/php:83.24.4-1 &&\
docker push danielberteaud/php:83 &&\
docker push danielberteaud/php83:latest &&\
echo "danielberteaud/php:83.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/php:83.24.4-1 already available"
fi
IMAGE_PHP83_AVAILABLE=1
}
function build_postgres15 {
if [ "${IMAGE_POSTGRES15_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/postgres:15.24.4-1 already available"
return
fi
# Image postgres15 depends on alma9
build_alma9
if ! docker manifest inspect danielberteaud/postgres:15.24.4-1 > /dev/null 2>&1; then
echo "Building postgres:15.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/postgres:15.24.4-1 -t danielberteaud/postgres15:latest -t danielberteaud/postgres:15 --build-arg=PG_VERSION=15 output/images/postgres &&\
docker push danielberteaud/postgres:15.24.4-1 &&\
docker push danielberteaud/postgres15:latest &&\
docker push danielberteaud/postgres:15 &&\
echo "danielberteaud/postgres:15.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/postgres:15.24.4-1 already available"
fi
IMAGE_POSTGRES15_AVAILABLE=1
}
function build_postgres16 {
if [ "${IMAGE_POSTGRES16_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/postgres:16.24.4-1 already available"
return
fi
# Image postgres16 depends on alma9
build_alma9
if ! docker manifest inspect danielberteaud/postgres:16.24.4-1 > /dev/null 2>&1; then
echo "Building postgres:16.24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/postgres:16.24.4-1 -t danielberteaud/postgres16:latest -t danielberteaud/postgres:16 --build-arg=PG_VERSION=16 output/images/postgres &&\
docker push danielberteaud/postgres:16.24.4-1 &&\
docker push danielberteaud/postgres16:latest &&\
docker push danielberteaud/postgres:16 &&\
echo "danielberteaud/postgres:16.24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/postgres:16.24.4-1 already available"
fi
IMAGE_POSTGRES16_AVAILABLE=1
}
function build_sqlite {
if [ "${IMAGE_SQLITE_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/sqlite:24.4-1 already available"
return
fi
# Image sqlite depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/sqlite:24.4-1 > /dev/null 2>&1; then
echo "Building sqlite:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/sqlite:24.4-1 -t danielberteaud/sqlite:latest output/images/sqlite &&\
docker push danielberteaud/sqlite:24.4-1 &&\
docker push danielberteaud/sqlite:latest &&\
echo "danielberteaud/sqlite:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/sqlite:24.4-1 already available"
fi
IMAGE_SQLITE_AVAILABLE=1
}
function build_wait_for {
if [ "${IMAGE_WAIT_FOR_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/wait-for:24.4-1 already available"
return
fi
if ! docker manifest inspect danielberteaud/wait-for:24.4-1 > /dev/null 2>&1; then
echo "Building wait-for:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/wait-for:24.4-1 -t danielberteaud/wait-for:latest output/images/wait-for &&\
docker push danielberteaud/wait-for:24.4-1 &&\
docker push danielberteaud/wait-for:latest &&\
echo "danielberteaud/wait-for:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/wait-for:24.4-1 already available"
fi
IMAGE_WAIT_FOR_AVAILABLE=1
}
function build_wkhtmltopdf {
if [ "${IMAGE_WKHTMLTOPDF_AVAILABLE:-0}" = "1" ]; then
echo "Image danielberteaud/wkhtmltopdf:24.4-1 already available"
return
fi
# Image wkhtmltopdf depends on alpine
build_alpine
if ! docker manifest inspect danielberteaud/wkhtmltopdf:24.4-1 > /dev/null 2>&1; then
echo "Building wkhtmltopdf:24.4-1"
export BUILDKIT_PROGRESS=plain
docker build -t danielberteaud/wkhtmltopdf:24.4-1 -t danielberteaud/wkhtmltopdf:latest output/images/wkhtmltopdf &&\
docker push danielberteaud/wkhtmltopdf:24.4-1 &&\
docker push danielberteaud/wkhtmltopdf:latest &&\
echo "danielberteaud/wkhtmltopdf:24.4-1 pushed to remote repo"
else
echo "Image danielberteaud/wkhtmltopdf:24.4-1 already available"
fi
IMAGE_WKHTMLTOPDF_AVAILABLE=1
}
build_alma8
build_alma9
build_alpine
build_elasticsearch7
build_elasticsearch8
build_java11
build_java17
build_java21
build_java8
build_mariadb
build_mariadb_client
build_mongo50
build_pgbouncer
build_pgcat
build_php82
build_php83
build_postgres15
build_postgres16
build_sqlite
build_wait_for
build_wkhtmltopdf


@@ -0,0 +1,25 @@
#!/bin/sh
set -euo pipefail
# vim: syntax=sh
export LC_ALL=C
VAULT_KV_PATH=kv/service/elasticsearch
RAND_CMD="tr -dc A-Za-z0-9\-_\/=~\.+ < /dev/urandom | head -c 50"
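# The command above generates a 50 character random password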
if ! vault kv list $(dirname ${VAULT_KV_PATH}) 2>/dev/null | grep -q -E "^$(basename ${VAULT_KV_PATH})\$"; then
vault kv put ${VAULT_KV_PATH} \
elastic_pwd="$(sh -c "${RAND_CMD}")" \
es_bootstrap_pwd="$(sh -c "${RAND_CMD}")" \
exporter_pwd="$(sh -c "${RAND_CMD}")" \
healthdata_services_pwd="$(sh -c "${RAND_CMD}")" \
stock_services_pwd="$(sh -c "${RAND_CMD}")"
fi
for SECRET in elastic_pwd es_bootstrap_pwd exporter_pwd healthdata_services_pwd stock_services_pwd; do
if ! vault kv get -field ${SECRET} ${VAULT_KV_PATH} >/dev/null 2>&1; then
vault kv patch ${VAULT_KV_PATH} \
${SECRET}=$(sh -c "${RAND_CMD}")
fi
done


@@ -0,0 +1,16 @@
# Access the vault KV (v2) store
path "kv/data/service/elasticsearch" {
capabilities = ["read"]
}
path "kv/metadata/service/elasticsearch/*" {
capabilities = ["read", "list"]
}
path "kv/data/service/elasticsearch/*" {
capabilities = ["read"]
}
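# Allow issuing server certificates from the Elasticsearch PKI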
path "pki/elasticsearch/issue/server" {
capabilities = ["update"]
}


@@ -138,7 +138,7 @@ docker:
elasticsearch8:
image: elasticsearch:[[ .docker.base_images.elasticsearch8.build_args.ES_VERSION ]]-1
build_args:
ES_VERSION: 8.13.0
ES_VERSION: 8.13.1
tags:
- elasticsearch:8
- elasticsearch8:latest