Update to 2022-03-07 14:00

This commit is contained in:
Daniel Berteaud 2022-03-07 14:00:06 +01:00
parent c55f851cbd
commit 8b7e505180
58 changed files with 1119 additions and 89 deletions

View File

@ -1,17 +1,28 @@
---
- include: user.yml
- include: directories.yml
- include: facts.yml
- include: archive_pre.yml
when: pgweb_install_mode == 'upgrade'
- include: install.yml
- include: conf.yml
- include: iptables.yml
- include_tasks: user.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: facts.yml
tags: always
- include_tasks: archive_pre.yml
when: pgweb_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
- include: services.yml
- include: archive_post.yml
when: pgweb_install_mode == 'upgrade'
- include: write_version.yml
- include: cleanup.yml
tags: always
- include_tasks: services.yml
tags: always
- include_tasks: archive_post.yml
when: pgweb_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: write_version.yml
tags: always
- include_tasks: cleanup.yml
tags: always

View File

@ -40,5 +40,6 @@
notify: reload systemd
tags: proxy
- include: "{{ ansible_os_family }}.yml"
- include_tasks: "{{ ansible_os_family }}.yml"
tags: always

View File

@ -1,14 +1,24 @@
---
- include: user.yml
- include: directories.yml
- include: facts.yml
- include: archive_pre.yml
when: taiga_install_mode == 'upgrade'
- include: install.yml
- include: conf.yml
- include: services.yml
- include: write_version.yml
- include: archive_post.yml
when: taiga_install_mode == 'upgrade'
- include: cleanup.yml
- include_tasks: user.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: facts.yml
tags: always
- include_tasks: archive_pre.yml
when: taiga_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: services.yml
tags: always
- include_tasks: write_version.yml
tags: always
- include_tasks: archive_post.yml
when: taiga_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: cleanup.yml
tags: always

View File

@ -1,7 +1,11 @@
---
- include: install.yml
- include: conf.yml
- include: iptables.yml
- include_tasks: install.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
- include: services.yml
tags: always
- include_tasks: services.yml
tags: always

View File

@ -0,0 +1,9 @@
---
- name: reset permissions
command: sh /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin/perms.sh
loop: "{{ wh_clients | subelements('apps') }}"
when: item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
- name: restart wh-acld
service: name=wh-acld state=restarted

View File

@ -0,0 +1,4 @@
---
dependencies:
- role: wh_common
- role: httpd_php

View File

@ -0,0 +1,207 @@
---
- include_vars: "{{ item }}"
with_first_found:
- vars/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml
- vars/{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml
- vars/{{ ansible_distribution }}.yml
- vars/{{ ansible_os_family }}.yml
tags: web
# Install the packages required on a web hosting backend (list comes from
# the per-distro vars file loaded just above by include_vars)
- name: Install needed tools
  yum: name={{ wh_backend_packages }}
  tags: web
- set_fact: wh_app_dir=[]
tags: web
- name: Build a list of app root
set_fact:
wh_app_dir: "{{ wh_app_dir }} + [ '/opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}' ]"
loop: "{{ wh_clients | subelements('apps') }}"
when: item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
tags: web
- name: Create unix accounts
user:
name: "wh-{{ item.name }}"
comment: "Unix account for {{ item.name }}"
system: True
shell: "{{ shell | default('/sbin/nologin') }}"
home: /opt/wh/{{ item.name }}
loop: "{{ wh_clients }}"
tags: web
- name: Create ssh directories
file: path=/etc/ssh/wh/{{ item.name }}/ state=directory mode=755
loop: "{{ wh_clients }}"
tags: web
- name: Deploy SSH keys
authorized_key:
user: root
key: "{{ item.ssh_keys | default([]) | join(\"\n\") }}"
path: /etc/ssh/wh/{{ item.name }}/authorized_keys
manage_dir: False
exclusive: True
loop: "{{ wh_clients }}"
tags: web
- name: Set correct permissions on authorized_key files
file: path=/etc/ssh/wh/{{ item.name }}/authorized_keys owner=root group=root mode=644
loop: "{{ wh_clients }}"
when: item.ssh_keys | default([]) | length > 0
tags: web
- name: List all authorized keys directories
shell: ls -1 /etc/ssh/wh | xargs -n1 basename
register: wh_existing_ssh_keys
changed_when: False
tags: web
- name: Remove unmanaged ssh keys
file: path=/etc/ssh/wh/{{ item }} state=absent
with_items: "{{ wh_existing_ssh_keys.stdout_lines | default([]) }}"
when: item not in wh_clients | map(attribute='name')
tags: web
- name: Create applications directories
file: path={{ item.0 }}/{{ item.1 }} state=directory
loop: "{{ wh_app_dir | product(['web','data','tmp','logs','archives','bin','info', 'db_dumps']) | list }}"
notify: reset permissions
tags: web
- name: Set correct SELinux context for apps directories
sefcontext:
target: "{{ item }}(/.*)?"
setype: httpd_sys_content_t
state: present
when: ansible_selinux.status == 'enabled'
loop: "{{ wh_app_dir }}"
notify: reset permissions
tags: web
- name: Deploy PHP FPM pools
template: src=php-fpm.conf.j2 dest=/etc/opt/remi/php{{ item }}/php-fpm.d/wh.conf
vars:
wh_php_version: "{{ item }}"
loop: "{{ httpd_php_versions }}"
notify: restart php-fpm
tags: web
- name: Deploy httpd configuration
template: src=httpd.conf.j2 dest=/etc/httpd/ansible_conf.d/31-wh.conf
notify: reload httpd
tags: web
- name: Deploy permissions scripts
template: src=perms.sh.j2 dest=/opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin/perms.sh
loop: "{{ wh_clients | subelements('apps') }}"
when: item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
notify: reset permissions
tags: web
- name: Create databases
mysql_db:
name: "{{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}"
login_host: "{{ (wh_default_app | combine(item.1)).database.server | default(mysql_server) }}"
login_user: sqladmin
login_password: "{{ mysql_admin_pass }}"
collation: "{{ (wh_default_app | combine(item.1)).database.collation }}"
encoding: "{{ (wh_default_app | combine(item.1)).database.encoding }}"
state: present
loop: "{{ wh_clients | subelements('apps') }}"
when:
- (wh_default_app | combine(item.1)).database.enabled
- (wh_default_app | combine(item.1)).database.engine == 'mysql'
- item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
tags: web
- name: Create applications database users
mysql_user:
name: "{{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}"
password: "{{ (wh_default_app | combine(item.1)).database.pass | default((wh_pass_seed | password_hash('sha256', 65534 | random(seed=item.0.name + item.1.name) | string))[9:27] ) }}"
priv: "{{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}.*:ALL"
host: "%"
login_host: "{{ (wh_default_app | combine(item.1)).database.server | default(mysql_server) }}"
login_user: sqladmin
login_password: "{{ mysql_admin_pass }}"
state: present
loop: "{{ wh_clients | subelements('apps') }}"
when:
- (wh_default_app | combine(item.1)).database.enabled
- (wh_default_app | combine(item.1)).database.engine == 'mysql'
- item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
tags: web
- name: Create clients database user
mysql_user:
name: "{{ item.0.name[0:15] }}"
password: "{{ item.0.db_pass | default((wh_pass_seed | password_hash('sha256', 65534 | random(seed=item.0.name) | string))[9:27]) }}"
priv: "{{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}.*:ALL"
host: "%"
login_host: "{{ (wh_default_app | combine(item.1)).database.server | default(mysql_server) }}"
login_user: sqladmin
login_password: "{{ mysql_admin_pass }}"
append_privs: True
state: present
loop: "{{ wh_clients | subelements('apps')}}"
when:
- (wh_default_app | combine(item.1)).database.enabled
- (wh_default_app | combine(item.1)).database.engine == 'mysql'
- item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
tags: web
- name: Deploy databases info file
template: src=database.txt.j2 dest=/opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info/database.txt
loop: "{{ wh_clients | subelements('apps') }}"
when: item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
notify: reset permissions
tags: web
- name: Deploy per app backup scripts
template: src=backup.sh.j2 dest=/opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin/backup.sh mode=750
loop: "{{ wh_clients | subelements('apps') }}"
when: item.1.backend | default(item.0.backend) | default(wh_defaults.backend) == inventory_hostname
tags: web
- name: Deploy wh_create_archives script to archive all the hosted apps
template: src=wh_create_archives.sh.j2 dest=/usr/local/bin/wh_create_archives.sh mode=750
tags: web
- name: Setup a daily cronjob to take automatic archives of webapps
cron:
name: wh_backups
special_time: daily
user: root
job: 'systemd-cat /usr/local/bin/wh_create_archives.sh'
cron_file: wh
state: present
tags: web
- name: Deploy global pre/post backup scripts
template: src={{ item }}_backup.sh.j2 dest=/etc/backup/{{ item }}.d/wh.sh mode=700
loop: [ 'pre', 'post' ]
tags: web
- name: Deploy logrotate snippet
template: src=logrotate.j2 dest=/etc/logrotate.d/wh
tags: web
- name: Deploy wh-acld
template: src=wh-acld.j2 dest=/usr/local/bin/wh-acld mode=750
notify: restart wh-acld
tags: web
- name: Deploy wh-acld service unit
template: src=wh-acld.service.j2 dest=/etc/systemd/system/wh-acld.service
register: wh_acld_unit
tags: web
- name: Reload systemd
systemd: daemon_reload=True
when: wh_acld_unit.changed
tags: web
- name: Start and enable wh-acld
service: name=wh-acld state=started enabled=True
tags: web

View File

@ -0,0 +1,14 @@
#!/bin/bash -e
cd /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}
# Remove old archives
find archives/ -type f -mtime +2 -exec rm -f "{}" \;
# Create the new daily archive, with a dump of the DB and the web, data and logs dir
TS=$(date +%Y-%m-%d_%Hh%M)
mysqldump --add-drop-table --single-transaction \
--host={{ (wh_default_app | combine(item.1)).database.server | default(mysql_server) }} \
--user={{ item.0.name[0:7] }}_{{ item.1.name[0:7] }} \
--password="{{ (wh_default_app | combine(item.1)).database.pass | default((wh_pass_seed | password_hash('sha256', 65534 | random(seed=item.0.name + item.1.name) | string))[9:27] ) }}" \
{{ item.0.name[0:7] }}_{{ item.1.name[0:7] }} | \
zstd -c > archives/$TS.sql.zst
ZSTD_CLEVEL=15 ZSTD_NBTHREADS=0 nice -n 9 tar cf archives/$TS.tar.zst --use-compress-program=zstd data web logs

View File

@ -0,0 +1,5 @@
Type: {{ (wh_default_app | combine(item.1)).database.engine | default('mysql') }}
Server: {{ (wh_default_app | combine(item.1)).database.server | default(mysql_server) }}
Database: {{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}
User: {{ item.0.name[0:7] }}_{{ item.1.name[0:7] }}
Password: {{ (wh_default_app | combine(item.1)).database.pass | default((wh_pass_seed | password_hash('sha256', 65534 | random(seed=item.0.name + item.1.name) | string))[9:27] ) }}

View File

@ -0,0 +1,44 @@
# {{ ansible_managed }}
{% for client in wh_clients %}
{% for app in client.apps | default([]) %}
{% set app = wh_default_app | combine(app, recursive=True) %}
{% if app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
################################################
## vhost for {{ client.name }}-{{ app.name }}
################################################
<VirtualHost *:80>
ServerName {{ app.vhost | default(client.name + '-' + app.name + '.wh.fws.fr') }}
{% if app.aliases | length > 0 %}
ServerAlias {{ app.aliases | join(' ') }}
{% endif %}
ServerAdmin webmaster@fws.fr
DocumentRoot /opt/wh/{{ client.name }}/apps/{{ app.name }}/web
Alias /_deferror/ "/usr/share/httpd/error/"
Include ansible_conf.d/common_env.inc
ProxyTimeout {{ app.php.max_execution_time }}
</VirtualHost>
################################################
## webroot for {{ client.name }}-{{ app.name }}
################################################
<Directory /opt/wh/{{ client.name }}/apps/{{ app.name }}/web>
AllowOverride All
Options FollowSymLinks
Require all granted
{% if app.php.enabled %}
<FilesMatch \.php$>
SetHandler "proxy:unix:/run/php-fpm/php{{ app.php.version}}-{{ client.name }}-{{ app.name }}.sock|fcgi://localhost"
</FilesMatch>
{% endif %}
<FilesMatch "^(\.ansible_version|\.git.*|(README|LICENSE|AUTHORS|CHANGELOG|CONTRIBUTING|LEGALNOTICE|PRIVACY|SECURITY)(\.md)?|.*\.co?nf|\.htaccess|composer\.(json|lock))">
Require all denied
</FilesMatch>
</Directory>
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,21 @@
# {{ ansible_managed }}
{% for client in wh_clients %}
{% for app in client.apps %}
{% set app = wh_default_app | combine(app, recursive=True) %}
{% if app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
/opt/wh/{{ client.name }}/apps/{{ app.name }}/logs/*.log {
rotate 52
weekly
copytruncate
missingok
compress
compressoptions -T0
compresscmd /bin/xz
uncompresscmd /bin/unxz
compressext .xz
su {{ app.run_as | default('wh-' + client.name) }} {{ app.run_as | default('wh-' + client.name) }}
}
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,35 @@
#!/bin/sh
# Set correct SELinux label
restorecon -R /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}
# Remove all the ACL so we can start from scratch
setfacl -R --remove-all --remove-default /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}
# Set permissions on the top level client dir. Not recursively !
# Here, the corresponding client only has read permissions (plus the tech team)
chown root:root /opt/wh/{{ item.0.name }}/{,apps}
chmod 750 /opt/wh/{{ item.0.name }}/
chmod 755 /opt/wh/{{ item.0.name }}/apps
setfacl -m u:apache:rX,g:Tech:rX,g:Client_{{ item.0.name }}:rX,u:{{ item.1.run_as | default('wh-' + item.0.name) }}:rX /opt/wh/{{ item.0.name }}/
# Set decent permissions, aka rw for files and rwx for directories. With setgid so the group owner is inherited to new files
find /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/{data,tmp,web} -type f -exec chmod 660 "{}" \;
find /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/{data,tmp,web} -type d -exec chmod 2770 "{}" \;
# Now, grant apache read access (needed for serving static assets), and full rw access to the client group. Set mask to full permission, we don't want to limit ACL. And explicitly set other perms to 0
# Members of the tech team has write access for install/debug
setfacl -R -m u:apache:rX,d:u:apache:rX,g:Tech:rwX,d:g:Tech:rwX,g:Client_{{ item.0.name }}:rwX,d:g:Client_{{ item.0.name }}:rwX,u:{{ item.1.run_as | default('wh-' + item.0.name) }}:rwX,d:u:{{ item.1.run_as | default('wh-' + item.0.name) }}:rwX,m:rwX,o:- /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}
# The bin folder shouldn't be visible to the client, it only contains admin's scripts
setfacl -R --remove-all --remove-default /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin
chown -R root:root /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin
chmod 700 /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin
chmod 750 /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/bin/*
# Info is readonly for the client (and the tech team)
setfacl -R --remove-all --remove-default /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info
chown -R root:Client_{{ item.0.name }} /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info
chmod 750 /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info
chmod 640 /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info/*
setfacl -R -m g:Tech:rX,d:g:Tech:rX,m:rwX,o:- /opt/wh/{{ item.0.name }}/apps/{{ item.1.name }}/info

View File

@ -0,0 +1,61 @@
; {{ ansible_managed }}
{% for client in wh_clients | default([]) %}
{% for app in client.apps | default([]) %}
{% set app = wh_default_app | combine(app, recursive=True) %}
{% if app.php.enabled and app.php.version | string == wh_php_version | string and app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Begin pool {{ client.name }}-{{ app.name }}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[{{ client.name }}-{{ app.name }}]
listen.owner = root
listen.group = {{ httpd_group }}
listen.mode = 0660
listen = /run/php-fpm/php{{ wh_php_version }}-{{ client.name }}-{{ app.name }}.sock
user = {{ client.run_as | default('wh-' + client.name) }}
group = {{ client.run_as | default('wh-' + client.name) }}
catch_workers_output = yes
pm = dynamic
pm.max_children = 15
pm.start_servers = 3
pm.min_spare_servers = 3
pm.max_spare_servers = 6
pm.max_requests = 5000
request_terminate_timeout = 5m
php_flag[display_errors] = {{ app.php.display_error | ternary('on','off') }}
php_admin_flag[log_errors] = on
php_admin_value[error_log] = /opt/wh/{{ client.name }}/apps/{{ app.name }}/logs/php_error.log
php_admin_value[memory_limit] = {{ app.php.memory_limit }}
php_admin_value[session.save_path] = /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp
php_admin_value[upload_tmp_dir] = /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp
php_admin_value[sys_temp_dir] = /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp
php_admin_value[post_max_size] = {{ app.php.upload_max_filesize }}
php_admin_value[upload_max_filesize] = {{ app.php.upload_max_filesize }}
php_admin_value[disable_functions] = {{ app.php.disabled_functions | difference(app.php.enabled_functions) | join(', ') }}
php_admin_value[open_basedir] = /opt/wh/{{ client.name }}/apps
php_admin_value[max_execution_time] = {{ app.php.max_execution_time }}
php_admin_value[max_input_time] = {{ app.php.max_execution_time }}
php_admin_flag[allow_url_include] = off
php_admin_flag[allow_url_fopen] = {{ app.php.allow_url_fopen | ternary('on','off') }}
php_admin_flag[file_uploads] = {{ app.php.file_uploads | ternary('on','off') }}
php_admin_flag[session.cookie_httponly] = on
{% if app.php.custom_conf is defined %}
{{ app.php.custom_conf }}
{% endif %}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; End pool {{ client.name }}-{{ app.name }}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,3 @@
#!/bin/bash -e
rm -f /opt/wh/*/apps/*/db_dumps/*.sql.zst

View File

@ -0,0 +1,17 @@
#!/bin/bash
# pipefail is a bash extension (not POSIX sh), so the shebang must be bash
set -eo pipefail
{% for client in wh_clients %}
{% for app in client.apps | default([]) %}
{% set app = wh_default_app | combine(app, recursive=True) %}
{% if app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
mysqldump --add-drop-table --single-transaction \
--host={{ (wh_default_app | combine(app)).database.server | default(mysql_server) }} \
--user={{ client.name[0:7] }}_{{ app.name[0:7] }} \
--password="{{ (wh_default_app | combine(app)).database.pass | default((wh_pass_seed | password_hash('sha256', 65534 | random(seed=client.name + app.name) | string))[9:27] ) }}" \
{{ client.name[0:7] }}_{{ app.name[0:7] }} | \
zstd -c > /opt/wh/{{ client.name }}/apps/{{ app.name }}/db_dumps/{{ client.name[0:7] }}_{{ app.name[0:7] }}.sql.zst
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,17 @@
#!/bin/bash -e
while true; do
{% for client in wh_clients %}
{% for app in client.apps %}
{% if app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
if [ -e /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp/reset -o -e /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp/reset.txt ]; then
echo Reseting permissions for {{ client.name }} - {{ app.name }}
sh /opt/wh/{{ client.name }}/apps/{{ app.name }}/bin/perms.sh
echo Permissions for {{ client.name }} - {{ app.name }} have been reseted
rm -f /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp/reset /opt/wh/{{ client.name }}/apps/{{ app.name }}/tmp/reset.txt
fi
{% endif %}
{% endfor %}
{% endfor %}
sleep 5
done

View File

@ -0,0 +1,14 @@
[Unit]
Description=Web Hosting ACL monitor daemon
[Service]
Type=simple
ExecStart=/usr/local/bin/wh-acld
PrivateTmp=yes
PrivateDevices=yes
MemoryLimit=100M
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,13 @@
#!/bin/bash -e
{% for client in wh_clients %}
{% for app in client.apps %}
{% set app = wh_default_app | combine(app, recursive=True) %}
{% if app.backend | default(client.backend) | default(wh_defaults.backend) == inventory_hostname %}
echo Starting archiving {{ client.name }} - {{ app.name }}
sh /opt/wh/{{ client.name }}/apps/{{ app.name }}/bin/backup.sh
echo Archive for {{ client.name }} - {{ app.name }} created
{% endif %}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,6 @@
---
wh_backend_packages:
- acl
- MySQL-python
- mariadb

View File

@ -0,0 +1,7 @@
---
wh_backend_packages:
- acl
- python3-mysql
- mariadb

View File

@ -0,0 +1,3 @@
---
wh_clients: []

View File

@ -0,0 +1,15 @@
---
- name: Build a list of Zimbra domains
set_fact: wh_mail_domains={{ wh_clients | selectattr('mail', 'defined') | selectattr('mail.enabled', 'equalto', True) | selectattr('mail.domain', 'defined') | map(attribute='mail.domain') | list }}
tags: mail
- name: Build a list of Zimbra domain aliases
set_fact: wh_mail_aliases={{ wh_mail_aliases | default([]) + item.mail.domain_aliases }}
loop: "{{ wh_clients }}"
when:
- item.mail is defined
- item.mail.enabled is defined
- item.mail.enabled
- item.mail.domain_aliases is defined
- item.mail.domain_aliases | length > 0
tags: mail

View File

@ -0,0 +1,3 @@
---
- include: facts.yml

View File

@ -0,0 +1,40 @@
---
wh_default_client:
ssh_keys: []
wh_default_app:
allow_ip: []
deny_ip: []
aliases: []
maintenance: False
php:
enabled: True
version: 73
memory_limit: 256M
display_error: False
file_uploads: True
upload_max_filesize: 10M
disabled_functions:
- system
- show_source
- symlink
- exec
- dl
- shell_exec
- passthru
- phpinfo
- escapeshellarg
- escapeshellcmd
enabled_functions: []
allow_url_fopen: False
max_execution_time: 900
# custom_conf: |
# php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f no-reply@domain.tld
cache: enabled
force_https: True
letsencrypt_cert: False
database:
enabled: True
engine: mysql
collation: utf8mb4_unicode_ci
encoding: utf8mb4

View File

@ -0,0 +1,4 @@
---
dependencies:
- role: wh_common
- role: pmg

View File

@ -0,0 +1,4 @@
---
- set_fact: wh_mail_domains_to_relay={{ wh_mail_domains + wh_mail_aliases | default([]) }}
tags: mail

View File

@ -0,0 +1,22 @@
---
# include_tasks replaces the deprecated bare `include`
- include_tasks: facts.yml
- name: List configured relay domains
command: pmgsh get /config/domains
register: wh_pmg_domains
changed_when: False
tags: mail
- set_fact: wh_pmg_domains={{ wh_pmg_domains.stdout | from_json | map(attribute='domain') | list }}
tags: mail
- name: Create domains in PMG relay table
command: pmgsh create /config/domains --domain "{{ item }}"
loop: "{{ wh_mail_domains_to_relay }}"
when: item not in wh_pmg_domains
tags: mail
# Prune relay-table entries that are no longer in the managed domain list.
# Tagged `mail` like every other task in this file so it isn't skipped when
# the play runs with --tags mail.
- name: Remove domains from PMG relay table
  command: pmgsh delete /config/domains/{{ item }}
  loop: "{{ wh_pmg_domains }}"
  when: item not in wh_mail_domains_to_relay
  tags: mail

View File

@ -0,0 +1,2 @@
---
- include: ../nginx/handlers/main.yml

View File

@ -0,0 +1,4 @@
---
dependencies:
- role: wh_common
- role: nginx

View File

@ -0,0 +1,46 @@
---
- set_fact: role_wh_proxy={{ True }}
tags: web
- name: Deploy web hosting vhosts
template: src=nginx_vhosts.conf.j2 dest=/etc/nginx/ansible_conf.d/31-vhosts_wh.conf
notify: reload nginx
tags: web
- name: Build a list of client vhosts
set_fact:
wh_vhosts: "{{ wh_vhosts | default([]) + [ item.1.vhost | default(item.0.name + '-' + item.1.name + '.wh.fws.fr') ] }}"
loop: "{{ wh_clients | default([]) | subelements('apps') }}"
tags: web
- name: Check if Let's Encrypt's cert exist (web hosting)
stat: path=/var/lib/dehydrated/certificates/certs/{{ item }}/fullchain.pem
register: wh_letsencrypt_certs
with_items: "{{ wh_vhosts }}"
tags: web
- name: Create directories for missing Let's Encrypt cert (web hosting)
file: path=/var/lib/dehydrated/certificates/certs/{{ item.item }} state=directory
with_items: "{{ wh_letsencrypt_certs.results }}"
when:
- item.stat is defined
- not item.stat.exists
tags: web
- name: Link missing Let's Encrypt cert to the default one (web hosting)
file: src={{ nginx_cert_path }} dest=/var/lib/dehydrated/certificates/certs/{{ item.item }}/fullchain.pem state=link
with_items: "{{ wh_letsencrypt_certs.results }}"
when:
- item.stat is defined
- not item.stat.exists
tags: web
- name: Link missing Let's Encrypt key to the default one (web hosting)
file: src={{ nginx_key_path }} dest=/var/lib/dehydrated/certificates/certs/{{ item.item }}/privkey.pem state=link
with_items: "{{ wh_letsencrypt_certs.results }}"
when:
- item.stat is defined
- not item.stat.exists
tags: web

View File

@ -0,0 +1,93 @@
# {{ ansible_managed }}
{% for client in wh_clients | default([]) %}
{% for app in client.apps | default([]) %}
{% set app = wh_default_app | combine(app, recursive=True) %}
server {
listen 80;
listen 443 ssl http2;
ssl_certificate /var/lib/dehydrated/certificates/certs/{{ app.vhost | default(client.name + '-' + app.name + '.wh.fws.fr') }}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certificates/certs/{{ app.vhost | default(client.name + '-' + app.name + '.wh.fws.fr') }}/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256;
server_name {{ app.vhost | default(client.name + '-' + app.name + '.wh.fws.fr') }} {{ app.aliases | join(' ') }};
root /usr/share/nginx/html;
{% if app.maintenance %}
include /etc/nginx/ansible_conf.d/maintenance.inc;
{% endif %}
# All client's vhost will use http-01 ACME challenges
include /etc/nginx/ansible_conf.d/acme.inc;
# Ensure SSL is used
include /etc/nginx/ansible_conf.d/force_ssl.inc;
location / {
limit_req zone=limit_req_std burst=200 nodelay;
limit_conn limit_conn_std 100;
include /etc/nginx/ansible_conf.d/perf.inc;
include /etc/nginx/ansible_conf.d/cache.inc;
{% if app.proxy_custom_rewrites is defined %}
{{ app.proxy_custom_rewrites | indent(4,true) }}
{% endif %}
# Send the original Host header to the backend
proxy_set_header Host "$host";
# Send info about the original request to the backend
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto "$scheme";
# Handle websocket proxying
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_http_version 1.1;
# Hide some headers sent by the backend
proxy_hide_header X-Powered-By;
proxy_hide_header Cache-Control;
proxy_hide_header Pragma;
proxy_hide_header Expires;
# Set the timeout to read responses from the backend
proxy_read_timeout {{ app.php.max_execution_time }}s;
# Disable buffering large files
proxy_max_temp_file_size 5m;
# Proxy requests to the backend
proxy_pass http://{{ app.backend | default(client.backend) | default(wh_defaults.backend) }};
# per vhost IP blacklist
{% for ip in app.deny_ip %}
deny {{ ip }};
{% endfor %}
{% if app.allow_ip | length > 0 %}
# per vhost IP restriction
{% for ip in app.allow_ip %}
allow {{ ip }};
{% endfor %}
deny all;
{% endif %}
}
location = /RequestDenied {
return 403;
}
}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,3 @@
---
- name: start zmldapsync-wh
service: name=zmldapsync-wh state=started

View File

@ -0,0 +1,4 @@
---
dependencies:
- role: wh_common
- role: zimbra

View File

@ -0,0 +1,116 @@
---
- name: Get or generate a pre authentication key
shell: |
KEY=$(/opt/zimbra/bin/zmprov getDomain {{ item }} zimbrapreauthkey | perl -ne '/^(?:zimbraP|p)reAuthKey: (.*)/ && print $1')
[ -z $KEY ] && KEY=$(/opt/zimbra/bin/zmprov generateDomainPreAuthKey {{ item }} | perl -ne '/^(?:zimbraP|p)reAuthKey: (.*)/ && print $1')
echo $KEY
become_user: zimbra
register: zcs_preauthkeys
changed_when: False
loop: "{{ wh_mail_domains }}"
tags: mail
- name: Install preauth pages
template: src=../zimbra/templates/cas_preauth.jsp.j2 dest=/opt/zimbra/jetty/webapps/zimbra/public/preauth_{{ item.item }}.jsp owner=zimbra group=zimbra
loop: "{{ zcs_preauthkeys.results }}"
notify: restart zmmailboxd
tags: mail
- name: Install admin preauth pages
template: src=../zimbra/templates/cas_preauth_admin.jsp.j2 dest=/opt/zimbra/jetty/webapps/zimbraAdmin/public/preauth_{{ item.item }}.jsp owner=zimbra group=zimbra
loop: "{{ zcs_preauthkeys.results }}"
notify: restart zmmailboxd
tags: mail
- name: Configure CAS filters
blockinfile:
path: /opt/zimbra/jetty/etc/zimbra.web.xml.in
block: |2
<filter>
<filter-name>CasSingleSignOutFilter</filter-name>
<filter-class>org.jasig.cas.client.session.SingleSignOutFilter</filter-class>
<init-param>
<param-name>casServerUrlPrefix</param-name>
<param-value>https://sso-cl.fws.fr/cas</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>CasSingleSignOutFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<listener>
<listener-class>org.jasig.cas.client.session.SingleSignOutHttpSessionListener</listener-class>
</listener>
{% for item in wh_clients | default([]) %}
{% if item.mail is defined and item.mail.enabled is defined and item.mail.enabled and item.mail.domain is defined %}
<!-- filters for {{ item.name }} -->
<filter>
<filter-name>CasAuthenticationFilter{{ item.name }}</filter-name>
<filter-class>org.jasig.cas.client.authentication.AuthenticationFilter</filter-class>
<init-param>
<param-name>casServerLoginUrl</param-name>
<param-value>https://sso-cl.fws.fr/cas/login</param-value>
</init-param>
<init-param>
<param-name>serverName</param-name>
<param-value>{{ item.mail.vhosts | first }}</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>CasAuthenticationFilter{{ item.name }}</filter-name>
<url-pattern>/public/preauth_{{ item.mail.domain }}.jsp</url-pattern>
</filter-mapping>
<filter>
<filter-name>CasValidationFilter{{ item.name }}</filter-name>
<filter-class>org.jasig.cas.client.validation.Cas20ProxyReceivingTicketValidationFilter</filter-class>
<init-param>
<param-name>casServerUrlPrefix</param-name>
<param-value>https://sso-cl.fws.fr/cas</param-value>
</init-param>
<init-param>
<param-name>serverName</param-name>
<param-value>{{ item.mail.vhosts | first }}</param-value>
</init-param>
<init-param>
<param-name>redirectAfterValidation</param-name>
<param-value>true</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>CasValidationFilter{{ item.name }}</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<!-- End filter configuration for {{ item.name }} -->
{% else %}
<!-- Email not enabled for client {{ item.name }} -->
{% endif %}
{% endfor %}
<filter>
<filter-name>CasHttpServletRequestWrapperFilter</filter-name>
<filter-class>org.jasig.cas.client.util.HttpServletRequestWrapperFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>CasHttpServletRequestWrapperFilter</filter-name>
<url-pattern>/public/*</url-pattern>
</filter-mapping>
<!-- prevent Zimbra from adding ;jsessionid=XXXX in the URL, which the CAS server could reject
as it doesn't match the initial service anymore -->
<session-config>
<tracking-mode>COOKIE</tracking-mode>
</session-config>
marker: '<!-- "# {mark} ANSIBLE MANAGED BLOCK (wh Zimbra CAS Auth)" -->'
insertafter: '</error-page>'
validate: xmllint %s
notify: restart zmmailboxd
tags: zcs

View File

@ -0,0 +1,9 @@
---
- name: Ensure every domain has a dkim key
shell: /opt/zimbra/libexec/zmdkimkeyutil -q -d {{ item }} || /opt/zimbra/libexec/zmdkimkeyutil -a -d {{ item }}
become_user: zimbra
loop: "{{ wh_mail_domains + wh_mail_aliases }}"
changed_when: False
tags: mail

View File

@ -0,0 +1,5 @@
---
# Collect the primary mail domain of every client that has mail enabled
# and a domain defined.
- name: Build a list of Zimbra domains
  set_fact:
    wh_mail_domains: >-
      {{ wh_clients
         | selectattr('mail', 'defined')
         | selectattr('mail.enabled', 'equalto', True)
         | selectattr('mail.domain', 'defined')
         | map(attribute='mail.domain')
         | list }}
  tags: mail

View File

@ -0,0 +1,9 @@
---
# Role entry point: gather facts, then configure the parts this node is
# responsible for. Uses include_tasks (the static `include` is deprecated)
# with tags: always so the dynamic includes still run under --tags and the
# inner tasks' own tags keep filtering.
- include_tasks: facts.yml
  tags: always
# Account synchronization only runs on the primary LDAP master
- include_tasks: zmldapsync.yml
  when: zcs_i_am_primary_ldap | default(False) == True
  tags: always
# CAS preauth setup is only relevant on nodes running a mailbox server
- include_tasks: cas.yml
  when: "'mailbox' in zcs_enabled_components"
  tags: always

View File

@ -0,0 +1,22 @@
---
# Deploy the LDAP -> Zimbra synchronization configuration and the systemd
# service/timer pair that runs it periodically.
- name: Deploy LDAP sync configuration
  # the file contains the LDAP bind password: keep it private (octal 0600)
  template: src=zmldapsync-wh.yml.j2 dest=/opt/zimbra/conf/zmldapsync-wh.yml mode=0600
  notify: start zmldapsync-wh
  tags: mail
- name: Deploy LDAP sync systemd units
  template: src=zmldapsync-wh.{{ item }}.j2 dest=/etc/systemd/system/zmldapsync-wh.{{ item }}
  loop:
    - service
    - timer
  register: wh_zimbra_systemd_unit
  tags: mail
- name: Reload systemd
  systemd: daemon_reload=True
  # on a loop-registered result, `is changed` is true as soon as any item changed
  when: wh_zimbra_systemd_unit is changed
  tags: mail
- name: Enable LDAP sync services
  # only the timer is enabled/started; it pulls in the oneshot service
  systemd: name=zmldapsync-wh.timer state=started enabled=True
  tags: mail

View File

@ -0,0 +1,7 @@
# Oneshot service run by zmldapsync-wh.timer (no [Install] section:
# the timer unit is what gets enabled and pulls this service in).
[Unit]
Description=Sync LDAP accounts into Zimbra for hosted clients
[Service]
Type=oneshot
ExecStart=/opt/zimbra/bin/zmldapsync --config /opt/zimbra/conf/zmldapsync-wh.yml
# Abort the sync if it has not finished after 5 minutes
TimeoutSec=300

View File

@ -0,0 +1,8 @@
# Timer driving zmldapsync-wh.service.
[Unit]
Description=Sync LDAP Users with Zimbra for hosted clients
[Timer]
# every 15 minutes (at :00, :15, :30 and :45)
OnCalendar=*:0/15
[Install]
WantedBy=timers.target

View File

@ -0,0 +1,58 @@
{# zmldapsync configuration: one entry per hosted client with mail enabled.
   Rendered on the Zimbra primary LDAP master.
   NOTE(review): the ':9071//zimbraAdmin' double slash is kept as in the
   original admin URLs — confirm it is intentional. #}
---
general:
  notify:
    from: zimbra@{{ ansible_domain }}
    to: dani@fws.fr
domains:
{% for client in wh_clients | default([]) %}
{% if client.mail is defined and client.mail.enabled is defined and client.mail.enabled and client.mail.domain is defined %}
  {{ client.mail.domain }}:
    public_url: https://{{ client.mail.vhosts | first }}
    admin_url: https://{{ client.mail.vhosts | first }}:9071/
    cas:
      enabled: True
      server_url: https://sso-cl.fws.fr/cas
    ldap:
      servers:
        - ldap://dc3.fws.fr:389
        - ldap://dc1.fws.fr:389
        - ldap://dc2.fws.fr:389
      schema: ad
      bind_dn: CN=Zimbra,OU=Apps,DC=fws,DC=fr
{# to_json produces a valid YAML double-quoted scalar whatever the password
   contains; shell-style quoting (| quote) is not YAML-safe #}
      bind_pass: {{ vault_zimbra_ldap_bind_pass | to_json }}
      users:
        base: OU={{ client.name }},OU=Clients,DC=fws,DC=fr
        filter: "(&(objectClass=user)(mail=*))"
      groups:
        base: OU={{ client.name }},OU=Clients,DC=fws,DC=fr
    zimbra:
      create_if_missing: True
      setup_ldap_auth: True
{% if client.mail.domain_aliases is defined and client.mail.domain_aliases | length > 0 %}
      domain_aliases:
{% for alias in client.mail.domain_aliases %}
        - {{ alias }}
{% endfor %}
{% endif %}
      additional_domain_attrs:
{% if client.mail.vhosts is defined and client.mail.vhosts | length > 0 %}
        zimbraVirtualHostname:
{% for vhost in client.mail.vhosts %}
          - {{ vhost }}
{% endfor %}
        zimbraPublicServiceHostname: {{ client.mail.vhosts | first }}
        zimbraAdminConsoleLoginURL: https://{{ client.mail.vhosts | first }}:9071//zimbraAdmin/public/preauth_{{ client.mail.domain }}.jsp
        zimbraWebClientLoginURL: https://{{ client.mail.vhosts | first }}/public/preauth_{{ client.mail.domain }}.jsp
{% else %}
        zimbraPublicServiceHostname: zm-cl.fws.fr
        zimbraAdminConsoleLoginURL: https://zm-cl.fws.fr:9071//zimbraAdmin/public/preauth_{{ client.mail.domain }}.jsp
        zimbraWebClientLoginURL: https://zm-cl.fws.fr/public/preauth_{{ client.mail.domain }}.jsp
{% endif %}
        zimbraPublicServicePort: 443
        zimbraPublicServiceProtocol: https
        zimbraAdminConsoleLogoutURL: https://sso-cl.fws.fr/cas/logout
        zimbraWebClientLogoutURL: https://sso-cl.fws.fr/cas/logout
{% endif %}
{% endfor %}

View File

@ -1,18 +1,29 @@
---
- include: user.yml
- include: directories.yml
- include: facts.yml
- include: archive_pre.yml
when: vaultwarden_install_mode == 'upgrade' or vaultwarden_web_install_mode == 'upgrade'
- include: install.yml
- include: conf.yml
- include: migrate_bitwarden_rs.yml
when: vaultwarden_migrate_from_bitwarden
- include: iptables.yml
- include_tasks: user.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: facts.yml
tags: always
# tags: always is required on dynamic includes: without it this include is
# skipped whenever the play runs with --tags, unlike its paired archive_post
- include_tasks: archive_pre.yml
  when: vaultwarden_install_mode | default('none') == 'upgrade' or vaultwarden_web_install_mode | default('none') == 'upgrade'
  tags: always
- include_tasks: install.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: migrate_bitwarden_rs.yml
when: vaultwarden_migrate_from_bitwarden | default(False)
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
- include: service.yml
- include: write_version.yml
- include: archive_post.yml
when: vaultwarden_install_mode == 'upgrade' or vaultwarden_web_install_mode == 'upgrade'
- include: cleanup.yml
tags: always
- include_tasks: service.yml
tags: always
- include_tasks: write_version.yml
tags: always
- include_tasks: archive_post.yml
when: vaultwarden_install_mode | default('none') == 'upgrade' or vaultwarden_web_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: cleanup.yml
tags: always

View File

@ -1,8 +1,12 @@
---
- include: user.yml
- include: install.yml
- include: iptables.yml
- include_tasks: user.yml
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
- include: services.yml
tags: always
- include_tasks: services.yml
tags: always

View File

@ -1,11 +1,18 @@
---
- include: user.yml
- include: directories.yml
- include: facts.yml
- include: archive_pre.yml
when: wp_install_mode == 'upgrade'
- include: conf.yml
- include: install.yml
- include: archive_post.yml
when: wp_install_mode == 'upgrade'
- include_tasks: user.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: facts.yml
tags: always
- include_tasks: archive_pre.yml
when: wp_install_mode | default('none') == 'upgrade'
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: archive_post.yml
when: wp_install_mode | default('none') == 'upgrade'
tags: always

View File

@ -4,5 +4,4 @@
name: zabbix_agent_port
state: "{{ (zabbix_agent_src_ip | length > 0) | ternary('present', 'absent') }}"
rules: "-A INPUT -m state --state NEW -p tcp --dport {{ zabbix_agent_port }} -s {{ zabbix_agent_src_ip | join(',') }} -j ACCEPT"
when: iptables_manage | default(True)
tags: zabbix

View File

@ -1,12 +1,22 @@
---
- include: facts.yml
- include: install.yml
- include: install_{{ ansible_os_family }}.yml
- include: selinux.yml
- include_tasks: facts.yml
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: install_{{ ansible_os_family }}.yml
tags: always
- include_tasks: selinux.yml
when: ansible_selinux.status == 'enabled'
- include: conf.yml
- include: psk.yml
- include: sensors.yml
- include: iptables.yml
- include: service.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: psk.yml
tags: always
- include_tasks: sensors.yml
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
tags: always
- include_tasks: service.yml
tags: always

View File

@ -1,5 +1,6 @@
---
- include: sensors_{{ ansible_os_family }}.yml
- include_tasks: sensors_{{ ansible_os_family }}.yml
tags: always
- name: Check if hardware sensors should be detected
stat: path=/etc/zabbix/sensors.ini

View File

@ -1,5 +1,8 @@
---
- include: install.yml
- include: conf.yml
- include: services.yml
- include_tasks: install.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: services.yml
tags: always

View File

@ -4,5 +4,4 @@
name: zabbix_proxy_port
state: "{{ (zabbix_proxy_src_ip | length > 0) | ternary('present','absent') }}"
rules: "-A INPUT -m state --state NEW -p tcp --dport {{ zabbix_proxy_port | default('10051') }} -s {{ zabbix_proxy_src_ip | join(',') }} -j ACCEPT"
when: iptables_manage | default(True)
tags: zabbix

View File

@ -1,12 +1,20 @@
---
- include: install.yml
- include: directories.yml
- include: upgrade.yml
- include: psk.yml
- include: selinux.yml
- include_tasks: install.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: upgrade.yml
tags: always
- include_tasks: psk.yml
tags: always
- include_tasks: selinux.yml
when: ansible_selinux.status == 'enabled'
- include: conf.yml
- include: iptables.yml
- include: service.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: iptables.yml
when: iptables_manage | default(True)
- include_tasks: service.yml
tags: always

View File

@ -1,11 +1,18 @@
---
- include: facts.yml
- include: install.yml
- include: directories.yml
- include: selinux.yml
- include_tasks: facts.yml
tags: always
- include_tasks: install.yml
tags: always
- include_tasks: directories.yml
tags: always
- include_tasks: selinux.yml
when: ansible_selinux.status == 'enabled'
- include: conf.yml
- include: iptables.yml
- include: service.yml
tags: always
- include_tasks: conf.yml
tags: always
- include_tasks: iptables.yml
  # gate on iptables_manage like the other roles' iptables includes;
  # default(True) preserves the previous always-run behavior
  when: iptables_manage | default(True)
  tags: always
- include_tasks: service.yml
tags: always

View File

@ -1,6 +1,7 @@
---
- include: install_{{ ansible_os_family }}.yml
- include_tasks: install_{{ ansible_os_family }}.yml
tags: always
- name: load ZFS
modprobe: name=zfs