---
# Proxmox VE (PVE) host configuration tasks:
# tooling, KSM, kernel modules, cluster-status facts, backup hooks,
# limits/sysctl tuning, dehydrated TLS hook and corosync hardening.

- include_tasks: facts.yml
  tags: always

- name: Install tools
  apt:
    name:
      - pigz
      - ksm-control-daemon
      - openvswitch-switch
      - ethtool
      - patch
  tags: pve

- name: Deploy vzdump config
  template:
    src: vzdump.conf.j2
    dest: /etc/vzdump.conf
  tags: pve

- name: Deploy ksm configuration
  template:
    src: ksmtuned.conf.j2
    dest: /etc/ksmtuned.conf
  notify: restart ksmtuned
  tags: pve

# Start and enable ksmtuned only when pve_ksm is set, stop/disable otherwise
- name: Handle ksm services
  service:
    name: ksmtuned
    state: "{{ pve_ksm | ternary('started', 'stopped') }}"
    enabled: "{{ pve_ksm | bool }}"
  tags: pve

- name: Configure modules to load
  copy:
    content: "{{ pve_mod_to_load | join('\n') }}"
    dest: /etc/modules-load.d/firewall.conf
  register: pve_modules
  tags: pve

- name: Load modules
  service:
    name: systemd-modules-load
    state: restarted
  when: pve_modules is changed
  tags: pve

# Newer pvesh supports JSON output directly; errors are tolerated so we can
# fall back to the legacy invocation below on older PVE releases
- name: Check proxmox cluster status
  command: pvesh get /cluster/status --output-format=json
  register: pve_cluster_status_1
  ignore_errors: true
  changed_when: false
  tags: pve

- name: Parse proxmox cluster status
  set_fact:
    pve_cluster: "{{ pve_cluster_status_1.stdout | from_json }}"
  when: pve_cluster_status_1.rc == 0
  tags: pve

- name: Check proxmox cluster status (old pvesh)
  command: pvesh get /cluster/status
  when: pve_cluster_status_1.rc != 0
  register: pve_cluster_status_2
  changed_when: false
  tags: pve

- name: Parse proxmox cluster status (old pvesh)
  set_fact:
    pve_cluster: "{{ pve_cluster_status_2.stdout | from_json }}"
  when: pve_cluster_status_1.rc != 0
  tags: pve

- name: Deploy the unlock_dev script
  copy:
    src: unlock_dev
    dest: /usr/local/bin/unlock_dev
    mode: "0755"
  tags: pve

# Clean up the obsolete pve-hookd daemon and every file it installed
- name: Check if the old hookd daemon is installed
  stat:
    path: /usr/local/bin/pve-hookd
  register: pve_old_hookd
  tags: pve

- name: Stop the old hookd daemon
  service:
    name: pve-hookd
    state: stopped
  when: pve_old_hookd.stat.exists
  tags: pve

- name: Remove the old hook daemon
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /usr/local/bin/pve-hookd
    - /etc/hooks
    - /etc/systemd/system/pve-hookd.service
    - /etc/tmpfiles.d/pve-container-hooks.conf
    - /etc/systemd/system/pve-container@.service.d/pve-container-hooks.conf
    - /var/run/lxc/active
  tags: pve

- name: Reload systemd
  command: systemctl daemon-reload
  when: pve_old_hookd.stat.exists
  tags: pve

- include_tasks: pve_online.yml
  when: pve_online | bool
  tags: always

- name: Create backup dir
  file:
    path: /home/lbkp/pve
    state: directory
  tags: pve

- name: Install pre and post backup scripts
  copy:
    src: "{{ item.src }}"
    dest: /etc/backup/{{ item.type }}.d/{{ item.src }}
    mode: "0755"
  loop:
    - src: pve_dump
      type: pre
    - src: pve_rm_dump
      type: post
  tags: pve

- name: Remove registration nag
  patch:
    src: remove_nag.patch
    dest: /usr/share/perl5/PVE/API2/Subscription.pm
  ignore_errors: true  # Don't fail on old PVE where the patch doesn't apply
  notify: restart pveproxy
  tags: pve

- name: Rise limits for containers
  pam_limits:
    domain: '*'
    limit_type: "{{ item.type }}"
    limit_item: nofile
    value: "{{ item.value }}"
  loop:
    - type: soft
      value: 65000
    - type: hard
      value: 65535
  tags: pve

- name: Rise inotify instances
  sysctl:
    name: fs.inotify.max_user_instances
    value: "1024"
    sysctl_file: /etc/sysctl.d/ansible.conf
  tags: pve

- name: Ensure dehydrated hook dir exists
  file:
    path: /etc/dehydrated/hooks_deploy_cert.d/
    state: directory
  tags:
    - pve
    - ssl

- name: Deploy dehydrated hook
  template:
    src: dehydrated_hook.sh.j2
    dest: /etc/dehydrated/hooks_deploy_cert.d/20pve.sh
    mode: "0755"
  tags:
    - pve
    - ssl

# See https://bugzilla.proxmox.com/show_bug.cgi?id=2326 why
- name: Create corosync override directory
  file:
    path: /etc/systemd/system/corosync.service.d/
    state: directory
  tags: pve

- name: Setup corosync to be restarted in case of failure
  copy:
    content: |
      [Service]
      Restart=on-failure
      RestartSec=1
    dest: /etc/systemd/system/corosync.service.d/ansible.conf
  register: pve_corosync_unit
  tags: pve

- name: Reload systemd
  systemd:
    daemon_reload: true
  when: pve_corosync_unit is changed
  tags: pve

- include_tasks: zabbix.yml
  tags: always

- include_tasks: filebeat.yml
  tags: always