Compare commits

...

20 commits

112 changed files with 955 additions and 605 deletions

2
.gitignore vendored
View file

@@ -1,6 +1,6 @@
*.retry
*.sw*
.vagrant/
*.log
.idea/
.vscode/
venv/

39
Vagrantfile vendored
View file

@@ -1,39 +0,0 @@
Vagrant.require_version ">= 2.0.0"
PORT = 19022
def provisioned?(vm="default", provider="virtualbox")
File.exist?(".vagrant/machines/#{vm}/#{provider}/action_provision")
end
Vagrant.configure(2) do |config|
config.vm.network :private_network, ip: "192.168.56.10"
config.vm.network :forwarded_port, guest: PORT, host: PORT
config.vm.box = "ubuntu/focal64"
config.vm.hostname = "datacoop"
config.vm.provider :virtualbox do |v|
v.cpus = 8
v.memory = 16384
end
config.vm.provision :ansible do |ansible|
ansible.compatibility_mode = "2.0"
ansible.playbook = "playbook.yml"
ansible.ask_vault_pass = true
ansible.verbose = "v"
# If the VM is already provisioned, we need to use the new port
if provisioned?
config.ssh.guest_port = PORT
ansible.extra_vars = {
ansible_port: PORT,
from_vagrant: true
}
else
ansible.extra_vars = {
from_vagrant: true
}
end
end
end

View file

@@ -1,8 +1,8 @@
[defaults]
ask_vault_pass = True
inventory = datacoop_hosts
inventory = inventory.ini
interpreter_python = /usr/bin/python3
remote_user = root
remote_user = ansible
retry_files_enabled = True
use_persistent_connections = True
forks = 10
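Not part of the change itself, but a quick way to confirm the new defaults are honoured when run from the repository root (a hypothetical check; the vault password prompt comes from ask_vault_pass):

# ansible.cfg in the current directory is read automatically, so host
# resolution should now go through inventory.ini instead of datacoop_hosts.
ansible all --list-hosts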

117
cloud-init/cloud.cfg Normal file
View file

@@ -0,0 +1,117 @@
# cloud-config
# The top level settings are used as module
# and system configuration.
# A set of users which may be applied and/or used by various modules
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
- default
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
disable_root: true
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
apt:
# This prevents cloud-init from rewriting apt's sources.list file,
# which has been a source of surprise.
preserve_sources_list: true
# If you use datasource_list array, keep array items in a single line.
# If you use multi line array, ds-identify script won't read array items.
# Example datasource config
# datasource:
# Ec2:
# metadata_urls: [ 'blah.com' ]
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
- seed_random
- bootcmd
- write-files
- growpart
- resizefs
- disk_setup
- mounts
- set_hostname
- update_hostname
- update_etc_hosts
- ca-certs
- rsyslog
- users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
- snap
- ssh-import-id
- keyboard
- locale
- set-passwords
- grub-dpkg
- apt-pipelining
- apt-configure
- ntp
- timezone
- disable-ec2-metadata
- runcmd
- byobu
# The modules that run in the 'final' stage
cloud_final_modules:
- package-update-upgrade-install
- fan
- landscape
- lxd
- write-files-deferred
- puppet
- chef
- mcollective
- salt-minion
- reset_rmc
- refresh_rmc_and_interface
- rightscale_userdata
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- install-hotplug
- phone-home
- final-message
- power-state-change
# System and/or distro specific settings
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
distro: debian
# Default user name + that default users groups (if added/used)
default_user:
name: ansible
lock_passwd: True
gecos: Ansible User
groups: []
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
package_mirrors:
- arches: [default]
failsafe:
primary: https://deb.debian.org/debian
security: https://deb.debian.org/debian-security
ssh_svcname: ssh
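A possible sanity check for the new cloud-config, assuming a reasonably recent cloud-init is available locally (older releases spell the subcommand cloud-init devel schema):

# Validate the file against cloud-init's built-in schema.
cloud-init schema --config-file cloud-init/cloud.cfg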

View file

@@ -1,5 +0,0 @@
[production]
hevonen.servers.data.coop ansible_port=19022
[monitoring]
uptime.data.coop

View file

@@ -2,20 +2,15 @@
usage () {
{
echo "Usage: $0 [--vagrant]"
echo "Usage: $0 [--vagrant] base"
echo "Usage: $0 [--vagrant] users"
echo "Usage: $0 [--vagrant] services [SERVICE]"
echo "Usage: $0"
echo "Usage: $0 base"
echo "Usage: $0 users"
echo "Usage: $0 services [--deploy] [SERVICE]"
} >&2
}
BASE_CMD="ansible-playbook playbook.yml"
if [ "$1" = "--vagrant" ]; then
BASE_CMD="$BASE_CMD --verbose --inventory=vagrant_host"
VAGRANT_VAR="from_vagrant"
shift
fi
DEPLOY="false"
if [ -z "$(ansible-galaxy collection list community.general 2>/dev/null)" ]; then
echo "Installing community.general modules"
@@ -28,19 +23,24 @@ if [ -z "$1" ]; then
else
case $1 in
"services")
if [ -n "$2" && "$2" = "--deploy" ]; then
DEPLOY="true"
shift
fi
if [ -z "$2" ]; then
echo "Deploying all services!"
eval "$BASE_CMD --tags setup_services $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags setup_services --extra-vars "deploy_services=$DEPLOY"
else
echo "Deploying service: $2"
$BASE_CMD --tags setup_services --extra-vars '{"single_service": "'"$2"'"'"$(test -z "$VAGRANT_VAR" || printf '%s' ', "'"$VAGRANT_VAR"'": true')"'}'
$BASE_CMD --tags setup_services --extra-vars "deploy_services=$DEPLOY" --extra-vars "single_service=$2"
fi
;;
"base")
eval "$BASE_CMD --tags base_only $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags base_only
;;
"users")
eval "$BASE_CMD --tags setup-users $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags setup-users
;;
*)
usage
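For orientation, hypothetical invocations matching the reworked usage text above (the service name nextcloud is only an example):

./deploy.sh                             # full playbook run
./deploy.sh base                        # base configuration only
./deploy.sh users                       # user management only
./deploy.sh services                    # all services, render config without deploying
./deploy.sh services --deploy nextcloud # deploy a single service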

View file

@@ -1,24 +1,17 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
users:
- name: graffen
comment: Jesper Hess Nielsen
password: '!'
- name: ansible
comment: Ansible User
password_lock: true
groups: []
ssh_keys: []
- name: valberg
comment: Vidir Valberg Gudmundsson
password: $6$qt3G.E.CxhC$OwBDn4rZUbCz06HLEMBHjgvKjxiv/eeerbklTHi.gpHIn1OejzX3k2.0NM0Dforaw6Yn5Y8Cgn8kL2FdbQLZ3/
groups:
- sudo
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUmGeHc6QXDcJHkmVxbTUv04Q3vs20avquoGr6eOkkvYbcgjuFnBOOtvs2Nul1odcvvnHa1nN7DfL8XJamiwsB1B/xe2seaNS1axgwk9XowlVN9pgga8gsC+4gZWBtSObG2GR8n4NtPENzPmW5deNn8dRpTvULPMxZ0VRE9yNQOx8v8w85yYh+vxbbkWGVDYJU23yuJI50U9y6bXxNHinsACDFBeR/giXDlw29TaOaSxz0R6zrRPBoX+V68RyWwBL+KWQKtX2ULtJI40S98Ohd6p41bIxYHCBS/zroqNne8PjYOLcHHsjHUGfTvhcS5a3zdz/iHsvsaOOjFjsydAXH valberg
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4FRrbTpxwGdlF6RVi/thJaMlaEE0Z9YCQA4Y+KnHbBoVWMjzgbIkSWw3MM+E/iiVnix8SFh4tjDSdFjb8lCvHt/PqhMFhZJ02vhVgSwyU+Ji5ur23i202LB9ua54NLN4kNG8K47U0tKi2/EV6LWl2QdRviAcOUctz6u9XDkkMLUgPEYH384XSTRRj4GJ8+0LRzB2rXqetH3gBe9v1vlv0ETYWvzTnpfZUxcrrqEGtXV9Wa0BZoWLos2oKOsYVjNdLZMoFpmyBxPnqzAi1hr7beblFZKqBkvD7XA9RnERbZn1nxkWufVahppPjKQ+se3esWJCp6ri/vNP4WNKY3hiIoekBLbpvGcP1Te7cAIQXiZOilN92NKKYrzN2gAtsxgqGZw7lI1PE71luGdPir2Evl6hPj6/nnNdEHZWgcmBSPy17uCpVvZYBcDDzj8L3hbkLVQ3kcLZTz6I8BXvuGqoeLvRQpBtn5EaLpCCOmXuKqm+dzHzsOIwh+SA5NA8M3P0=
- name: reynir
comment: Reynir Björnsson
password: $6$MiPv.ZFlWnLHGNOb$jdQD9NaPMRUGaP2YHRJNwrMPBGl9qwK0HFhI6x51Xpn7hdzuC4GIwvOw1DJK33sNs/gGP5bWB0izviXkDcq7B0
password_lock: false
groups:
- sudo
ssh_keys:
@@ -28,8 +21,19 @@ users:
- name: samsapti
comment: Sam Al-Sapti
password: $6$18dN367fG162hQ9A$Aqkf3O24Ve1btzh1PPOPg3uyydv/AQYUxethcoB4klotebJq3/XsydYT7XBuarxfDccVwyPTMlsP3U8VfQpG60
password_lock: false
groups:
- sudo
ssh_keys:
- sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIFWZGLov8wPBNxuvnaPK+8vv6wK5hHUVEFzXKsN9QeuBAAAADHNzaDpzYW1zYXB0aQ== ssh:samsapti
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPd/4fQV7CL8/KVwbo/phiV5UdXFBIDlkZ+ps8C7FeRf cardno:14 336 332
- name: valberg
comment: Vidir Valberg Gudmundsson
password: $6$qt3G.E.CxhC$OwBDn4rZUbCz06HLEMBHjgvKjxiv/eeerbklTHi.gpHIn1OejzX3k2.0NM0Dforaw6Yn5Y8Cgn8kL2FdbQLZ3/
password_lock: false
groups:
- sudo
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUmGeHc6QXDcJHkmVxbTUv04Q3vs20avquoGr6eOkkvYbcgjuFnBOOtvs2Nul1odcvvnHa1nN7DfL8XJamiwsB1B/xe2seaNS1axgwk9XowlVN9pgga8gsC+4gZWBtSObG2GR8n4NtPENzPmW5deNn8dRpTvULPMxZ0VRE9yNQOx8v8w85yYh+vxbbkWGVDYJU23yuJI50U9y6bXxNHinsACDFBeR/giXDlw29TaOaSxz0R6zrRPBoX+V68RyWwBL+KWQKtX2ULtJI40S98Ohd6p41bIxYHCBS/zroqNne8PjYOLcHHsjHUGfTvhcS5a3zdz/iHsvsaOOjFjsydAXH valberg
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4FRrbTpxwGdlF6RVi/thJaMlaEE0Z9YCQA4Y+KnHbBoVWMjzgbIkSWw3MM+E/iiVnix8SFh4tjDSdFjb8lCvHt/PqhMFhZJ02vhVgSwyU+Ji5ur23i202LB9ua54NLN4kNG8K47U0tKi2/EV6LWl2QdRviAcOUctz6u9XDkkMLUgPEYH384XSTRRj4GJ8+0LRzB2rXqetH3gBe9v1vlv0ETYWvzTnpfZUxcrrqEGtXV9Wa0BZoWLos2oKOsYVjNdLZMoFpmyBxPnqzAi1hr7beblFZKqBkvD7XA9RnERbZn1nxkWufVahppPjKQ+se3esWJCp6ri/vNP4WNKY3hiIoekBLbpvGcP1Te7cAIQXiZOilN92NKKYrzN2gAtsxgqGZw7lI1PE71luGdPir2Evl6hPj6/nnNdEHZWgcmBSPy17uCpVvZYBcDDzj8L3hbkLVQ3kcLZTz6I8BXvuGqoeLvRQpBtn5EaLpCCOmXuKqm+dzHzsOIwh+SA5NA8M3P0=

View file

@@ -0,0 +1,10 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: data.coop
letsencrypt_email: admin@data.coop
services_include:
- nginx_proxy
- uptime_kuma
- watchtower

View file

@@ -0,0 +1,13 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: data.coop
letsencrypt_email: admin@data.coop
services_exclude:
- uptime_kuma
smtp_host: "postfix"
smtp_port: "587"
ldap_dn: "dc=data,dc=coop"

View file

@@ -0,0 +1,13 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: staging.data.coop
letsencrypt_email: admin@data.coop
services_exclude:
- uptime_kuma
smtp_host: "postfix"
smtp_port: "587"
ldap_dn: "dc=staging,dc=data,dc=coop"

8
host_vars/cavall.yml Normal file
View file

@@ -0,0 +1,8 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 22

12
host_vars/folald.yml Normal file
View file

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
vm_host: cavall
vm_type: control
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.5

11
host_vars/hestur.yml Normal file
View file

@@ -0,0 +1,11 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
vm_host: cloud
vm_type: uptime
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 22

12
host_vars/poltre.yml Normal file
View file

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
vm_host: cavall
vm_type: app
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.2

12
host_vars/varsa.yml Normal file
View file

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
vm_host: cavall
vm_type: app
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.3

23
inventory.ini Normal file
View file

@@ -0,0 +1,23 @@
[proxmox]
cavall
[monitoring]
hestur
[production]
poltre
[staging]
varsa
[control]
folald
[virtual:children]
monitoring
production
staging
control
[physical:children]
proxmox
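A hedged way to check that the new inventory and its aggregate groups resolve as intended (assumes SSH access and the vault password are already in place):

# List the members of the aggregate groups…
ansible virtual --list-hosts
ansible physical --list-hosts
# …and reach every host via the FQDNs and ports defined in host_vars/.
ansible all -m ping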

View file

@@ -1,27 +0,0 @@
# vim: ft=yaml.ansible
---
- hosts: production
gather_facts: true
become: true
vars:
ldap_dn: "dc=data,dc=coop"
vagrant: "{{ from_vagrant is defined and from_vagrant }}"
letsencrypt_enabled: "{{ not vagrant }}"
base_domain: "{{ 'datacoop.devel' if vagrant else 'data.coop' }}"
letsencrypt_email: "admin@{{ base_domain }}"
smtp_host: "postfix"
smtp_port: "587"
services_exclude:
- uptime_kuma
tasks:
- import_role:
name: ubuntu_base
tags:
- base_only
- import_role:
name: docker

View file

@@ -0,0 +1,67 @@
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!
auto lo
iface lo inet loopback
auto eno1
iface eno1 inet manual
auto eno2
iface eno2 inet manual
iface eno3 inet manual
iface eno4 inet manual
auto bond0
iface bond0 inet manual
bond-slaves eno1 eno2
bond-miimon 100
bond-mode 802.3ad
bond-xmit-hash-policy layer2+3
auto vmbr0
iface vmbr0 inet static
address 85.209.118.134/28
gateway 85.209.118.129
bridge-ports bond0
bridge-stp off
bridge-fd 0
#Main bridge for public VMs
iface vmbr0 inet6 static
address 2a09:94c4:55d1:7680::86/64
gateway 2a09:94c4:55d1:7680::1
auto vmbr1
iface vmbr1 inet manual
address 10.2.1.1/24
bridge-ports none
bridge-stp off
bridge-fd 0
#Internal bridge for VMs
auto vmbr2
iface vmbr2 inet static
address 192.168.1.1/24
bridge-ports none
bridge-stp off
bridge-fd 0
#NAT bridge for VMs that need masquerading
post-up echo 1 > /proc/sys/net/ipv4/ip_forward
post-up iptables -t nat -A POSTROUTING -s '192.168.1.0/24' -o vmbr0 -j MASQUERADE
post-up iptables -t nat -A PREROUTING -i vmbr0 -p tcp --dport 19022 -j DNAT --to 192.168.1.144:19022
post-down iptables -t nat -D POSTROUTING -s '192.168.1.0/24' -o vmbr0 -j MASQUERADE
post-down iptables -t nat -D PREROUTING -i vmbr0 -p tcp --dport 19022 -j DNAT --to 192.168.1.144:19022
source /etc/network/interfaces.d/*
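If this file is ever adjusted by hand on the Proxmox host, ifupdown2 (the default there) can check and reload it without a reboot; a sketch, not something the playbook runs:

# Compare the running state against the configuration…
ifquery --check -a
# …then apply any changes in place.
ifreload -a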

View file

@@ -1,229 +1,6 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
volume_root_folder: "/docker-volumes"
volume_website_folder: "{{ volume_root_folder }}/websites"
services:
### Internal services ###
postfix:
domain: "smtp.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/postfix"
pre_deploy_tasks: true
version: "v3.6.1-alpine"
nginx_proxy:
volume_folder: "{{ volume_root_folder }}/nginx"
pre_deploy_tasks: true
version: "1.3-alpine"
acme_companion_version: "2.2"
openldap:
domain: "ldap.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/openldap"
pre_deploy_tasks: true
version: "1.5.0"
phpldapadmin_version: "0.9.0"
netdata:
domain: "netdata.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/netdata"
version: "v1"
portainer:
domain: "portainer.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/portainer"
version: "2.19.0"
keycloak:
domain: sso.{{ base_domain }}
volume_folder: "{{ volume_root_folder }}/keycloak"
version: "22.0"
postgres_version: "10"
allowed_sender_domain: true
restic:
volume_folder: "{{ volume_root_folder }}/restic"
pre_deploy_tasks: true
remote_user: dc-user
remote_domain: rynkeby.skovgaard.tel
host_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLGol2G+a87ssy0nu/STKBZSiGyhZhZKx/ujfe9IeFo
repository: restic
version: "1.7.0"
disabled_in_vagrant: true
# mail dance
domain: "noreply.{{ base_domain }}"
allowed_sender_domain: true
mail_from: "backup@noreply.{{ base_domain }}"
docker_registry:
domain: "docker.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/docker-registry"
pre_deploy_tasks: true
post_deploy_tasks: true
username: "docker"
password: "{{ docker_password }}"
version: "2"
### External services ###
nextcloud:
domain: "cloud.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/nextcloud"
pre_deploy_tasks: true
version: 28-apache
postgres_version: "10"
redis_version: 7-alpine
allowed_sender_domain: true
forgejo:
domain: "git.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/forgejo"
version: "7.0.5"
allowed_sender_domain: true
passit:
domain: "passit.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/passit"
version: stable
postgres_version: 15-alpine
allowed_sender_domain: true
matrix:
domain: "matrix.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/matrix"
pre_deploy_tasks: true
version: v1.114.0
postgres_version: 15-alpine
allowed_sender_domain: true
element:
domain: "element.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/element"
pre_deploy_tasks: true
version: v1.11.80
privatebin:
domain: "paste.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/privatebin"
pre_deploy_tasks: true
version: "20221009"
hedgedoc:
domain: "pad.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/hedgedoc"
pre_deploy_tasks: true
version: 1.9.9-alpine
postgres_version: 10-alpine
data_coop_website:
domain: "{{ base_domain }}"
www_domain: "www.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/datacoop"
pre_deploy_tasks: true
version: stable
staging_domain: "staging.{{ base_domain }}"
staging_version: staging
slides_2022_website:
domain: "2022.slides.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/slides-2022"
version: latest
fedi_dk_website:
domain: fedi.dk
volume_folder: "{{ volume_website_folder }}/fedidk"
version: latest
vhs_website:
domain: vhs.data.coop
volume_folder: "{{ volume_website_folder }}/vhs"
version: latest
cryptohagen_website:
domains:
- "cryptohagen.dk"
- "www.cryptohagen.dk"
volume_folder: "{{ volume_website_folder }}/cryptohagen"
ulovliglogning_website:
domains:
- "ulovliglogning.dk"
- "www.ulovliglogning.dk"
- "ulovlig-logning.dk"
- "www.ulovlig-logning.dk"
volume_folder: "{{ volume_website_folder }}/ulovliglogning"
cryptoaarhus_website:
domains:
- "cryptoaarhus.dk"
- "www.cryptoaarhus.dk"
volume_folder: "{{ volume_website_folder }}/cryptoaarhus"
drone:
domain: "drone.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/drone"
version: "1"
mailu:
domain: "mail.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mailu"
pre_deploy_tasks: true
dns: 192.168.203.254
subnet: 192.168.203.0/24
version: "2.0"
postgres_version: 14-alpine
redis_version: alpine
mastodon:
domain: "social.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mastodon"
pre_deploy_tasks: true
post_deploy_tasks: true
version: v4.2.10
postgres_version: 14-alpine
redis_version: 6-alpine
allowed_sender_domain: true
rallly:
domain: "when.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/rallly"
pre_deploy_tasks: true
version: "2"
postgres_version: 14-alpine
allowed_sender_domain: true
membersystem:
domain: "member.{{ base_domain }}"
django_admins: "Vidir:valberg@orn.li,Balder:benjaoming@data.coop"
volume_folder: "{{ volume_root_folder }}/membersystem"
version: latest
postgres_version: 13-alpine
allowed_sender_domain: true
writefreely:
domain: "write.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/writefreely"
pre_deploy_tasks: true
version: v0.15.0
mariadb_version: "11.2"
allowed_sender_domain: true
watchtower:
volume_folder: "{{ volume_root_folder }}/watchtower"
version: "1.5.3"
diun:
version: "4.28"
volume_folder: "{{ volume_root_folder }}/diun"
matrix_user: "@diun:data.coop"
matrix_room: "#datacoop-services-update:data.coop"
### Uptime monitoring ###
uptime_kuma:
domain: "uptime.{{ base_domain }}"
status_domain: "status.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/uptime_kuma"
pre_deploy_tasks: true
version: "latest"
services_exclude: []
services_include: "{{ services | dict2items | map(attribute='key') | list | difference(services_exclude) }}"
docker_rootless: false
docker_rootless_user: rootlessdocker
docker_rootless_user_uid: 1102

View file

@@ -1,26 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Create volume folder for service {{ service.name }}
file:
name: "{{ service.vars.volume_folder }}"
state: directory
- name: Upload Compose file for service {{ service.name }}
template:
src: compose-files/{{ service.name }}.yml.j2
dest: "{{ service.vars.volume_folder }}/docker-compose.yml"
owner: root
mode: u=rw,go=
- name: Run pre-deployment tasks for service {{ service.name }}
include_tasks: pre_deploy/{{ service.name }}.yml
when: service.vars.pre_deploy_tasks is defined and service.vars.pre_deploy_tasks
- name: Deploy Compose stack for service {{ service.name }}
command: docker compose up -d --remove-orphans --pull always
args:
chdir: "{{ service.vars.volume_folder }}"
- name: Run post-deployment tasks for service {{ service.name }}
include_tasks: post_deploy/{{ service.name }}.yml
when: service.vars.post_deploy_tasks is defined and service.vars.post_deploy_tasks

View file

@@ -1,44 +1,114 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Add Docker PGP key
apt_key:
keyserver: pgp.mit.edu
id: 8D81803C0EBFCD88
- name: Add Docker apt PGP key
ansible.builtin.apt_key:
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add Docker apt repository
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu bionic stable
ansible.builtin.apt_repository:
filename: docker
repo: "deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
state: present
update_cache: yes
update_cache: true
- name: Install Docker
apt:
name: "{{ pkgs }}"
state: present
vars:
pkgs:
ansible.builtin.apt:
name:
- containerd.io
- docker-ce
- docker-ce-cli
- docker-buildx-plugin
- docker-compose-plugin
state: present
- name: Create group for Docker socket
ansible.builtin.group:
name: docker
state: present
- name: Configure rootful Docker
when: not docker_rootless
block:
- name: Make sure Docker is running
ansible.builtin.service:
name: docker
enabled: true
state: started
- name: Configure cron job to prune unused Docker data weekly
cron:
ansible.builtin.cron:
name: Prune unused Docker data
cron_file: ansible_docker_prune
job: 'docker system prune -fa && docker volume prune -fa'
job: docker system prune -fa --volumes --filter "until=6h"
special_time: weekly
user: root
state: present
- name: Create folder structure for bind mounts
file:
name: "{{ item }}"
state: directory
loop:
- "{{ volume_root_folder }}"
- "{{ volume_website_folder }}"
- name: Configure rootless Docker
when: docker_rootless
block:
- name: Make sure rootful Docker is stopped and disabled
ansible.builtin.systemd_service:
name: docker
enabled: false
scope: system
state: stopped
- name: Set up services
import_tasks: services.yml
tags:
- setup_services
- name: Install packages needed by rootless Docker
ansible.builtin.apt:
name:
- docker-ce-rootless-extras
- uidmap
- dbus-user-session
- fuse-overlayfs
- slirp4netns
state: present
- name: Create user for rootless Docker
ansible.builtin.user:
name: "{{ docker_rootless_user }}"
uid: "{{ docker_rootless_user_uid }}"
comment: Rootless Docker User
groups:
- docker
state: present
- name: Enable lingering for Docker user
ansible.builtin.command:
cmd: loginctl enable-linger {{ docker_rootless_user }}
creates: /var/lib/systemd/linger/{{ docker_rootless_user }}
- name: Set DOCKER_HOST environment variable globally
ansible.builtin.lineinfile:
path: /etc/profile
regexp: '^export DOCKER_HOST='
line: export DOCKER_HOST=unix:///run/user/{{ docker_rootless_user_uid }}/docker.sock
state: present
- name: Run rootless Docker setup script
ansible.builtin.command:
cmd: dockerd-rootless-setuptool.sh install
creates: /home/{{ docker_rootless_user }}/.config/systemd/user/docker.service
become: true
become_user: "{{ docker_rootless_user }}"
- name: Make sure rootless Docker is running
ansible.builtin.systemd_service:
name: docker.service
enabled: true
scope: user
state: started
become: true
become_user: "{{ docker_rootless_user }}"
- name: Configure cron job to prune unused Docker data weekly
ansible.builtin.cron:
name: Prune unused Docker data
cron_file: ansible_docker_rootless_prune
job: docker --host unix:///run/user/{{ docker_rootless_user_uid }}/docker.sock system prune -fa --volumes --filter "until=6h"
special_time: weekly
user: "{{ docker_rootless_user }}"
state: present
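A minimal post-run check for the rootless setup, using the default user name and UID that appear in the variables above (rootlessdocker, 1102):

# Query the per-user daemon over the socket that /etc/profile exports as DOCKER_HOST.
docker --host unix:///run/user/1102/docker.sock info
# Confirm lingering is on, so the daemon keeps running after logout.
loginctl show-user rootlessdocker --property=Linger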

View file

@@ -1,19 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Configure cron job to remove old Mastodon media daily
cron:
name: Clean Mastodon media data older than a week
cron_file: ansible_mastodon_clean_media
job: docker exec mastodon-web-1 tootctl media remove --days 7
special_time: daily
user: root
state: present
- name: Configure cron job to remove old Mastodon preview cards daily
cron:
name: Clean Mastodon preview card data older than two weeks
cron_file: ansible_mastodon_clean_preview_cards
job: docker exec mastodon-web-1 tootctl preview_cards remove --days 14
special_time: daily
user: root
state: present

View file

@@ -0,0 +1,232 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
volume_root_folder: "/docker-volumes"
volume_website_folder: "{{ volume_root_folder }}/websites"
services:
### Internal services ###
postfix:
domain: "smtp.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/postfix"
pre_deploy_tasks: true
version: "v3.6.1-alpine"
nginx_proxy:
volume_folder: "{{ volume_root_folder }}/nginx"
pre_deploy_tasks: true
version: "1.3-alpine"
acme_companion_version: "2.2"
openldap:
domain: "ldap.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/openldap"
pre_deploy_tasks: true
version: "1.5.0"
phpldapadmin_version: "0.9.0"
netdata:
domain: "netdata.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/netdata"
version: "v1"
portainer:
domain: "portainer.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/portainer"
version: "2.19.0"
keycloak:
domain: sso.{{ base_domain }}
volume_folder: "{{ volume_root_folder }}/keycloak"
version: "22.0"
postgres_version: "10"
allowed_sender_domain: true
restic:
volume_folder: "{{ volume_root_folder }}/restic"
pre_deploy_tasks: true
remote_user: dc-user
remote_domain: rynkeby.skovgaard.tel
host_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLGol2G+a87ssy0nu/STKBZSiGyhZhZKx/ujfe9IeFo
repository: restic
version: "1.7.0"
disabled_in_vagrant: true
# mail dance
domain: "noreply.{{ base_domain }}"
allowed_sender_domain: true
mail_from: "backup@noreply.{{ base_domain }}"
docker_registry:
domain: "docker.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/docker-registry"
pre_deploy_tasks: true
post_deploy_tasks: true
username: "docker"
password: "{{ docker_password }}"
version: "2"
### External services ###
nextcloud:
domain: "cloud.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/nextcloud"
pre_deploy_tasks: true
version: 28-apache
postgres_version: "10"
redis_version: 7-alpine
allowed_sender_domain: true
forgejo:
domain: "git.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/forgejo"
version: "7.0.5"
allowed_sender_domain: true
passit:
domain: "passit.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/passit"
version: stable
postgres_version: 15-alpine
allowed_sender_domain: true
matrix:
domain: "matrix.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/matrix"
pre_deploy_tasks: true
version: v1.114.0
postgres_version: 15-alpine
allowed_sender_domain: true
element:
domain: "element.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/element"
pre_deploy_tasks: true
version: v1.11.80
privatebin:
domain: "paste.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/privatebin"
pre_deploy_tasks: true
version: "20221009"
hedgedoc:
domain: "pad.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/hedgedoc"
pre_deploy_tasks: true
version: 1.9.9-alpine
postgres_version: 10-alpine
data_coop_website:
domain: "{{ base_domain }}"
www_domain: "www.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/datacoop"
pre_deploy_tasks: true
version: stable
staging_domain: "staging.{{ base_domain }}"
staging_version: staging
slides_2022_website:
domain: "2022.slides.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/slides-2022"
version: latest
fedi_dk_website:
domain: fedi.dk
volume_folder: "{{ volume_website_folder }}/fedidk"
version: latest
vhs_website:
domain: vhs.data.coop
volume_folder: "{{ volume_website_folder }}/vhs"
version: latest
cryptohagen_website:
domains:
- "cryptohagen.dk"
- "www.cryptohagen.dk"
volume_folder: "{{ volume_website_folder }}/cryptohagen"
ulovliglogning_website:
domains:
- "ulovliglogning.dk"
- "www.ulovliglogning.dk"
- "ulovlig-logning.dk"
- "www.ulovlig-logning.dk"
volume_folder: "{{ volume_website_folder }}/ulovliglogning"
cryptoaarhus_website:
domains:
- "cryptoaarhus.dk"
- "www.cryptoaarhus.dk"
volume_folder: "{{ volume_website_folder }}/cryptoaarhus"
drone:
domain: "drone.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/drone"
version: "1"
mailu:
domain: "mail.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mailu"
pre_deploy_tasks: true
dns: 192.168.203.254
subnet: 192.168.203.0/24
version: "2.0"
postgres_version: 14-alpine
redis_version: alpine
mastodon:
domain: "social.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mastodon"
pre_deploy_tasks: true
post_deploy_tasks: true
version: v4.2.10
postgres_version: 14-alpine
redis_version: 6-alpine
allowed_sender_domain: true
rallly:
domain: "when.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/rallly"
pre_deploy_tasks: true
version: "2"
postgres_version: 14-alpine
allowed_sender_domain: true
membersystem:
domain: "member.{{ base_domain }}"
django_admins: "Vidir:valberg@orn.li,Balder:benjaoming@data.coop"
volume_folder: "{{ volume_root_folder }}/membersystem"
version: latest
postgres_version: 13-alpine
allowed_sender_domain: true
writefreely:
domain: "write.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/writefreely"
pre_deploy_tasks: true
version: v0.15.0
mariadb_version: "11.2"
allowed_sender_domain: true
watchtower:
volume_folder: "{{ volume_root_folder }}/watchtower"
version: "1.5.3"
diun:
version: "4.28"
volume_folder: "{{ volume_root_folder }}/diun"
matrix_user: "@diun:data.coop"
matrix_room: "#datacoop-services-update:data.coop"
### Uptime monitoring ###
uptime_kuma:
domain: "uptime.{{ base_domain }}"
status_domain: "status.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/uptime_kuma"
pre_deploy_tasks: true
version: "latest"
services_exclude:
- uptime_kuma
services_include: "{{ services | dict2items | map(attribute='key') | community.general.lists_difference(services_exclude) }}"

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: restart nginx
command: docker compose restart proxy

View file

@@ -0,0 +1,30 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create volume folder for service '{{ service.name }}'
file:
name: "{{ service.vars.volume_folder }}"
state: directory
- name: Upload Compose file for service '{{ service.name }}'
template:
src: compose-files/{{ service.name }}.yml.j2
dest: "{{ service.vars.volume_folder }}/docker-compose.yml"
owner: root
mode: u=rw,go=
- name: Run pre-deployment tasks for service '{{ service.name }}'
ansible.builtin.include_tasks: pre_deploy/{{ service.name }}.yml
when: service.vars.pre_deploy_tasks is defined and service.vars.pre_deploy_tasks
- name: Deploy service '{{ service.name }}'
when: deploy_services is defined and deploy_services
block:
- name: Deploy Compose stack for service '{{ service.name }}'
ansible.builtin.command:
cmd: docker compose up -d --remove-orphans
chdir: "{{ service.vars.volume_folder }}"
- name: Run post-deployment tasks for service '{{ service.name }}'
ansible.builtin.include_tasks: post_deploy/{{ service.name }}.yml
when: service.vars.post_deploy_tasks is defined and service.vars.post_deploy_tasks

View file

@@ -0,0 +1,15 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create folder structure for bind mounts
file:
name: "{{ item }}"
state: directory
loop:
- "{{ volume_root_folder }}"
- "{{ volume_website_folder }}"
- name: Set up services
import_tasks: services.yml
tags:
- setup_services

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Generate htpasswd file
shell: docker compose exec registry htpasswd -Bbn docker {{ docker_password }} > auth/htpasswd
@@ -8,6 +9,6 @@
- name: log in to registry
docker_login:
registry: "{{ 'docker.data.coop' if vagrant else services.docker_registry.domain }}"
registry: docker.data.coop
username: docker
password: "{{ docker_password }}"

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Upload vhost config for root domain
copy:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:
@@ -34,7 +35,7 @@
dest: "{{ services.mailu.volume_folder }}/certs/cert.pem"
state: hard
force: true
when: letsencrypt_enabled
ignore_errors: true
- name: Hard link to Let's Encrypt TLS key
file:
@@ -42,4 +43,4 @@
dest: "{{ services.mailu.volume_folder }}/certs/key.pem"
state: hard
force: true
when: letsencrypt_enabled
ignore_errors: true

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder for Mastodon data
file:
@@ -43,3 +44,21 @@
copy:
src: mastodon/postgresql.conf
dest: "{{ services.mastodon.volume_folder }}/postgres_config/postgresql.conf"
- name: Configure cron job to remove old Mastodon media daily
ansible.builtin.cron:
name: Clean Mastodon media data older than a week
cron_file: ansible_mastodon_clean_media
job: docker compose -f {{ services.mastodon.volume_folder }}/docker-compose.yml exec web tootctl media remove --days 7
special_time: daily
user: root
state: present
- name: Configure cron job to remove old Mastodon preview cards daily
ansible.builtin.cron:
name: Clean Mastodon preview card data older than two weeks
cron_file: ansible_mastodon_clean_preview_cards
job: docker compose -f {{ services.mastodon.volume_folder }}/docker-compose.yml exec web tootctl preview_cards remove --days 14
special_time: daily
user: root
state: present

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Set up network for Postfix
docker_network:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create SSH directory
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder for MariaDB data
file:

View file

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Set up external services network
docker_network:
@@ -12,9 +13,7 @@
name: "{{ item }}"
vars: "{{ services[item] }}"
loop: "{{ services_include }}"
when: single_service is not defined and
(item.vars.disabled_in_vagrant is not defined or
not (item.vars.disabled_in_vagrant and vagrant))
when: single_service is not defined
- name: Deploy single service
include_tasks:
@@ -23,6 +22,4 @@
service:
name: "{{ single_service }}"
vars: "{{ services[single_service] }}"
when: single_service is defined and single_service in services and
(services[single_service].disabled_in_vagrant is not defined or
not (services[single_service].disabled_in_vagrant and vagrant))
when: single_service is defined and single_service in services

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,5 +1,6 @@
# vim: ft=yaml.ansible
---
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.5"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
x-sidekiq: &sidekiq
image: tootsuite/mastodon:{{ services.mastodon.version }}
restart: always

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,3 +1,6 @@
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:
@@ -19,7 +22,6 @@ services:
labels:
- com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy
{% if letsencrypt_enabled %}
acme:
image: nginxproxy/acme-companion:{{ services.nginx_proxy.acme_companion_version }}
restart: always
@@ -31,7 +33,6 @@ services:
- /var/run/docker.sock:/var/run/docker.sock:ro
depends_on:
- proxy
{% endif %}
networks:
external_services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,12 +1,14 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:
backup:
image: mazzolino/restic:{{ services.restic.version }}
restart: always
hostname: {{ inventory_hostname_short }}
domainname: {{ inventory_hostname }}
hostname: {{ hostname }}
domainname: {{ fqdn }}
environment:
RUN_ON_STARTUP: false
BACKUP_CRON: "0 30 3 * * *"

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: '3.3'
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View file

@@ -1,17 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Install necessary packages via apt
apt:
name: "{{ packages }}"
vars:
packages:
- aptitude
- python3-pip
- apparmor
- haveged
- mosh
- name: Install Dell OpenManage
apt:
name: srvadmin-all
when: not vagrant and not skip_dell_apt_repo

View file

@@ -1,20 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Import dell apt signing key
apt_key:
id: "1285491434D8786F"
keyserver: "keyserver.ubuntu.com"
- name: Configure dell apt repo
apt_repository:
repo: "deb https://linux.dell.com/repo/community/openmanage/10101/focal focal main"
state: present
- name: Restrict dell apt repo"
copy:
dest: "/etc/apt/preferences.d/dell"
content: |
Explanation: Deny all packages from this repo that exist elsewhere
Package: *
Pin: origin "linux.dell.com"
Pin-Priority: 400

Some files were not shown because too many files have changed in this diff.