Compare commits

...

15 Commits

Author SHA1 Message Date
Sam A. 72387f3e79
Fix interfaces 2024-03-31 20:41:12 +02:00
Sam A. 200f11dc4d
Add /etc/network/interfaces from Proxmox 2024-03-31 19:59:59 +02:00
Sam A. 7476dba0e6
Refactor vm-common (old ubuntu_base) role 2024-03-31 19:31:27 +02:00
Sam A. 23735ac517
More stuff 2024-03-31 03:58:25 +02:00
Sam A. e6b9159e1b
Stuff 2024-03-31 00:09:35 +01:00
Sam A. fd80dfdba4
Add steps for rootless Docker 2024-03-31 00:08:06 +01:00
Sam A. 27ae28797f
Split Docker role into services and Docker + configure rootless Docker 2024-03-30 20:24:57 +01:00
Sam A. 54249980e9
Stuff 2024-03-30 19:16:29 +01:00
Sam A. b8bca56c76
Add cloud.cfg from template VM to Git 2024-03-30 01:55:41 +01:00
Sam A. b1e3ab1308
Cleanup 2024-03-29 23:06:31 +01:00
Sam A. ef891ced42
Remove Vagrant support and deploy services selectively 2024-03-29 23:03:16 +01:00
Sam A. ec4f107100
Fix comments 2024-03-29 21:54:14 +01:00
Sam A. 7b05d99c58
Merge branch 'main' into proxmox 2024-03-29 21:34:57 +01:00
Sam A. 3a53634dfa
wip 2024-03-01 21:52:27 +01:00
Sam A. a2b6301fad
Add hosts and move vars into var files 2024-03-01 21:30:18 +01:00
112 changed files with 950 additions and 599 deletions

2
.gitignore vendored
View File

@@ -1,6 +1,6 @@
*.retry
*.sw*
.vagrant/
*.log
.idea/
.vscode/
venv/

39
Vagrantfile vendored
View File

@@ -1,39 +0,0 @@
Vagrant.require_version ">= 2.0.0"
PORT = 19022
def provisioned?(vm="default", provider="virtualbox")
File.exist?(".vagrant/machines/#{vm}/#{provider}/action_provision")
end
Vagrant.configure(2) do |config|
config.vm.network :private_network, ip: "192.168.56.10"
config.vm.network :forwarded_port, guest: PORT, host: PORT
config.vm.box = "ubuntu/focal64"
config.vm.hostname = "datacoop"
config.vm.provider :virtualbox do |v|
v.cpus = 8
v.memory = 16384
end
config.vm.provision :ansible do |ansible|
ansible.compatibility_mode = "2.0"
ansible.playbook = "playbook.yml"
ansible.ask_vault_pass = true
ansible.verbose = "v"
# If the VM is already provisioned, we need to use the new port
if provisioned?
config.ssh.guest_port = PORT
ansible.extra_vars = {
ansible_port: PORT,
from_vagrant: true
}
else
ansible.extra_vars = {
from_vagrant: true
}
end
end
end

View File

@@ -1,8 +1,8 @@
[defaults]
ask_vault_pass = True
inventory = datacoop_hosts
inventory = inventory.ini
interpreter_python = /usr/bin/python3
remote_user = root
remote_user = ansible
retry_files_enabled = True
use_persistent_connections = True
forks = 10

117
cloud-init/cloud.cfg Normal file
View File

@@ -0,0 +1,117 @@
# cloud-config
# The top level settings are used as module
# and system configuration.
# A set of users which may be applied and/or used by various modules
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
- default
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
disable_root: true
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
apt:
# This prevents cloud-init from rewriting apt's sources.list file,
# which has been a source of surprise.
preserve_sources_list: true
# If you use datasource_list array, keep array items in a single line.
# If you use multi line array, ds-identify script won't read array items.
# Example datasource config
# datasource:
# Ec2:
# metadata_urls: [ 'blah.com' ]
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
- seed_random
- bootcmd
- write-files
- growpart
- resizefs
- disk_setup
- mounts
- set_hostname
- update_hostname
- update_etc_hosts
- ca-certs
- rsyslog
- users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
- snap
- ssh-import-id
- keyboard
- locale
- set-passwords
- grub-dpkg
- apt-pipelining
- apt-configure
- ntp
- timezone
- disable-ec2-metadata
- runcmd
- byobu
# The modules that run in the 'final' stage
cloud_final_modules:
- package-update-upgrade-install
- fan
- landscape
- lxd
- write-files-deferred
- puppet
- chef
- mcollective
- salt-minion
- reset_rmc
- refresh_rmc_and_interface
- rightscale_userdata
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- install-hotplug
- phone-home
- final-message
- power-state-change
# System and/or distro specific settings
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
distro: debian
# Default user name + that default users groups (if added/used)
default_user:
name: ansible
lock_passwd: True
gecos: Ansible User
groups: []
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
package_mirrors:
- arches: [default]
failsafe:
primary: https://deb.debian.org/debian
security: https://deb.debian.org/debian-security
ssh_svcname: ssh

View File

@@ -1,5 +0,0 @@
[production]
hevonen.servers.data.coop ansible_port=19022
[monitoring]
uptime.data.coop

View File

@@ -2,20 +2,15 @@
usage () {
{
echo "Usage: $0 [--vagrant]"
echo "Usage: $0 [--vagrant] base"
echo "Usage: $0 [--vagrant] users"
echo "Usage: $0 [--vagrant] services [SERVICE]"
echo "Usage: $0"
echo "Usage: $0 base"
echo "Usage: $0 users"
echo "Usage: $0 services [--deploy] [SERVICE]"
} >&2
}
BASE_CMD="ansible-playbook playbook.yml"
if [ "$1" = "--vagrant" ]; then
BASE_CMD="$BASE_CMD --verbose --inventory=vagrant_host"
VAGRANT_VAR="from_vagrant"
shift
fi
DEPLOY="false"
if [ -z "$(ansible-galaxy collection list community.general 2>/dev/null)" ]; then
echo "Installing community.general modules"
@@ -28,19 +23,24 @@ if [ -z "$1" ]; then
else
case $1 in
"services")
if [ -n "$2" && "$2" = "--deploy" ]; then
DEPLOY="true"
shift
fi
if [ -z "$2" ]; then
echo "Deploying all services!"
eval "$BASE_CMD --tags setup_services $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags setup_services --extra-vars "deploy_services=$DEPLOY"
else
echo "Deploying service: $2"
$BASE_CMD --tags setup_services --extra-vars '{"single_service": "'"$2"'"'"$(test -z "$VAGRANT_VAR" || printf '%s' ', "'"$VAGRANT_VAR"'": true')"'}'
$BASE_CMD --tags setup_services --extra-vars "deploy_services=$DEPLOY" --extra-vars "single_service=$2"
fi
;;
"base")
eval "$BASE_CMD --tags base_only $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags base_only
;;
"users")
eval "$BASE_CMD --tags setup-users $(test -z "$VAGRANT_VAR" || printf '%s' "$VAGRANT_VAR=true")"
$BASE_CMD --tags setup-users
;;
*)
usage

View File

@@ -1,24 +1,17 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
users:
- name: graffen
comment: Jesper Hess Nielsen
password: '!'
- name: ansible
comment: Ansible User
password_lock: true
groups: []
ssh_keys: []
- name: valberg
comment: Vidir Valberg Gudmundsson
password: $6$qt3G.E.CxhC$OwBDn4rZUbCz06HLEMBHjgvKjxiv/eeerbklTHi.gpHIn1OejzX3k2.0NM0Dforaw6Yn5Y8Cgn8kL2FdbQLZ3/
groups:
- sudo
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUmGeHc6QXDcJHkmVxbTUv04Q3vs20avquoGr6eOkkvYbcgjuFnBOOtvs2Nul1odcvvnHa1nN7DfL8XJamiwsB1B/xe2seaNS1axgwk9XowlVN9pgga8gsC+4gZWBtSObG2GR8n4NtPENzPmW5deNn8dRpTvULPMxZ0VRE9yNQOx8v8w85yYh+vxbbkWGVDYJU23yuJI50U9y6bXxNHinsACDFBeR/giXDlw29TaOaSxz0R6zrRPBoX+V68RyWwBL+KWQKtX2ULtJI40S98Ohd6p41bIxYHCBS/zroqNne8PjYOLcHHsjHUGfTvhcS5a3zdz/iHsvsaOOjFjsydAXH valberg
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4FRrbTpxwGdlF6RVi/thJaMlaEE0Z9YCQA4Y+KnHbBoVWMjzgbIkSWw3MM+E/iiVnix8SFh4tjDSdFjb8lCvHt/PqhMFhZJ02vhVgSwyU+Ji5ur23i202LB9ua54NLN4kNG8K47U0tKi2/EV6LWl2QdRviAcOUctz6u9XDkkMLUgPEYH384XSTRRj4GJ8+0LRzB2rXqetH3gBe9v1vlv0ETYWvzTnpfZUxcrrqEGtXV9Wa0BZoWLos2oKOsYVjNdLZMoFpmyBxPnqzAi1hr7beblFZKqBkvD7XA9RnERbZn1nxkWufVahppPjKQ+se3esWJCp6ri/vNP4WNKY3hiIoekBLbpvGcP1Te7cAIQXiZOilN92NKKYrzN2gAtsxgqGZw7lI1PE71luGdPir2Evl6hPj6/nnNdEHZWgcmBSPy17uCpVvZYBcDDzj8L3hbkLVQ3kcLZTz6I8BXvuGqoeLvRQpBtn5EaLpCCOmXuKqm+dzHzsOIwh+SA5NA8M3P0=
- name: reynir
comment: Reynir Björnsson
password: $6$MiPv.ZFlWnLHGNOb$jdQD9NaPMRUGaP2YHRJNwrMPBGl9qwK0HFhI6x51Xpn7hdzuC4GIwvOw1DJK33sNs/gGP5bWB0izviXkDcq7B0
password_lock: false
groups:
- sudo
ssh_keys:
@@ -28,8 +21,19 @@ users:
- name: samsapti
comment: Sam Al-Sapti
password: $6$18dN367fG162hQ9A$Aqkf3O24Ve1btzh1PPOPg3uyydv/AQYUxethcoB4klotebJq3/XsydYT7XBuarxfDccVwyPTMlsP3U8VfQpG60
password_lock: false
groups:
- sudo
ssh_keys:
- sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIFWZGLov8wPBNxuvnaPK+8vv6wK5hHUVEFzXKsN9QeuBAAAADHNzaDpzYW1zYXB0aQ== ssh:samsapti
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPd/4fQV7CL8/KVwbo/phiV5UdXFBIDlkZ+ps8C7FeRf cardno:14 336 332
- name: valberg
comment: Vidir Valberg Gudmundsson
password: $6$qt3G.E.CxhC$OwBDn4rZUbCz06HLEMBHjgvKjxiv/eeerbklTHi.gpHIn1OejzX3k2.0NM0Dforaw6Yn5Y8Cgn8kL2FdbQLZ3/
password_lock: false
groups:
- sudo
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUmGeHc6QXDcJHkmVxbTUv04Q3vs20avquoGr6eOkkvYbcgjuFnBOOtvs2Nul1odcvvnHa1nN7DfL8XJamiwsB1B/xe2seaNS1axgwk9XowlVN9pgga8gsC+4gZWBtSObG2GR8n4NtPENzPmW5deNn8dRpTvULPMxZ0VRE9yNQOx8v8w85yYh+vxbbkWGVDYJU23yuJI50U9y6bXxNHinsACDFBeR/giXDlw29TaOaSxz0R6zrRPBoX+V68RyWwBL+KWQKtX2ULtJI40S98Ohd6p41bIxYHCBS/zroqNne8PjYOLcHHsjHUGfTvhcS5a3zdz/iHsvsaOOjFjsydAXH valberg
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC4FRrbTpxwGdlF6RVi/thJaMlaEE0Z9YCQA4Y+KnHbBoVWMjzgbIkSWw3MM+E/iiVnix8SFh4tjDSdFjb8lCvHt/PqhMFhZJ02vhVgSwyU+Ji5ur23i202LB9ua54NLN4kNG8K47U0tKi2/EV6LWl2QdRviAcOUctz6u9XDkkMLUgPEYH384XSTRRj4GJ8+0LRzB2rXqetH3gBe9v1vlv0ETYWvzTnpfZUxcrrqEGtXV9Wa0BZoWLos2oKOsYVjNdLZMoFpmyBxPnqzAi1hr7beblFZKqBkvD7XA9RnERbZn1nxkWufVahppPjKQ+se3esWJCp6ri/vNP4WNKY3hiIoekBLbpvGcP1Te7cAIQXiZOilN92NKKYrzN2gAtsxgqGZw7lI1PE71luGdPir2Evl6hPj6/nnNdEHZWgcmBSPy17uCpVvZYBcDDzj8L3hbkLVQ3kcLZTz6I8BXvuGqoeLvRQpBtn5EaLpCCOmXuKqm+dzHzsOIwh+SA5NA8M3P0=

View File

@@ -0,0 +1,10 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: data.coop
letsencrypt_email: admin@data.coop
services_include:
- nginx_proxy
- uptime_kuma
- watchtower

View File

@@ -0,0 +1,13 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: data.coop
letsencrypt_email: admin@data.coop
services_exclude:
- uptime_kuma
smtp_host: "postfix"
smtp_port: "587"
ldap_dn: "dc=data,dc=coop"

View File

@@ -0,0 +1,13 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
base_domain: staging.data.coop
letsencrypt_email: admin@data.coop
services_exclude:
- uptime_kuma
smtp_host: "postfix"
smtp_port: "587"
ldap_dn: "dc=staging,dc=data,dc=coop"

8
host_vars/cavall.yml Normal file
View File

@@ -0,0 +1,8 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 22

12
host_vars/folald.yml Normal file
View File

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.5
vm_host: cavall
vm_type: control

11
host_vars/hestur.yml Normal file
View File

@@ -0,0 +1,11 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 22
vm_host: cloud
vm_type: uptime

12
host_vars/poltre.yml Normal file
View File

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.2
vm_host: cavall
vm_type: app

12
host_vars/varsa.yml Normal file
View File

@@ -0,0 +1,12 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
hostname: "{{ inventory_hostname }}"
fqdn: "{{ hostname }}.vm.{{ vm_host }}.servers.data.coop"
ansible_host: "{{ fqdn }}"
ansible_port: 19022
internal_ipv4: 10.2.1.3
vm_host: cavall
vm_type: app

22
inventory.ini Normal file
View File

@@ -0,0 +1,22 @@
[proxmox]
cavall
[monitoring]
hestur
[production]
poltre
[staging]
varsa
[control]
folald
[virtual:children]
production
staging
control
[physical:children]
proxmox

View File

@@ -1,27 +1,15 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- hosts: production
- hosts: all
gather_facts: true
become: true
vars:
ldap_dn: "dc=data,dc=coop"
vagrant: "{{ from_vagrant is defined and from_vagrant }}"
letsencrypt_enabled: "{{ not vagrant }}"
base_domain: "{{ 'datacoop.devel' if vagrant else 'data.coop' }}"
letsencrypt_email: "admin@{{ base_domain }}"
smtp_host: "postfix"
smtp_port: "587"
services_exclude:
- uptime_kuma
tasks:
- import_role:
name: ubuntu_base
tags:
- base_only
- import_role:
name: docker
roles:
- name: vm-common
tags: [base_only]
- name: zfs
tags: [zfs]
- name: docker
tags: [docker]
- name: services
tags: [services]

View File

@@ -0,0 +1,65 @@
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!
auto lo
iface lo inet loopback
auto eno1
iface eno1 inet manual
auto eno2
iface eno2 inet manual
iface eno3 inet manual
iface eno4 inet manual
auto bond0
iface bond0 inet manual
bond-slaves eno1 eno2
bond-miimon 100
bond-mode 802.3ad
bond-xmit-hash-policy layer2+3
auto vmbr0
iface vmbr0 inet static
address 85.209.118.134/28
gateway 85.209.118.129
bridge-ports bond0
bridge-stp off
bridge-fd 0
#Main bridge for public VMs
iface vmbr0 inet6 static
address 2a09:94c4:55d1:7680::86/64
gateway 2a09:94c4:55d1:7680::1
auto vmbr1
iface vmbr1 inet manual
address 10.2.1.1/24
bridge-ports none
bridge-stp off
bridge-fd 0
#Internal bridge for VMs
auto vmbr2
iface vmbr2 inet static
address 192.168.1.1/24
bridge-ports none
bridge-stp off
bridge-fd 0
#NAT bridge for VMs that need masquerading
post-up echo 1 > /proc/sys/net/ipv4/ip_forward
post-up iptables -t nat -A POSTROUTING -s '192.168.1.0/24' -o vmbr0 -j MASQUERADE
post-down iptables -t nat -D POSTROUTING -s '192.168.1.0/24' -o vmbr0 -j MASQUERADE
source /etc/network/interfaces.d/*

View File

@@ -1,227 +1,6 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
volume_root_folder: "/docker-volumes"
volume_website_folder: "{{ volume_root_folder }}/websites"
services:
### Internal services ###
postfix:
domain: "smtp.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/postfix"
pre_deploy_tasks: true
version: "v3.6.1-alpine"
nginx_proxy:
volume_folder: "{{ volume_root_folder }}/nginx"
pre_deploy_tasks: true
version: "1.3-alpine"
acme_companion_version: "2.2"
openldap:
domain: "ldap.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/openldap"
pre_deploy_tasks: true
version: "1.5.0"
phpldapadmin_version: "0.9.0"
netdata:
domain: "netdata.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/netdata"
version: "v1"
portainer:
domain: "portainer.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/portainer"
version: "2.19.0"
keycloak:
domain: sso.{{ base_domain }}
volume_folder: "{{ volume_root_folder }}/keycloak"
version: "22.0"
postgres_version: "10"
allowed_sender_domain: true
restic:
volume_folder: "{{ volume_root_folder }}/restic"
pre_deploy_tasks: true
remote_user: dc-user
remote_domain: rynkeby.skovgaard.tel
host_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLGol2G+a87ssy0nu/STKBZSiGyhZhZKx/ujfe9IeFo
repository: restic
version: "1.7.0"
disabled_in_vagrant: true
# mail dance
domain: "noreply.{{ base_domain }}"
allowed_sender_domain: true
mail_from: "backup@noreply.{{ base_domain }}"
docker_registry:
domain: "docker.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/docker-registry"
pre_deploy_tasks: true
post_deploy_tasks: true
username: "docker"
password: "{{ docker_password }}"
version: "2"
### External services ###
nextcloud:
domain: "cloud.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/nextcloud"
pre_deploy_tasks: true
version: 28-apache
postgres_version: "10"
redis_version: 7-alpine
allowed_sender_domain: true
forgejo:
domain: "git.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/forgejo"
version: "1.21.8-0"
allowed_sender_domain: true
passit:
domain: "passit.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/passit"
version: stable
postgres_version: 15-alpine
allowed_sender_domain: true
matrix:
domain: "matrix.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/matrix"
pre_deploy_tasks: true
version: v1.98.0
postgres_version: 15-alpine
allowed_sender_domain: true
element:
domain: "element.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/element"
pre_deploy_tasks: true
version: v1.11.51
privatebin:
domain: "paste.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/privatebin"
pre_deploy_tasks: true
version: "20221009"
hedgedoc:
domain: "pad.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/hedgedoc"
pre_deploy_tasks: true
version: 1.9.9-alpine
postgres_version: 10-alpine
data_coop_website:
domain: "{{ base_domain }}"
www_domain: "www.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/datacoop"
pre_deploy_tasks: true
version: stable
staging_domain: "staging.{{ base_domain }}"
staging_version: staging
slides_2022_website:
domain: "2022.slides.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/slides-2022"
version: latest
fedi_dk_website:
domain: fedi.dk
volume_folder: "{{ volume_website_folder }}/fedidk"
version: latest
vhs_website:
domain: vhs.data.coop
volume_folder: "{{ volume_website_folder }}/vhs"
version: latest
cryptohagen_website:
domains:
- "cryptohagen.dk"
- "www.cryptohagen.dk"
volume_folder: "{{ volume_website_folder }}/cryptohagen"
ulovliglogning_website:
domains:
- "ulovliglogning.dk"
- "www.ulovliglogning.dk"
- "ulovlig-logning.dk"
- "www.ulovlig-logning.dk"
volume_folder: "{{ volume_website_folder }}/ulovliglogning"
cryptoaarhus_website:
domains:
- "cryptoaarhus.dk"
- "www.cryptoaarhus.dk"
volume_folder: "{{ volume_website_folder }}/cryptoaarhus"
drone:
domain: "drone.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/drone"
version: "1"
mailu:
domain: "mail.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mailu"
pre_deploy_tasks: true
dns: 192.168.203.254
subnet: 192.168.203.0/24
version: "2.0"
postgres_version: 14-alpine
redis_version: alpine
mastodon:
domain: "social.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mastodon"
pre_deploy_tasks: true
post_deploy_tasks: true
version: v4.2.8
postgres_version: 14-alpine
redis_version: 6-alpine
allowed_sender_domain: true
rallly:
domain: "when.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/rallly"
pre_deploy_tasks: true
version: "2"
postgres_version: 14-alpine
allowed_sender_domain: true
membersystem:
domain: "member.{{ base_domain }}"
django_admins: "Vidir:valberg@orn.li"
volume_folder: "{{ volume_root_folder }}/membersystem"
version: latest
postgres_version: 13-alpine
allowed_sender_domain: true
writefreely:
domain: "write.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/writefreely"
pre_deploy_tasks: true
version: v0.15.0
mariadb_version: "11.2"
allowed_sender_domain: true
watchtower:
volume_folder: "{{ volume_root_folder }}/watchtower"
version: "1.5.3"
diun:
version: "4.27"
volume_folder: "{{ volume_root_folder }}/diun"
### Uptime monitoring ###
uptime_kuma:
domain: "uptime.{{ base_domain }}"
status_domain: "status.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/uptime_kuma"
pre_deploy_tasks: true
version: "latest"
services_exclude: []
services_include: "{{ services | dict2items | map(attribute='key') | list | difference(services_exclude) }}"
docker_rootless: false
docker_rootless_user: rootlessdocker
docker_rootless_user_uid: 1102

View File

@@ -1,26 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Create volume folder for service {{ service.name }}
file:
name: "{{ service.vars.volume_folder }}"
state: directory
- name: Upload Compose file for service {{ service.name }}
template:
src: compose-files/{{ service.name }}.yml.j2
dest: "{{ service.vars.volume_folder }}/docker-compose.yml"
owner: root
mode: u=rw,go=
- name: Run pre-deployment tasks for service {{ service.name }}
include_tasks: pre_deploy/{{ service.name }}.yml
when: service.vars.pre_deploy_tasks is defined and service.vars.pre_deploy_tasks
- name: Deploy Compose stack for service {{ service.name }}
command: docker compose up -d --remove-orphans --pull always
args:
chdir: "{{ service.vars.volume_folder }}"
- name: Run post-deployment tasks for service {{ service.name }}
include_tasks: post_deploy/{{ service.name }}.yml
when: service.vars.post_deploy_tasks is defined and service.vars.post_deploy_tasks

View File

@@ -1,44 +1,114 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Add Docker PGP key
apt_key:
keyserver: pgp.mit.edu
id: 8D81803C0EBFCD88
- name: Add Docker apt PGP key
ansible.builtin.apt_key:
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
url: https://download.docker.com/linux/debian/gpg
state: present
- name: Add Docker apt repository
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu bionic stable
ansible.builtin.apt_repository:
filename: docker
repo: "deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
state: present
update_cache: yes
update_cache: true
- name: Install Docker
apt:
name: "{{ pkgs }}"
state: present
vars:
pkgs:
ansible.builtin.apt:
name:
- containerd.io
- docker-ce
- docker-ce-cli
- docker-buildx-plugin
- docker-compose-plugin
- name: Configure cron job to prune unused Docker data weekly
cron:
name: Prune unused Docker data
cron_file: ansible_docker_prune
job: 'docker system prune -fa && docker volume prune -fa'
special_time: weekly
user: root
state: present
- name: Create folder structure for bind mounts
file:
name: "{{ item }}"
state: directory
loop:
- "{{ volume_root_folder }}"
- "{{ volume_website_folder }}"
- name: Create group for Docker socket
ansible.builtin.group:
name: docker
state: present
- name: Set up services
import_tasks: services.yml
tags:
- setup_services
- name: Configure rootful Docker
when: not docker_rootless
block:
- name: Make sure Docker is running
ansible.builtin.service:
name: docker
enabled: true
state: started
- name: Configure cron job to prune unused Docker data weekly
ansible.builtin.cron:
name: Prune unused Docker data
cron_file: ansible_docker_prune
job: docker system prune -fa --volumes --filter "until=6h"
special_time: weekly
user: root
state: present
- name: Configure rootless Docker
when: docker_rootless
block:
- name: Make sure rootful Docker is stopped and disabled
ansible.builtin.systemd_service:
name: docker
enabled: false
scope: system
state: stopped
- name: Install packages needed by rootless Docker
ansible.builtin.apt:
name:
- docker-ce-rootless-extras
- uidmap
- dbus-user-session
- fuse-overlayfs
- slirp4netns
state: present
- name: Create user for rootless Docker
ansible.builtin.user:
name: "{{ docker_rootless_user }}"
uid: "{{ docker_rootless_user_uid }}"
comment: Rootless Docker User
groups:
- docker
state: present
- name: Enable lingering for Docker user
ansible.builtin.command:
cmd: loginctl enable-linger {{ docker_rootless_user }}
creates: /var/lib/systemd/linger/{{ docker_rootless_user }}
- name: Set DOCKER_HOST environment variable globally
ansible.builtin.lineinfile:
path: /etc/profile
regexp: '^export DOCKER_HOST='
line: export DOCKER_HOST=unix:///run/user/{{ docker_rootless_user_uid }}/docker.sock
state: present
- name: Run rootless Docker setup script
ansible.builtin.command:
cmd: dockerd-rootless-setuptool.sh install
creates: /home/{{ docker_rootless_user }}/.config/systemd/user/docker.service
become: true
become_user: "{{ docker_rootless_user }}"
- name: Make sure rootless Docker is running
ansible.builtin.systemd_service:
name: docker.service
enabled: true
scope: user
state: started
become: true
become_user: "{{ docker_rootless_user }}"
- name: Configure cron job to prune unused Docker data weekly
ansible.builtin.cron:
name: Prune unused Docker data
cron_file: ansible_docker_rootless_prune
job: docker --host unix:///run/user/{{ docker_rootless_user_uid }}/docker.sock system prune -fa --volumes --filter "until=6h"
special_time: weekly
user: "{{ docker_rootless_user }}"
state: present

View File

@@ -1,19 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Configure cron job to remove old Mastodon media daily
cron:
name: Clean Mastodon media data older than a week
cron_file: ansible_mastodon_clean_media
job: docker exec mastodon-web-1 tootctl media remove --days 7
special_time: daily
user: root
state: present
- name: Configure cron job to remove old Mastodon preview cards daily
cron:
name: Clean Mastodon preview card data older than two weeks
cron_file: ansible_mastodon_clean_preview_cards
job: docker exec mastodon-web-1 tootctl preview_cards remove --days 14
special_time: daily
user: root
state: present

View File

@@ -0,0 +1,226 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
volume_root_folder: "/docker-volumes"
volume_website_folder: "{{ volume_root_folder }}/websites"
services:
### Internal services ###
postfix:
domain: "smtp.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/postfix"
pre_deploy_tasks: true
version: "v3.6.1-alpine"
nginx_proxy:
volume_folder: "{{ volume_root_folder }}/nginx"
pre_deploy_tasks: true
version: "1.3-alpine"
acme_companion_version: "2.2"
openldap:
domain: "ldap.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/openldap"
pre_deploy_tasks: true
version: "1.5.0"
phpldapadmin_version: "0.9.0"
netdata:
domain: "netdata.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/netdata"
version: "v1"
portainer:
domain: "portainer.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/portainer"
version: "2.19.0"
keycloak:
domain: sso.{{ base_domain }}
volume_folder: "{{ volume_root_folder }}/keycloak"
version: "22.0"
postgres_version: "10"
allowed_sender_domain: true
restic:
volume_folder: "{{ volume_root_folder }}/restic"
pre_deploy_tasks: true
remote_user: dc-user
remote_domain: rynkeby.skovgaard.tel
host_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLGol2G+a87ssy0nu/STKBZSiGyhZhZKx/ujfe9IeFo
repository: restic
version: "1.7.0"
# mail dance
domain: "noreply.{{ base_domain }}"
allowed_sender_domain: true
mail_from: "backup@noreply.{{ base_domain }}"
docker_registry:
domain: "docker.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/docker-registry"
pre_deploy_tasks: true
post_deploy_tasks: true
username: "docker"
password: "{{ docker_password }}"
version: "2"
### External services ###
nextcloud:
domain: "cloud.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/nextcloud"
pre_deploy_tasks: true
version: 28-apache
postgres_version: "10"
redis_version: 7-alpine
allowed_sender_domain: true
forgejo:
domain: "git.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/forgejo"
version: "1.21.8-0"
allowed_sender_domain: true
passit:
domain: "passit.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/passit"
version: stable
postgres_version: 15-alpine
allowed_sender_domain: true
matrix:
domain: "matrix.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/matrix"
pre_deploy_tasks: true
version: v1.98.0
postgres_version: 15-alpine
allowed_sender_domain: true
element:
domain: "element.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/element"
pre_deploy_tasks: true
version: v1.11.51
privatebin:
domain: "paste.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/privatebin"
pre_deploy_tasks: true
version: "20221009"
hedgedoc:
domain: "pad.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/hedgedoc"
pre_deploy_tasks: true
version: 1.9.9-alpine
postgres_version: 10-alpine
data_coop_website:
domain: "{{ base_domain }}"
www_domain: "www.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/datacoop"
pre_deploy_tasks: true
version: stable
staging_domain: "staging.{{ base_domain }}"
staging_version: staging
slides_2022_website:
domain: "2022.slides.{{ base_domain }}"
volume_folder: "{{ volume_website_folder }}/slides-2022"
version: latest
fedi_dk_website:
domain: fedi.dk
volume_folder: "{{ volume_website_folder }}/fedidk"
version: latest
vhs_website:
domain: vhs.data.coop
volume_folder: "{{ volume_website_folder }}/vhs"
version: latest
cryptohagen_website:
domains:
- "cryptohagen.dk"
- "www.cryptohagen.dk"
volume_folder: "{{ volume_website_folder }}/cryptohagen"
ulovliglogning_website:
domains:
- "ulovliglogning.dk"
- "www.ulovliglogning.dk"
- "ulovlig-logning.dk"
- "www.ulovlig-logning.dk"
volume_folder: "{{ volume_website_folder }}/ulovliglogning"
cryptoaarhus_website:
domains:
- "cryptoaarhus.dk"
- "www.cryptoaarhus.dk"
volume_folder: "{{ volume_website_folder }}/cryptoaarhus"
drone:
domain: "drone.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/drone"
version: "1"
mailu:
domain: "mail.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mailu"
pre_deploy_tasks: true
dns: 192.168.203.254
subnet: 192.168.203.0/24
version: "2.0"
postgres_version: 14-alpine
redis_version: alpine
mastodon:
domain: "social.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/mastodon"
pre_deploy_tasks: true
version: v4.2.8
postgres_version: 14-alpine
redis_version: 6-alpine
allowed_sender_domain: true
rallly:
domain: "when.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/rallly"
pre_deploy_tasks: true
version: "2"
postgres_version: 14-alpine
allowed_sender_domain: true
membersystem:
domain: "member.{{ base_domain }}"
django_admins: "Vidir:valberg@orn.li"
volume_folder: "{{ volume_root_folder }}/membersystem"
version: latest
postgres_version: 13-alpine
allowed_sender_domain: true
writefreely:
domain: "write.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/writefreely"
pre_deploy_tasks: true
version: v0.15.0
mariadb_version: "11.2"
allowed_sender_domain: true
watchtower:
volume_folder: "{{ volume_root_folder }}/watchtower"
version: "1.5.3"
diun:
version: "4.27"
volume_folder: "{{ volume_root_folder }}/diun"
### Uptime monitoring ###
uptime_kuma:
domain: "uptime.{{ base_domain }}"
status_domain: "status.{{ base_domain }}"
volume_folder: "{{ volume_root_folder }}/uptime_kuma"
pre_deploy_tasks: true
version: "latest"
services_exclude: []
services_include: "{{ services | dict2items | map(attribute='key') | list | difference(services_exclude) }}"

View File

@@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: restart nginx
command: docker compose restart proxy

View File

@@ -0,0 +1,30 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create volume folder for service '{{ service.name }}'
file:
name: "{{ service.vars.volume_folder }}"
state: directory
- name: Upload Compose file for service '{{ service.name }}'
template:
src: compose-files/{{ service.name }}.yml.j2
dest: "{{ service.vars.volume_folder }}/docker-compose.yml"
owner: root
mode: u=rw,go=
- name: Run pre-deployment tasks for service '{{ service.name }}'
ansible.builtin.include_tasks: pre_deploy/{{ service.name }}.yml
when: service.vars.pre_deploy_tasks is defined and service.vars.pre_deploy_tasks
- name: Deploy service '{{ service.name }}'
when: deploy_services is defined and deploy_services
block:
- name: Deploy Compose stack for service '{{ service.name }}'
ansible.builtin.command:
cmd: docker compose up -d --remove-orphans
chdir: "{{ service.vars.volume_folder }}"
- name: Run post-deployment tasks for service '{{ service.name }}'
ansible.builtin.include_tasks: post_deploy/{{ service.name }}.yml
when: service.vars.post_deploy_tasks is defined and service.vars.post_deploy_tasks

View File

@ -0,0 +1,15 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create folder structure for bind mounts
file:
name: "{{ item }}"
state: directory
loop:
- "{{ volume_root_folder }}"
- "{{ volume_website_folder }}"
- name: Set up services
import_tasks: services.yml
tags:
- setup_services

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Generate htpasswd file
shell: docker compose exec registry htpasswd -Bbn docker {{ docker_password }} > auth/htpasswd
@ -8,6 +9,6 @@
- name: log in to registry
docker_login:
registry: "{{ 'docker.data.coop' if vagrant else services.docker_registry.domain }}"
registry: docker.data.coop
username: docker
password: "{{ docker_password }}"

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Upload vhost config for root domain
copy:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:
@ -34,7 +35,6 @@
dest: "{{ services.mailu.volume_folder }}/certs/cert.pem"
state: hard
force: true
when: letsencrypt_enabled
- name: Hard link to Let's Encrypt TLS key
file:
@ -42,4 +42,3 @@
dest: "{{ services.mailu.volume_folder }}/certs/key.pem"
state: hard
force: true
when: letsencrypt_enabled

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder for Mastodon data
file:
@ -43,3 +44,21 @@
copy:
src: mastodon/postgresql.conf
dest: "{{ services.mastodon.volume_folder }}/postgres_config/postgresql.conf"
- name: Configure cron job to remove old Mastodon media daily
ansible.builtin.cron:
name: Clean Mastodon media data older than a week
cron_file: ansible_mastodon_clean_media
job: docker compose -f {{ services.mastodon.volume_folder }}/docker-compose.yml exec web tootctl media remove --days 7
special_time: daily
user: root
state: present
- name: Configure cron job to remove old Mastodon preview cards daily
ansible.builtin.cron:
name: Clean Mastodon preview card data older than two weeks
cron_file: ansible_mastodon_clean_preview_cards
job: docker compose -f {{ services.mastodon.volume_folder }}/docker-compose.yml exec web tootctl preview_cards remove --days 14
special_time: daily
user: root
state: present

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Set up network for Postfix
docker_network:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolders
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create SSH directory
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Create subfolder for MariaDB data
file:

View File

@ -1,4 +1,5 @@
# vim: ft=yaml.ansible
# code: language=ansible
---
- name: Set up external services network
docker_network:
@ -12,9 +13,7 @@
name: "{{ item }}"
vars: "{{ services[item] }}"
loop: "{{ services_include }}"
when: single_service is not defined and
(item.vars.disabled_in_vagrant is not defined or
not (item.vars.disabled_in_vagrant and vagrant))
when: single_service is not defined
- name: Deploy single service
include_tasks:
@ -23,6 +22,4 @@
service:
name: "{{ single_service }}"
vars: "{{ services[single_service] }}"
when: single_service is defined and single_service in services and
(services[single_service].disabled_in_vagrant is not defined or
not (services[single_service].disabled_in_vagrant and vagrant))
when: single_service is defined and single_service in services

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,5 +1,6 @@
# vim: ft=yaml.ansible
---
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.5"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
x-sidekiq: &sidekiq
image: tootsuite/mastodon:{{ services.mastodon.version }}
restart: always

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,3 +1,6 @@
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:
@ -19,7 +22,6 @@ services:
labels:
- com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy
{% if letsencrypt_enabled %}
acme:
image: nginxproxy/acme-companion:{{ services.nginx_proxy.acme_companion_version }}
restart: always
@ -31,7 +33,6 @@ services:
- /var/run/docker.sock:/var/run/docker.sock:ro
depends_on:
- proxy
{% endif %}
networks:
external_services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,12 +1,14 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:
backup:
image: mazzolino/restic:{{ services.restic.version }}
restart: always
hostname: {{ inventory_hostname_short }}
domainname: {{ inventory_hostname }}
hostname: {{ hostname }}
domainname: {{ fqdn }}
environment:
RUN_ON_STARTUP: false
BACKUP_CRON: "0 30 3 * * *"

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: '3.3'
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,4 +1,6 @@
# vim: ft=yaml.docker-compose
{# code: language=ansible-jinja #}
# THIS FILE IS MANAGED BY ANSIBLE
version: "3.8"
services:

View File

@ -1,17 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Install necessary packages via apt
apt:
name: "{{ packages }}"
vars:
packages:
- aptitude
- python3-pip
- apparmor
- haveged
- mosh
- name: Install Dell OpenManage
apt:
name: srvadmin-all
when: not vagrant and not skip_dell_apt_repo

View File

@ -1,20 +0,0 @@
# vim: ft=yaml.ansible
---
- name: Import dell apt signing key
apt_key:
id: "1285491434D8786F"
keyserver: "keyserver.ubuntu.com"
- name: Configure dell apt repo
apt_repository:
repo: "deb https://linux.dell.com/repo/community/openmanage/10101/focal focal main"
state: present
- name: Restrict dell apt repo
copy:
dest: "/etc/apt/preferences.d/dell"
content: |
Explanation: Deny all packages from this repo that exist elsewhere
Package: *
Pin: origin "linux.dell.com"
Pin-Priority: 400

Some files were not shown because too many files have changed in this diff Show More