Initial commit
commit 5cae5344ab

.ansible-lint (Normal file, 120 lines)
@@ -0,0 +1,120 @@
---
# .ansible-lint

profile: null # min, basic, moderate,safety, shared, production

# exclude_paths included in this file are parsed relative to this file's location
# and not relative to the CWD of execution. CLI arguments passed to the --exclude
# option are parsed relative to the CWD of execution.
exclude_paths:
  - .cache/ # implicit unless exclude_paths is defined in config
  - .github/
  - test/fixtures/formatting-before/
  - test/fixtures/formatting-prettier/
# parseable: true
# quiet: true
# strict: true
# verbosity: 1

# Mock modules or roles in order to pass ansible-playbook --syntax-check
mock_modules:
  - zuul_return
# note the foo.bar is invalid as being neither a module or a collection
  - fake_namespace.fake_collection.fake_module
  - fake_namespace.fake_collection.fake_module.fake_submodule
mock_roles:
  - mocked_role
  - author.role_name # old standalone galaxy role
  - fake_namespace.fake_collection.fake_role # role within a collection

# Enable checking of loop variable prefixes in roles
# loop_var_prefix: "{role}_"

# Enforce variable names to follow pattern below, in addition to Ansible own
# requirements, like avoiding python identifiers. To disable add `var-naming`
# to skip_list.
# var_naming_pattern: "^[a-z_][a-z0-9_]*$"

use_default_rules: true
# Load custom rules from this specific folder
# rulesdir:
# - ./rule/directory/

# Ansible-lint completely ignores rules or tags listed below
# skip_list:
# - skip_this_tag

# Ansible-lint does not automatically load rules that have the 'opt-in' tag.
# You must enable opt-in rules by listing each rule 'id' below.
enable_list:
  - empty-string-compare # opt-in
  - no-log-password # opt-in
  - no-same-owner # opt-in
# add yaml here if you want to avoid ignoring yaml checks when yamllint
# library is missing. Normally its absence just skips using that rule.
  - yaml
# Report only a subset of tags and fully ignore any others
# tags:
#   - jinja[spacing]

# Ansible-lint does not fail on warnings from the rules or tags listed below
warn_list:
  - skip_this_tag
  - experimental # experimental is included in the implicit list
  - no-changed-when
  - latest[git]
  - var-naming[no-role-prefix]
  # - role-name
  # - yaml[document-start] # you can also use sub-rule matches

# skip_list:
#   - fqcn[action-core]
#   - fqcn[action]

# Some rules can transform files to fix (or make it easier to fix) identified
# errors. `ansible-lint --write` will reformat YAML files and run these transforms.
# By default it will run all transforms (effectively `write_list: ["all"]`).
# You can disable running transforms by setting `write_list: ["none"]`.
# Or only enable a subset of rule transforms by listing rules/tags here.
# write_list:
#   - all

# Offline mode disables installation of requirements.yml
offline: false

# Return success if number of violations compared with previous git
# commit has not increased. This feature works only in git
# repositories.
progressive: false

# Define required Ansible's variables to satisfy syntax check
extra_vars:
  foo: bar
  multiline_string_variable: |
    line1
    line2
  complex_variable: ":{;\t$()"

# Uncomment to enforce action validation with tasks, usually is not
# needed as Ansible syntax check also covers it.
# skip_action_validation: false

# List of additional kind:pattern to be added at the top of the default
# match list, first match determines the file kind.
kinds:
# - playbook: "**/examples/*.{yml,yaml}"
# - galaxy: "**/folder/galaxy.yml"
# - tasks: "**/tasks/*.yml"
# - vars: "**/vars/*.yml"
# - meta: "**/meta/main.yml"
  - yaml: "**/*.yaml-too"

# List of additional collections to allow in only-builtins rule.
# only_builtins_allow_collections:
# - example_ns.example_collection

# List of additions modules to allow in only-builtins rule.
# only_builtins_allow_modules:
# - example_module

# vim:ft=yaml

.gitignore (Normal file, vendored, 2 lines)
@@ -0,0 +1,2 @@
# ---> Ansible
*.retry

ansible.cfg (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[defaults]
remote_user = ubuntu
remote_tmp = /tmp/.ansible
inventory = inventory
use_persistent_connections = true
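
Note: ansible.cfg points at an inventory file named "inventory" that is not part of this commit. A minimal sketch of what it might look like, with a purely hypothetical host name and address:

    [all]
    pi ansible_host=192.0.2.10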

collections/requirements.yml (Normal file, 3 lines)
@@ -0,0 +1,3 @@
collections:
  - name: community.general
    version: '>=7.5.0'
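
Before the first run, the pinned collection can be installed with the standard ansible-galaxy command:

    ansible-galaxy collection install -r collections/requirements.yml

The roles below also use modules from community.docker, community.crypto and ansible.posix; only community.general is pinned here, so the others are assumed to ship with the installed Ansible distribution or to be installed separately.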

group_vars/all/secrets.yml (Normal file, 54 lines)
@@ -0,0 +1,54 @@
$ANSIBLE_VAULT;1.1;AES256
65653764303436313934646462396636636335303334636532306438613635333362313530323731
3236383962303039393238646362626665613463666335610a353261343163663934353366656630
65356562616661326535626238373635366233326366343631386165653735373637656330343735
3330376331306633300a393530313436653737366630336165653839616437626531346331653466
35626530373932303962333933626265326166656136623139666533643934653666383436383838
35643430383763323038646461313563373462353736376137323230613338613430303763376164
32633833653236323561643636373461353932663232663561636164383361663361346263376436
33346335323530666436393538326531623931643838633631646137306563306630336238333166
34346634306337313938626632663131333534356631386564363233643339623338363539326262
62336331646134626439313032626231383833353831343038393739366435663766333262633461
61383830656566666262376162376637313933336533396664303830306266323234323463613966
33383864393964323866613937623331343966373432643732383663343335316562326637353837
32646362613631633263386566353930363665353361353934393537353461383333343331356639
31633861346635386432363835373736363133303266383835633436313533393835616231313165
62363835623866323961313664333430656131373061373764316331343639653437633037636339
37656363366137666138333835353661613363333963333134313338386362656435633063353538
64346464336230323131346537653565383630613532356264633035363962303131663036343065
31363536366362303164373339333462653166333031616362653631383234303836613532633332
64333337343362613161626166393634636336373265643561323230383534326663643536366333
63393230343735346631306461653636396634343864623532306661326564633661623131346639
38363331613463616266633863303161326237313037643934383032663834366634363965396238
63633330636130373331393533393531623535656361306165623539383962653839353334643233
35663566383434396135323531353230643763326134323865343863616461326530353963376232
31373965353732386630366130656266623464666333383433393062613366363136333933343461
61323832626466386433636134383765383834643536363635623830303535646530613238343437
36363566396465313830326137396532393762623436646663643663393266396631363663343936
32383561643237386630353730323563313636663633623036363131366139396235343138306134
66653538663937616266343065333366613236313235346635326337633866363263313832653732
35663634363432383066386561663661643265613532386165646230313531356535353165343666
38663764643439633664353439366536323763663063626664623365613734386265393934383532
66323963303133653465366138666132666339353630323739383633383462373532323762663432
30643436376539303430343164663238376634346437623063656466653138626237663538626436
30623836393362666231323435383238643731623931396235346330323539643966663365363632
65646564656563303064643161353930396663363638383965616662663238646434373862316430
39343932656532626631323035633563373730393163396338653064326631626436373533333734
65626361346162383530626134336230346234653936366462393538353137373933376533313839
34383932343637623262373134636233373839313339393433303337363566643833353066396337
66643966373436393937363064353365363239323461653034626161383936303236313364366535
65316335333235623463613766633836643730363634666465386663386235306334376364323162
39333466383333643339633538336632376333623439646234643666333162326135663130303536
39663234633761633632346534383966313234613763323038626466346235333165303934633431
36313565346631623166383338643739346634393663303264373962343932376430663333376165
62613462396531323634613966616331623538306636343235393362396437633239366136616436
65323638393566363034633231643565636431356431386234316233636266663136656139663532
38613637636432626236323066643632343661316565343361323764353335313265383831373764
64323361333463346438626134323166623231393338373333653161623663336434383931393163
36633163393235636435323931313265633234623433653134616132346262653234636364376238
66313761333436336663323663626563656566366665336439643461623837666338313565313964
31666466663863623334316164316432353362316336616662666666363766306231653664306663
37613839383864386533326634336433633464343831303835656366616339393332633965323431
65643136643866653834353538356233623662663237303261333564346566643839633532366262
66653162366563666463353533656665323661326566383966306332626566663732353730313732
65323034326161306165613364336336386265313735396237623633346263333966
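
This file is encrypted with Ansible Vault and can be inspected or changed with the standard vault commands, for example:

    ansible-vault view group_vars/all/secrets.yml
    ansible-vault edit group_vars/all/secrets.yml

Later tasks and templates reference keys such as secrets.tls_email, secrets.caddy.njalla_api_token, secrets.nextcloud.*, secrets.pihole.web_pw, secrets.restic.*, secrets.ntfy_topic, secrets.hdd and secrets.ssd, so those values presumably live in this vault.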

group_vars/all/vars.yml (Normal file, 26 lines)
@@ -0,0 +1,26 @@
# vim: ft=yaml.ansible
---
hostname: default
timezone: Europe/Copenhagen

users:
  - name: ubuntu
    comment: System Administration
    password: $6$YitakVLuUxjnPfDd$aFnEDcc98y6MlRYxLPAhb.eHsKqSIz385i4VrHW1Q8b986IqUhtu62gaOIALzM4FAU3dnWaHNUTGxY0zgA6jC0
    groups:
      - sudo
    ssh_keys:
      - sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIFWZGLov8wPBNxuvnaPK+8vv6wK5hHUVEFzXKsN9QeuBAAAADHNzaDpzYW1zYXB0aQ== ssh:samsapti
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPd/4fQV7CL8/KVwbo/phiV5UdXFBIDlkZ+ps8C7FeRf cardno:14 336 332

open_ports:
  - { port: '22', proto: 'tcp', comment: 'SSH (not port-forwarded)' }
  - { port: '53', proto: 'tcp', comment: 'Pi-hole (not port-forwarded)' }
  - { port: '53', proto: 'udp', comment: 'Pi-hole (not port-forwarded)' }
  - { port: '80', proto: 'tcp', comment: 'HTTP' }
  - { port: '443', proto: 'tcp', comment: 'HTTPS' }
  - { port: '443', proto: 'udp', comment: 'HTTPS' }
  - { port: '4001', proto: 'tcp', comment: 'IPFS Kubo P2P' }
  - { port: '4001', proto: 'udp', comment: 'IPFS Kubo P2P' }
  - { port: '18080', proto: 'tcp', comment: 'monerod P2P' }
  - { port: '18089', proto: 'tcp', comment: 'monerod RPC' }
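
The password value above is a SHA-512 crypt hash, as expected by ansible.builtin.user. A hash in this format can be generated with, for example:

    openssl passwd -6
    # or: mkpasswd --method=sha-512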

playbooks/app.yml (Normal file, 13 lines)
@@ -0,0 +1,13 @@
# vim: ft=yaml.ansible
---
- name: Deploy self-hosted services
  hosts: all
  gather_facts: true
  become: true
  roles:
    - role: os_config
      tags:
        - os
    - role: docker_services
      tags:
        - docker
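
With an inventory and the vault password available, a typical full run and a tag-limited run look like this (standard ansible-playbook flags):

    ansible-playbook playbooks/app.yml --ask-vault-pass
    ansible-playbook playbooks/app.yml --ask-vault-pass --tags docker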

roles/docker_services/defaults/main.yml (Normal file, 73 lines)
@@ -0,0 +1,73 @@
# vim: ft=yaml.ansible
---
base_domain: sapti.me
local_domain: local.{{ base_domain }}
base_volume: "{{ ssd_mount_point }}/apps"
mass_data_volume: "{{ hdd_mount_point }}/apps"

services:
  caddy:
    volume: "{{ base_volume }}/caddy"
    docker_ipv4: 172.16.3.2
    version: '2.7.4'

  postfix:
    domain: smtp.{{ base_domain }}
    volume: "{{ base_volume }}/postfix"
    version: latest-alpine

  emby:
    domain: watch.{{ base_domain }}
    volume: "{{ base_volume }}/emby"
    data_volume: "{{ mass_data_volume }}/emby"
    version: latest

  ipfs:
    domain: ipfs.{{ local_domain }}
    gateway_domain: ipfs-gateway.{{ base_domain }}
    volume: "{{ base_volume }}/ipfs"
    version: v0.19.2 # https://github.com/ipfs/kubo/issues/9901

  monerod:
    domain: xmr.{{ base_domain }}
    volume: "{{ base_volume }}/monerod"
    version: latest

  nextcloud:
    domain: cloud.{{ base_domain }}
    volume: "{{ base_volume }}/nextcloud"
    version: 27-apache
    postgres_version: 14-alpine
    redis_version: 7-alpine

  snowflake:
    version: latest

  pihole:
    domain: pi-hole.{{ local_domain }}
    volume: "{{ base_volume }}/pi-hole"
    docker_ipv4: 172.18.3.2
    version: '2023.05.2'
    unbound_version: latest

  restic:
    repo: /restic
    version: '1.7'

  watchtower:
    version: '1.5.3'

local_ipv4s:
  - '192.168.1.0/24'
  - '192.168.8.0/24'

restic_volumes:
  - "/var/run/docker.sock:/var/run/docker.sock:rw"
  - "{{ services.caddy.volume }}:/mnt/volumes/caddy:ro"
  - "{{ services.postfix.volume }}:/mnt/volumes/postfix:ro"
  - "{{ services.emby.volume }}:/mnt/volumes/emby:ro"
  - "{{ services.nextcloud.volume }}:/mnt/volumes/nextcloud:ro"
  - "{{ services.pihole.volume }}:/mnt/volumes/pi-hole:ro"

sender_domains:
  - "{{ services.nextcloud.domain }}"
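
These defaults build paths from ssd_mount_point and hdd_mount_point, and the os_config disk tasks additionally expect hdd_name, ssd_name and the secrets.hdd/secrets.ssd entries; none of these are defined in the plain-text files of this commit. A purely illustrative sketch of the expected shape, with placeholder values:

    ssd_mount_point: /mnt/ssd   # placeholder
    hdd_mount_point: /mnt/hdd   # placeholder
    ssd_name: ssd_crypt         # LUKS mapper name, placeholder
    hdd_name: hdd_crypt         # LUKS mapper name, placeholder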

roles/docker_services/files/ipfs/ipfs-config.sh (Normal file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/sh

set -ex

ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin '["https://'$LOCAL_DOMAIN'"]'
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods '["PUT", "POST"]'

ipfs config --json Gateway.PublicGateways '{
  "'$IPFS_DOMAIN'": {
    "UseSubdomains": true,
    "Paths": ["/ipfs", "/ipns"]
  }
}'

ipfs config --json DNS.Resolvers '{
  ".": "https://anycast.censurfridns.dk/dns-query"
}'

ipfs config --json Datastore.StorageMax '"100GB"'
ipfs config --json Datastore.GCPeriod '"10m"'

roles/docker_services/files/pihole/forward-records.conf (Normal file, 10 lines)
@@ -0,0 +1,10 @@
private-domain: local.sapti.me

forward-zone:
    name: "."
    forward-tls-upstream: yes
    forward-no-cache: yes
    forward-addr: 91.239.100.100@853#anycast.censurfridns.dk
    forward-addr: 2001:67c:28a4::@853#anycast.censurfridns.dk
    forward-addr: 89.233.43.71@853#unicast.censurfridns.dk
    forward-addr: 2a01:3a0:53:53::@853#unicast.censurfridns.dk

roles/docker_services/handlers/main.yml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
# vim: ft=yaml.ansible
---
- name: Build Caddy Docker image
  community.docker.docker_image:
    name: custom/caddy:{{ services.caddy.version }}-alpine
    source: build
    build:
      path: "{{ services.caddy.volume }}"
      dockerfile: caddy.Dockerfile
    state: present

roles/docker_services/tasks/config.yml (Normal file, 28 lines)
@@ -0,0 +1,28 @@
# vim: ft=yaml.ansible
---
- name: Copy Docker daemon config file
  ansible.builtin.template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json
    owner: root
    mode: u=rw,g=r,o=r
  register: daemon_config

- name: Disable and (re)start Docker daemon
  ansible.builtin.service:
    name: "{{ item }}"
    enabled: false
    state: "{{ 'restarted' if daemon_config.changed else 'started' }}"
  loop:
    - docker.socket
    - docker.service
  when: down is undefined or not down

- name: Configure cron job to prune unused Docker data weekly
  ansible.builtin.cron:
    name: Prune unused Docker data
    cron_file: ansible_docker_prune
    job: 'docker system prune -fa && docker volume prune -fa'
    special_time: weekly
    user: root
    state: present

roles/docker_services/tasks/main.yml (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# vim: ft=yaml.ansible
---
- name: Add Docker PGP key
  ansible.builtin.apt_key:
    keyserver: keyserver.ubuntu.com
    id: '0x8D81803C0EBFCD88'
    state: present

- name: Add Docker apt repository
  ansible.builtin.apt_repository:
    repo: 'deb [arch=arm64] https://download.docker.com/linux/ubuntu focal stable'
    state: present
    update_cache: true

- name: Install Docker
  ansible.builtin.apt:
    name: "{{ pkgs }}"
    state: present
  vars:
    pkgs:
      - docker-ce
      - docker-compose-plugin

- name: Create docker-compose symlink
  ansible.builtin.file:
    name: /usr/local/bin/docker-compose
    src: /usr/libexec/docker/cli-plugins/docker-compose
    state: link

- name: Install Python bindings for Docker
  ansible.builtin.pip:
    name: "{{ pkgs }}"
    state: present
    executable: pip3
  vars:
    pkgs:
      - docker
      - docker-compose

- name: Configure Docker
  ansible.builtin.import_tasks: config.yml
  tags:
    - docker_config
    - reboot

- name: Set up Docker services
  ansible.builtin.import_tasks: services.yml
  tags:
    - services
    - reboot

roles/docker_services/tasks/services.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
# vim: ft=yaml.ansible
---
- name: Create Docker network for services
  community.docker.docker_network:
    name: services
    enable_ipv6: true
    ipam_config:
      - subnet: 172.16.0.0/16
      - subnet: fd02::/64
    state: present

- name: Create base directories for Docker volumes
  ansible.builtin.file:
    name: "{{ item }}"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory
  loop:
    - "{{ base_volume }}"
    - "{{ mass_data_volume }}"

- name: Deploy services
  ansible.builtin.include_tasks: services/{{ item.key }}.yml
  loop: "{{ services | dict2items }}"
  when: single_service is not defined

- name: Deploy single service
  ansible.builtin.include_tasks: services/{{ single_service }}.yml
  when: single_service is defined and
        single_service in services
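
The deploy tasks are driven by a few optional extra variables (single_service, down, restart, recreate) that the per-service task files check. For example, to redeploy only Caddy or to take every service down (standard --extra-vars usage):

    ansible-playbook playbooks/app.yml --ask-vault-pass --tags services -e single_service=caddy -e recreate=true
    ansible-playbook playbooks/app.yml --ask-vault-pass --tags services -e down=true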

roles/docker_services/tasks/services/caddy.yml (Normal file, 59 lines)
@@ -0,0 +1,59 @@
# vim: ft=yaml.ansible
---
- name: Create Caddy volume directories
  ansible.builtin.file:
    name: "{{ services.caddy.volume }}/{{ dir }}"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory
  loop:
    - config
    - data
  loop_control:
    loop_var: dir

- name: Copy Caddyfile
  ansible.builtin.template:
    src: Caddyfile.j2
    dest: "{{ services.caddy.volume }}/Caddyfile"
    owner: root
    mode: u=rw,g=r,o=r

- name: Copy caddy.Dockerfile
  ansible.builtin.template:
    src: caddy.Dockerfile.j2
    dest: "{{ services.caddy.volume }}/caddy.Dockerfile"
    owner: root
    mode: u=rw,g=r,o=r
  register: dockerfile
  notify: Build Caddy Docker image

- name: Flush handlers
  ansible.builtin.meta: flush_handlers

- name: Deploy Caddy Docker container
  community.docker.docker_container:
    name: caddy
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ dockerfile.changed or (recreate is defined and recreate) }}"
    image: custom/caddy:{{ services.caddy.version }}-alpine
    restart_policy: always
    default_host_ip: ''
    networks:
      - name: services
        ipv4_address: 172.16.3.2
    published_ports:
      - 80:80/tcp
      - 443:443/tcp
      - 443:443/udp
      - 18089:18089/tcp
    volumes:
      - "{{ services.caddy.volume }}/Caddyfile:/etc/caddy/Caddyfile:ro"
      - "{{ services.caddy.volume }}/config:/config:rw"
      - "{{ services.caddy.volume }}/data:/data:rw"
    capabilities:
      - net_bind_service
      - dac_override
    cap_drop:
      - all

roles/docker_services/tasks/services/emby.yml (Normal file, 36 lines)
@@ -0,0 +1,36 @@
# vim: ft=yaml.ansible
---
- name: Create Emby volume directories
  ansible.builtin.file:
    name: "{{ dir }}"
    owner: '1000'
    mode: u=rwx,g=rx,o=rx
    state: directory
  loop:
    - "{{ services.emby.volume }}/programdata"
    - "{{ services.emby.data_volume }}/tvshows"
    - "{{ services.emby.data_volume }}/movies"
  loop_control:
    loop_var: dir

- name: Deploy Emby Docker container
  community.docker.docker_container:
    name: emby
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: emby/embyserver_arm64v8:{{ services.emby.version }}
    restart_policy: always
    env:
      UID: '1000'
      GID: '1000'
    networks:
      - name: services
        aliases:
          - emby
    volumes:
      - "{{ services.emby.volume }}/programdata:/config:rw"
      - "{{ services.emby.data_volume }}/tvshows:/mnt/share1:rw"
      - "{{ services.emby.data_volume }}/movies:/mnt/share2:rw"
    devices:
      - /dev/vchiq:/dev/vchiq # MMAL/OMX on Raspberry Pi

roles/docker_services/tasks/services/ipfs.yml (Normal file, 45 lines)
@@ -0,0 +1,45 @@
# vim: ft=yaml.ansible
---
- name: Create IPFS Kubo volume directories
  ansible.builtin.file:
    name: "{{ dir }}"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory
  loop:
    - "{{ services.ipfs.volume }}/data"
    - "{{ services.ipfs.volume }}/staging"
  loop_control:
    loop_var: dir

- name: Copy ipfs-config.sh
  ansible.builtin.copy:
    src: ipfs/ipfs-config.sh
    dest: "{{ services.ipfs.volume }}/ipfs-config.sh"
    owner: root
    mode: u=rwx,g=rx,o=rx

- name: Deploy IPFS Kubo Docker container
  community.docker.docker_container:
    name: ipfs_kubo
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: ipfs/kubo:{{ services.ipfs.version }}
    restart_policy: always
    default_host_ip: ''
    env:
      IPFS_DOMAIN: "{{ services.ipfs.gateway_domain }}"
      IPFS_PROFILE: server
      LOCAL_DOMAIN: "{{ services.ipfs.domain }}"
    networks:
      - name: services
        aliases:
          - ipfs_kubo
    volumes:
      - "{{ services.ipfs.volume }}/ipfs-config.sh:/container-init.d/ipfs-config.sh:ro"
      - "{{ services.ipfs.volume }}/data:/data/ipfs:rw"
      - "{{ services.ipfs.volume }}/staging:/export:rw"
    published_ports:
      - 4001:4001/tcp
      - 4001:4001/udp

roles/docker_services/tasks/services/monerod.yml (Normal file, 26 lines)
@@ -0,0 +1,26 @@
# vim: ft=yaml.ansible
---
- name: Create Monero node volume directory
  ansible.builtin.file:
    name: "{{ services.monerod.volume }}"
    owner: '1000'
    mode: u=rwx,g=rx,o=rx
    state: directory

- name: Deploy Monero node Docker container
  community.docker.docker_container:
    name: monerod
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: sethsimmons/simple-monerod:{{ services.monerod.version }}
    restart_policy: always
    default_host_ip: ''
    networks:
      - name: services
        aliases:
          - monerod
    volumes:
      - "{{ services.monerod.volume }}:/home/monero/.bitmonero:rw"
    published_ports:
      - 18080:18080/tcp

roles/docker_services/tasks/services/nextcloud.yml (Normal file, 108 lines)
@@ -0,0 +1,108 @@
# vim: ft=yaml.ansible
---
- name: Create Nextcloud apache2 directory
  ansible.builtin.file:
    name: "{{ services.nextcloud.volume }}/apache2"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory

- name: Create Nextcloud app directory
  ansible.builtin.file:
    name: "{{ services.nextcloud.volume }}/app"
    owner: root
    group: '33'
    mode: u=rwx,g=rx,o=rx
    state: directory

- name: Create Nextcloud PostgreSQL directory
  ansible.builtin.file:
    name: "{{ services.nextcloud.volume }}/postgres"
    owner: '70'
    mode: u=rwx,go=
    state: directory

- name: Copy Apache2 remoteip config file
  ansible.builtin.template:
    src: remoteip.conf.j2
    dest: "{{ services.nextcloud.volume }}/apache2/remoteip.conf"
    owner: root
    mode: u=rw,g=r,o=r

- name: Deploy Nextcloud with Docker Compose
  community.docker.docker_compose:
    project_name: nextcloud
    state: "{{ 'absent' if down is defined and down else 'present' }}"
    restarted: "{{ restart is defined and restart }}"
    recreate: "{{ 'always' if recreate is defined and recreate else 'smart' }}"
    pull: true
    definition:
      version: '3.8'

      services:
        postgres:
          image: postgres:{{ services.nextcloud.postgres_version }}
          restart: always
          environment:
            POSTGRES_DB: nextcloud
            POSTGRES_USER: nextcloud
            POSTGRES_PASSWORD: "{{ secrets.nextcloud.postgres_pw }}"
          volumes:
            - "{{ services.nextcloud.volume }}/postgres:/var/lib/postgresql/data:rw"

        redis:
          image: redis:{{ services.nextcloud.redis_version }}
          restart: always
          command: redis-server --requirepass {{ secrets.nextcloud.redis_pw }}
          tmpfs:
            - /var/lib/redis

        app:
          image: nextcloud:{{ services.nextcloud.version }}
          restart: always
          environment:
            POSTGRES_HOST: postgres
            POSTGRES_DB: nextcloud
            POSTGRES_USER: nextcloud
            POSTGRES_PASSWORD: "{{ secrets.nextcloud.postgres_pw }}"
            REDIS_HOST: redis
            REDIS_HOST_PASSWORD: "{{ secrets.nextcloud.redis_pw }}"
            MAIL_FROM_ADDRESS: noreply
            MAIL_DOMAIN: "{{ services.nextcloud.domain }}"
            SMTP_AUTHTYPE: PLAIN
            SMTP_HOST: postfix
            SMTP_PORT: 587
            TRUSTED_PROXIES: "{{ services.caddy.docker_ipv4 }}"
            OVERWRITEHOST: "{{ services.nextcloud.domain }}"
            OVERWRITEPROTOCOL: https
            OVERWRITECLIURL: https://{{ services.nextcloud.domain }}
            NEXTCLOUD_INIT_LOCK: 'true'
            PHP_MEMORY_LIMIT: 2G
            PHP_UPLOAD_LIMIT: 16G
          networks:
            default:
            postfix:
            services:
              aliases:
                - nextcloud
          volumes:
            - "{{ services.nextcloud.volume }}/app:/var/www/html:rw"
            - "{{ services.nextcloud.volume }}/apache2/remoteip.conf:/etc/apache2/conf-enabled/remoteip.conf:ro"
          depends_on:
            - postgres
            - redis

        cron:
          image: nextcloud:{{ services.nextcloud.version }}
          restart: always
          entrypoint: /cron.sh
          volumes:
            - "{{ services.nextcloud.volume }}/app:/var/www/html:rw"
          depends_on:
            - app

      networks:
        postfix:
          external: true
        services:
          external: true

roles/docker_services/tasks/services/pihole.yml (Normal file, 80 lines)
@@ -0,0 +1,80 @@
# vim: ft=yaml.ansible
---
- name: Create Pi-hole volume base directory
  ansible.builtin.file:
    name: "{{ services.pihole.volume }}"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory

- name: Create Pi-hole volume directory pihole
  ansible.builtin.file:
    name: "{{ services.pihole.volume }}/pihole"
    owner: '999'
    group: '1000'
    mode: u=rwx,g=rwx,o=rx
    state: directory

- name: Create other Pi-hole volume directories
  ansible.builtin.file:
    name: "{{ services.pihole.volume }}/{{ dir }}"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory
  loop:
    - dnsmasq.d
    - unbound
  loop_control:
    loop_var: dir

- name: Copy forward-records.conf for Unbound
  ansible.builtin.copy:
    src: pihole/forward-records.conf
    dest: "{{ services.pihole.volume }}/unbound/forward-records.conf"
    owner: root
    mode: u=rw,g=r,o=r

- name: Deploy Pi-hole with Docker Compose
  community.docker.docker_compose:
    project_name: pihole
    state: "{{ 'absent' if down is defined and down else 'present' }}"
    restarted: "{{ restart is defined and restart }}"
    recreate: "{{ 'always' if recreate is defined and recreate else 'smart' }}"
    pull: true
    definition:
      version: '3.8'

      services:
        app:
          image: pihole/pihole:{{ services.pihole.version }}
          restart: always
          environment:
            DNSMASQ_LISTENING: all
            DHCP_ACTIVE: 'false'
            DNSSEC: 'true'
            PIHOLE_DNS_: unbound
            WEBPASSWORD: "{{ secrets.pihole.web_pw }}"
            TZ: "{{ timezone }}"
          networks:
            default:
            services:
              aliases:
                - pihole
          volumes:
            - "{{ services.pihole.volume }}/pihole:/etc/pihole:rw"
            - "{{ services.pihole.volume }}/dnsmasq.d:/etc/dnsmasq.d:rw"
          ports:
            - 53:53/tcp
            - 53:53/udp
          depends_on:
            - unbound

        unbound:
          image: mvance/unbound-rpi:{{ services.pihole.unbound_version }}
          restart: always
          volumes:
            - "{{ services.pihole.volume }}/unbound/forward-records.conf:/opt/unbound/etc/unbound/forward-records.conf:ro"

      networks:
        services:
          external: true

roles/docker_services/tasks/services/postfix.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
# vim: ft=yaml.ansible
---
- name: Create Docker network for Postfix
  community.docker.docker_network:
    name: postfix
    state: present

- name: Create Postfix volume directories
  ansible.builtin.file:
    name: "{{ services.postfix.volume }}/dkim"
    owner: root
    mode: u=rwx,g=rx,o=rx
    state: directory

- name: Deploy Postfix Docker container
  community.docker.docker_container:
    name: postfix
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: boky/postfix:{{ services.postfix.version }}
    restart_policy: always
    env:
      ALLOWED_SENDER_DOMAINS: "{{ sender_domains | join(' ') }}"
      HOSTNAME: "{{ services.postfix.domain }}"
      DKIM_AUTOGENERATE: "true"
    networks:
      - name: postfix
    volumes:
      - "{{ services.postfix.volume }}/dkim:/etc/opendkim/keys:rw"

roles/docker_services/tasks/services/restic.yml (Normal file, 67 lines)
@@ -0,0 +1,67 @@
# vim: ft=yaml.ansible
---
- name: Deploy Restic with Docker Compose
  community.docker.docker_compose:
    project_name: restic
    state: "{{ 'absent' if down is defined and down else 'present' }}"
    restarted: "{{ restart is defined and restart }}"
    recreate: "{{ 'always' if recreate is defined and recreate else 'smart' }}"
    pull: true
    definition:
      version: '3.8'

      services:
        backup:
          image: mazzolino/restic:{{ services.restic.version }}
          restart: always
          environment:
            RUN_ON_STARTUP: 'false'
            BACKUP_CRON: 0 0 3 * * *
            RESTIC_REPOSITORY: b2:{{ secrets.restic.b2.bucket }}:{{ services.restic.repo }}
            RESTIC_PASSWORD: "{{ secrets.restic.repo_pw }}"
            RESTIC_BACKUP_SOURCES: /mnt/volumes
            RESTIC_BACKUP_ARGS: >-
              --tag docker-volumes
              --exclude '*.tmp'
              --verbose
            RESTIC_FORGET_ARGS: >-
              --keep-last 10
              --keep-daily 7
              --keep-weekly 5
              --keep-monthly 12
            PRE_COMMANDS: |-
              docker exec -u www-data nextcloud_app_1 php occ maintenance:mode --on
            POST_COMMANDS_EXIT: |-
              docker exec -u www-data nextcloud_app_1 php occ maintenance:mode --off
            B2_ACCOUNT_ID: "{{ secrets.restic.b2.id }}"
            B2_ACCOUNT_KEY: "{{ secrets.restic.b2.key }}"
            TZ: "{{ timezone }}"
          volumes: "{{ restic_volumes }}"

        prune:
          image: mazzolino/restic:{{ services.restic.version }}
          restart: always
          environment:
            RUN_ON_STARTUP: 'false'
            PRUNE_CRON: 0 0 4 * * *
            RESTIC_REPOSITORY: b2:{{ secrets.restic.b2.bucket }}:{{ services.restic.repo }}
            RESTIC_PASSWORD: "{{ secrets.restic.repo_pw }}"
            RESTIC_PRUNE_ARGS: >-
              --verbose
            B2_ACCOUNT_ID: "{{ secrets.restic.b2.id }}"
            B2_ACCOUNT_KEY: "{{ secrets.restic.b2.key }}"
            TZ: "{{ timezone }}"

        check:
          image: mazzolino/restic:{{ services.restic.version }}
          restart: always
          environment:
            RUN_ON_STARTUP: 'false'
            CHECK_CRON: 0 0 5 * * *
            RESTIC_REPOSITORY: b2:{{ secrets.restic.b2.bucket }}:{{ services.restic.repo }}
            RESTIC_PASSWORD: "{{ secrets.restic.repo_pw }}"
            RESTIC_CHECK_ARGS: >-
              --verbose
            B2_ACCOUNT_ID: "{{ secrets.restic.b2.id }}"
            B2_ACCOUNT_KEY: "{{ secrets.restic.b2.key }}"
            TZ: "{{ timezone }}"

roles/docker_services/tasks/services/snowflake.yml (Normal file, 11 lines)
@@ -0,0 +1,11 @@
# vim: ft=yaml.ansible
---
- name: Deploy snowflake-proxy Docker container
  community.docker.docker_container:
    name: snowflake-proxy
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: thetorproject/snowflake-proxy:{{ services.snowflake.version }}
    restart_policy: always
    network_mode: host

roles/docker_services/tasks/services/watchtower.yml (Normal file, 21 lines)
@@ -0,0 +1,21 @@
# vim: ft=yaml.ansible
---
- name: Create Docker network for Watchtower
  community.docker.docker_network:
    name: watchtower
    state: present

- name: Deploy Watchtower Docker container
  community.docker.docker_container:
    name: watchtower
    state: "{{ 'absent' if down is defined and down else 'started' }}"
    restart: "{{ restart is defined and restart }}"
    recreate: "{{ recreate is defined and recreate }}"
    image: containrrr/watchtower:{{ services.watchtower.version }}
    restart_policy: always
    networks:
      - name: watchtower
    env:
      WATCHTOWER_POLL_INTERVAL: '3600'
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:rw

roles/docker_services/templates/Caddyfile.j2 (Normal file, 96 lines)
@@ -0,0 +1,96 @@
{
    admin off
}

{{ services.emby.domain }} {
    tls {{ secrets.tls_email }}

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    reverse_proxy emby:8096
}

{{ services.ipfs.domain }} {
    tls {{ secrets.tls_email }} {
        dns njalla {{ secrets.caddy.njalla_api_token }}
    }

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    @local {
        remote_ip {{ local_ipv4s | join(' ') }}
    }

    handle @local {
        reverse_proxy ipfs_kubo:5001
    }

    respond 403
}

{{ services.ipfs.gateway_domain }},
*.ipfs.{{ services.ipfs.gateway_domain }},
*.ipns.{{ services.ipfs.gateway_domain }} {
    tls {{ secrets.tls_email }} {
        dns njalla {{ secrets.caddy.njalla_api_token }}
    }

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    reverse_proxy ipfs_kubo:8080
}

{{ services.monerod.domain }}:18089 {
    tls {{ secrets.tls_email }}

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    reverse_proxy monerod:18089
}

{{ services.nextcloud.domain }} {
    tls {{ secrets.tls_email }}

    rewrite /.well-known/caldav /remote.php/dav
    rewrite /.well-known/carddav /remote.php/dav

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    reverse_proxy nextcloud:80
}

{{ services.pihole.domain }} {
    tls {{ secrets.tls_email }} {
        dns njalla {{ secrets.caddy.njalla_api_token }}
    }

    header {
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        -Server
    }

    @local {
        remote_ip {{ local_ipv4s | join(' ') }}
    }

    handle @local {
        reverse_proxy pihole:80
    }

    respond 403
}

roles/docker_services/templates/caddy.Dockerfile.j2 (Normal file, 8 lines)
@@ -0,0 +1,8 @@
FROM caddy:{{ services.caddy.version }}-builder-alpine AS builder

RUN xcaddy build v{{ services.caddy.version }} \
    --with github.com/caddy-dns/njalla

FROM caddy:{{ services.caddy.version }}-alpine

COPY --from=builder /usr/bin/caddy /usr/bin/caddy

roles/docker_services/templates/daemon.json.j2 (Normal file, 11 lines)
@@ -0,0 +1,11 @@
{
  "data-root": "{{ ssd_mount_point }}/docker-runtime",
  "default-address-pools": [
    {
      "base": "172.17.0.0/16",
      "size": 24
    }
  ],
  "experimental": true,
  "ip6tables": true
}

roles/docker_services/templates/remoteip.conf.j2 (Normal file, 2 lines)
@@ -0,0 +1,2 @@
RemoteIPHeader X-Forwarded-For
RemoteIPInternalProxy {{ services.caddy.docker_ipv4 }}

roles/os_config/handlers/main.yml (Normal file, 42 lines)
@@ -0,0 +1,42 @@
# vim: ft=yaml.ansible
---
- name: Restart systemd-resolved
  ansible.builtin.service:
    name: systemd-resolved
    state: restarted

- name: Create .env for apt-update-push
  ansible.builtin.template:
    src: env.j2
    dest: /home/{{ ansible_user }}/apt-update-push/.env
    owner: root
    mode: u=rw,go=
  listen: apt-update-push

- name: Install apt-update-push
  ansible.builtin.command: /home/{{ ansible_user }}/apt-update-push/install.sh
  listen: apt-update-push

- name: Change GPIO_PIN
  ansible.builtin.lineinfile:
    path: /home/{{ ansible_user }}/pi-fan-controller/fancontrol.py
    regexp: '^GPIO_PIN = '
    line: GPIO_PIN = 14
    state: present
  listen: pi-fan-controller

- name: Install requirements for pi-fan-controller
  ansible.builtin.pip:
    requirements: /home/{{ ansible_user }}/pi-fan-controller/requirements.txt
    executable: pip3
    state: present
  listen: pi-fan-controller

- name: Install pi-fan-controller
  ansible.builtin.command: /home/{{ ansible_user }}/pi-fan-controller/script/install
  listen: pi-fan-controller

- name: Restart sshd
  ansible.builtin.service:
    name: sshd
    state: restarted

roles/os_config/tasks/base.yml (Normal file, 67 lines)
@@ -0,0 +1,67 @@
# vim: ft=yaml.ansible
---
- name: Set hostname
  ansible.builtin.hostname:
    name: "{{ hostname }}"

- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"

- name: Set /etc/resolv.conf symlink
  ansible.builtin.file:
    path: /etc/resolv.conf
    src: /run/systemd/resolve/resolv.conf
    owner: root
    force: true
    state: link

- name: Disable systemd-resolved stub resolver
  ansible.builtin.lineinfile:
    path: /etc/systemd/resolved.conf
    regexp: '^#?DNSStubListener='
    line: 'DNSStubListener=no'
    state: present
  notify: Restart systemd-resolved

- name: Upgrade system packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: full

- name: Install packages via apt
  ansible.builtin.apt:
    name: "{{ pkgs }}"
    state: present
  vars:
    pkgs:
      - apparmor
      - curl
      - git
      - haveged
      - needrestart
      - python3-pip
      - ufw
      - unattended-upgrades

- name: Clone apt-update-push
  ansible.builtin.git:
    dest: /home/{{ ansible_user }}/apt-update-push
    repo: https://github.com/samsapti/apt-update-push.git
    clone: true
    update: true
    single_branch: true
    depth: 1
  become: false
  notify: apt-update-push

- name: Clone pi-fan-controller
  ansible.builtin.git:
    dest: /home/{{ ansible_user }}/pi-fan-controller
    repo: https://github.com/Howchoo/pi-fan-controller.git
    clone: true
    update: false
    single_branch: true
    depth: 1
  become: false
  notify: pi-fan-controller

roles/os_config/tasks/disks.yml (Normal file, 52 lines)
@@ -0,0 +1,52 @@
# vim: ft=yaml.ansible
---
- name: (Create and) open LUKS containers
  community.crypto.luks_device:
    uuid: "{{ item.disk.uuid }}"
    passphrase: "{{ item.disk.luks_pw }}"
    name: "{{ item.name }}"
    type: luks2
    state: opened
  loop:
    - disk: "{{ secrets.hdd }}"
      name: "{{ hdd_name }}"
    - disk: "{{ secrets.ssd }}"
      name: "{{ ssd_name }}"
  no_log: true

- name: Create filesystems if they do not exist
  community.general.filesystem:
    dev: "{{ item }}"
    fstype: ext4
    state: present
  loop:
    - /dev/mapper/{{ hdd_name }}
    - /dev/mapper/{{ ssd_name }}

- name: Mount filesystems
  ansible.posix.mount:
    src: "{{ item.dev }}"
    path: "{{ item.path }}"
    fstype: ext4
    state: ephemeral
  loop:
    - dev: /dev/mapper/{{ hdd_name }}
      path: "{{ hdd_mount_point }}"
    - dev: /dev/mapper/{{ ssd_name }}
      path: "{{ ssd_mount_point }}"
  when: ansible_mounts | selectattr('device', 'eq', item.dev) | length == 0

- name: Create swapfile
  community.general.filesize:
    path: "{{ ssd_mount_point }}/swapfile"
    size: 2G
    blocksize: 512B
    owner: root
    mode: u=rw,go=
  when: ansible_swaptotal_mb == 0

- name: Mount swapfile
  ansible.builtin.shell: |
    mkswap {{ ssd_mount_point }}/swapfile
    swapon {{ ssd_mount_point }}/swapfile
  when: ansible_swaptotal_mb == 0

roles/os_config/tasks/firewall.yml (Normal file, 14 lines)
@@ -0,0 +1,14 @@
# vim: ft=yaml.ansible
---
- name: Allow necessary ports in UFW
  community.general.ufw:
    to_port: "{{ item.port }}"
    proto: "{{ item.proto }}"
    comment: "{{ item.comment }}"
    rule: allow
  loop: "{{ open_ports }}"

- name: Enable UFW
  community.general.ufw:
    policy: deny
    state: enabled

roles/os_config/tasks/main.yml (Normal file, 34 lines)
@@ -0,0 +1,34 @@
# vim: ft=yaml.ansible
---
- name: Configure user accounts
  ansible.builtin.import_tasks: users.yml
  tags:
    - users

- name: Configure system base
  ansible.builtin.import_tasks: base.yml
  tags:
    - base

- name: Reboot if needed
  ansible.builtin.import_tasks: reboot.yml
  tags:
    - reboot

- name: Configure firewall
  ansible.builtin.import_tasks: firewall.yml
  tags:
    - firewall

- name: Configure SSH
  ansible.builtin.import_tasks: ssh.yml
  tags:
    - ssh

- name: Configure disks
  ansible.builtin.import_tasks: disks.yml
  tags:
    - reboot

- name: Flush handlers
  ansible.builtin.meta: flush_handlers

roles/os_config/tasks/reboot.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
# vim: ft=yaml.ansible
---
- name: Check if a reboot is needed
  ansible.builtin.stat:
    path: /var/run/reboot-required
  register: needs_reboot

- name: Include docker_services role for service shutdown
  ansible.builtin.include_role:
    name: docker_services
    tasks_from: services.yml
    apply:
      ignore_errors: true
  vars:
    down: true
  when: needs_reboot.stat.exists or
        (do_reboot is defined and do_reboot)

- name: Reboot host
  ansible.builtin.reboot:
  when: needs_reboot.stat.exists or
        (do_reboot is defined and do_reboot)
  register: rebooted

- name: Re-gather facts
  ansible.builtin.setup:
    filter:
      - ansible_mounts
      - ansible_swaptotal_mb
  when: rebooted.rebooted is defined and rebooted.rebooted

roles/os_config/tasks/ssh.yml (Normal file, 25 lines)
@@ -0,0 +1,25 @@
# vim: ft=yaml.ansible
---
- name: Allow SSH login with public keys
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?PubkeyAuthentication '
    line: PubkeyAuthentication yes
    state: present
  notify: Restart sshd

- name: Disallow SSH login with password
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?PasswordAuthentication '
    line: PasswordAuthentication no
    state: present
  notify: Restart sshd

- name: Disallow root login over SSH
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?PermitRootLogin '
    line: PermitRootLogin no
    state: present
  notify: Restart sshd

roles/os_config/tasks/users.yml (Normal file, 19 lines)
@@ -0,0 +1,19 @@
# vim: ft=yaml.ansible
---
- name: Add users
  ansible.builtin.user:
    name: "{{ item.name }}"
    comment: "{{ item.comment }}"
    password: "{{ item.password }}"
    groups: "{{ item.groups }}"
    shell: /bin/bash
    update_password: always
  loop: "{{ users }}"
  no_log: true

- name: Add ssh authorized_keys
  ansible.posix.authorized_key:
    user: "{{ item.name }}"
    key: "{{ item.ssh_keys | join('\n') }}"
    exclusive: true
  loop: "{{ users }}"

roles/os_config/templates/env.j2 (Normal file, 2 lines)
@@ -0,0 +1,2 @@
topic={{ secrets.ntfy_topic }}
hour=20