
Merge pull request #1 from mrdev023/dev-ansible

Ansible migration
Florian Richer 2023-05-28 14:49:45 +02:00 committed by GitHub
commit e026113bb7
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
101 changed files with 1390 additions and 183338 deletions

1
.gitattributes vendored Normal file
View file

@ -0,0 +1 @@
*.qcow2 filter=lfs diff=lfs merge=lfs -text

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
work/image.qcow2
work/inventory.yaml

15
README.md Normal file
View file

@ -0,0 +1,15 @@
# Configure
```bash
ansible-galaxy collection install community.docker
```
```bash
ansible-playbook playbook.yml --ask-become-pass
```
# UTILS
```bash
cd $(docker volume inspect [NAME] | jq -r '.[0].Mountpoint')
```
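Every role in playbook.yml (added in this commit) carries a tag, so a single service can be deployed or updated on its own. A sketch of the usual invocations; the host name is a placeholder from the git-ignored work/inventory.yaml:
```bash
# Deploy only Traefik and the whoami test container (tags come from playbook.yml)
ansible-playbook playbook.yml --ask-become-pass --tags traefik,whoami

# Limit the run to one inventory host ("homeserver" is a placeholder)
ansible-playbook playbook.yml --ask-become-pass --limit homeserver
```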

View file

@ -1,21 +0,0 @@
version: '3'
services:
affine:
image: ghcr.io/toeverything/affine:nightly-server-latest
restart: always
container_name: affine
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.affine-secure.entrypoints=https"
- "traefik.http.routers.affine-secure.rule=Host(`affine.mrdev023.fr`)"
- "traefik.http.routers.affine-secure.tls=true"
- "traefik.http.routers.affine-secure.tls.certresolver=sslResolver"
- "traefik.http.services.affine-secure.loadbalancer.server.port=3000"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true

4
ansible.cfg Normal file
View file

@ -0,0 +1,4 @@
[defaults]
inventory = work/inventory.yaml
retry_files_enabled = false
interpreter_python = auto_silent
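ansible.cfg expects the inventory at work/inventory.yaml, which .gitignore excludes. A minimal sketch of such an inventory, with placeholder host, address and user (only the port ties back to server.ssh_port in group_vars/all.yml):
```yaml
# work/inventory.yaml (git-ignored; all values below are placeholders)
all:
  hosts:
    homeserver:
      ansible_host: 192.168.1.50
      ansible_user: fedora
      ansible_port: 22   # keep in sync with server.ssh_port once the ssh role has run
```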

3
dolibarr/.gitignore vendored
View file

@ -1,3 +0,0 @@
dolibarr/
dump/
mysql/

View file

@ -1,46 +0,0 @@
version: '3'
services:
mariadb:
image: mariadb:latest
environment:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: dolibarr
volumes:
- ./mysql/data:/var/lib/mysql
- ./dump:/root/dump
networks:
- internal
dolibarr:
image: upshift/dolibarr:latest
restart: always
container_name: dolibarr
environment:
DOLI_DB_HOST: mariadb
DOLI_DB_USER: root
DOLI_DB_PASSWORD: root
DOLI_DB_NAME: dolibarr
DOLI_URL_ROOT: 'http://0.0.0.0'
DOLI_ADMIN_LOGIN: admin
DOLI_ADMIN_PASSWORD: admin
PHP_INI_date.timezone: 'Europe/Paris'
DOLI_INSTALL_AUTO: 1
volumes:
- ./dolibarr/documents:/var/www/documents
- ./dolibarr/html:/var/www/html
networks:
- proxy
- internal
labels:
- "traefik.enable=true"
- "traefik.http.routers.dolibarr-secure.entrypoints=https"
- "traefik.http.routers.dolibarr-secure.rule=Host(`dolibarr.mrdev023.fr`)"
- "traefik.http.routers.dolibarr-secure.tls=true"
- "traefik.http.routers.dolibarr-secure.tls.certresolver=sslResolver"
- "traefik.docker.network=proxy"
networks:
internal:
proxy:
external: true

View file

@ -1,87 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: firewall rules
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
# Flush previous rules
iptables -F
iptables -X
########
# DROP #
########
# Set the default policy to drop everything
iptables -P INPUT DROP
iptables -P OUTPUT DROP
iptables -P FORWARD DROP
# Drop XMAS and NULL scans
iptables -A INPUT -p tcp --tcp-flags FIN,URG,PSH FIN,URG,PSH -j DROP
iptables -A INPUT -p tcp --tcp-flags ALL ALL -j DROP
iptables -A INPUT -p tcp --tcp-flags ALL NONE -j DROP
iptables -A INPUT -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
##########
# ACCEPT #
##########
# Keep already established connections
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# Allow loopback (127.0.0.1)
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# Allow traffic to/from the DNS server (53)
iptables -A OUTPUT -p udp -m udp --dport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A INPUT -p udp -m udp --sport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -p tcp -m tcp --dport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
iptables -A INPUT -p tcp -m tcp --sport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
# NTP (123)
iptables -A INPUT -p udp --sport 123 -j ACCEPT
iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
# HTTP (80)
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 80 -j ACCEPT
# HTTP MATRIX FEDERATION (8448)
iptables -A INPUT -p tcp --dport 8448 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 8448 -j ACCEPT
# HTTPS (443)
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 443 -j ACCEPT
# SSH (7943)
iptables -A INPUT -p tcp --dport 7943 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 7943 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 22 -j ACCEPT # ACCEPT SSH OUTPUT LIKE GIT
iptables -A OUTPUT -p tcp --dport 2277 -j ACCEPT # ACCEPT SSH OUTPUT LIKE GITLAB UNOVA
# ICMP (Ping)
iptables -A INPUT -p icmp -j ACCEPT
iptables -A OUTPUT -p icmp -j ACCEPT
# Mitigate denial-of-service attacks
iptables -A FORWARD -p tcp --syn -m limit --limit 1/second -j ACCEPT
iptables -A FORWARD -p udp -m limit --limit 1/second -j ACCEPT
iptables -A FORWARD -p icmp --icmp-type echo-request -m limit --limit 1/second -j ACCEPT
# Mitigate port scans
iptables -A FORWARD -p tcp --tcp-flags SYN,ACK,FIN,RST RST -m limit --limit 1/s -j ACCEPT
# Allow all from private network and docker network
iptables -A INPUT -j ACCEPT -d 172.17.0.0/16
iptables -A OUTPUT -j ACCEPT -d 172.17.0.0/16
iptables -A INPUT -j ACCEPT -d 192.168.1.0/24
iptables -A OUTPUT -j ACCEPT -d 192.168.1.0/24

38
group_vars/all.yml Normal file
View file

@ -0,0 +1,38 @@
##
# Global configuration
server:
domain: mrdev023.test
ssh_port: 22
backup:
folder: /backup
cron_expression: "* * * * *"
retention_days: 7
filename_date_format: "%Y-%m-%d"
acme:
email: florian.richer.97@outlook.com
debug: true
# Other
protonmail:
initialized: false
restore:
cloud:
nextcloud_archive: false
db_archive: false
home_assistant:
home_assistant_config_archive: false
matrix:
db_archive: false
matrix_archive: false
mautrix_facebook_archive: false
mautrix_instagram_archive: false
mautrix_discord_archive: false
n8n:
n8n_archive: false
db_archive: false
protonmail:
protonmail_archive: false
traefik:
prometheus_archive: false
grafana_archive: false

View file

@ -1,26 +0,0 @@
version: '3'
services:
home_assistant:
image: homeassistant/home-assistant
restart: always
container_name: home_assistant
environment:
- TZ=Europe/Paris
volumes:
- home_assistant_config:/config
- /etc/localtime:/etc/localtime:ro
network_mode: host
expose:
- 8123
labels:
- "traefik.enable=true"
- "traefik.http.routers.homeassistant-secure.entrypoints=https"
- "traefik.http.routers.homeassistant-secure.rule=Host(`domo.mrdev023.fr`)"
- "traefik.http.routers.homeassistant-secure.tls=true"
- "traefik.http.routers.homeassistant-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.homeassistant-secure.service=homeassistant"
- "traefik.http.services.homeassistant.loadbalancer.server.port=8123"
volumes:
home_assistant_config:

View file

@ -1,6 +0,0 @@
#!/bin/bash
# Must be run as sudo
cp ./firewall /etc/init.d/firewall
chmod +x /etc/init.d/firewall
update-rc.d firewall defaults

View file

@ -1,66 +0,0 @@
version: '3'
services:
postgres:
image: postgres:latest
restart: unless-stopped
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_DB=synapse
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=WRyu2kuArNjRxojstqpg7EfcoUP9zoka
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
synapse:
image: matrixdotorg/synapse:latest
restart: unless-stopped
ports:
- 8448:8448
networks:
- internal
- proxy
volumes:
- matrix:/data
labels:
- "traefik.enable=true"
- "traefik.http.routers.matrix-secure.entrypoints=https"
- "traefik.http.routers.matrix-secure.rule=Host(`matrix.mrdev023.fr`)"
- "traefik.http.routers.matrix-secure.tls=true"
- "traefik.http.routers.matrix-secure.tls.certresolver=sslResolver"
- "traefik.port=8008"
- "traefik.docker.network=proxy"
well-kwown:
image: nginx:latest
restart: unless-stopped
networks:
- internal
- proxy
volumes:
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf
- ./nginx/www:/var/www/
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
- "traefik.http.routers.matrix-wellknown.rule=Host(`matrix.mrdev023.fr`) && PathPrefix(`/.well-known/matrix`)"
- "traefik.http.routers.matrix-wellknown.tls=true"
- "traefik.http.routers.matrix-wellknown.tls.certresolver=sslResolver"
- "traefik.http.routers.matrix-wellknown.middlewares=cors-headers@docker"
- "traefik.docker.network=proxy"
volumes:
db:
web:
matrix:
networks:
internal:
proxy:
external: true

View file

@ -1,72 +0,0 @@
version: "3"
services:
outline:
image: outlinewiki/outline
env_file: ./docker.env
depends_on:
- postgres
- redis
- storage
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.outline-secure.entrypoints=https"
- "traefik.http.routers.outline-secure.rule=Host(`outline.mrdev023.fr`)"
- "traefik.http.routers.outline-secure.tls=true"
- "traefik.http.routers.outline-secure.tls.certresolver=sslResolver"
- "traefik.http.services.whoami.loadbalancer.server.port=3000"
- "traefik.docker.network=proxy"
redis:
image: redis
env_file: ./docker.env
volumes:
- ./redis.conf:/redis.conf
command: ["redis-server", "/redis.conf"]
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 30s
retries: 3
postgres:
image: postgres
env_file: ./docker.env
volumes:
- database-data:/var/lib/postgresql/data
healthcheck:
test: ["CMD", "pg_isready"]
interval: 30s
timeout: 20s
retries: 3
environment:
POSTGRES_USER: 'user'
POSTGRES_PASSWORD: 'pass'
POSTGRES_DB: 'outline'
storage:
image: minio/minio
env_file: ./docker.env
entrypoint: sh
command: -c 'minio server'
deploy:
restart_policy:
condition: on-failure
volumes:
- storage-data:/data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
volumes:
https-portal-data:
storage-data:
database-data:
networks:
proxy:
external: true

View file

@ -1,96 +0,0 @@
## Should be set to the public domain where penpot is going to be served.
##
## NOTE: If you are going to serve it under different domain than
## 'localhost' without HTTPS, consider setting the
## `disable-secure-session-cookies' flag on the 'PENPOT_FLAGS'
## setting.
PENPOT_PUBLIC_URI=https://penpot.mrdev023.fr
## Feature flags.
PENPOT_FLAGS=disable-registration disable-demo-users enable-login disable-email-verification
## Temporal workaround because of bad builtin default
PENPOT_HTTP_SERVER_HOST=0.0.0.0
## Standard database connection parameters (only postgresql is supported):
PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
PENPOT_DATABASE_USERNAME=penpot
PENPOT_DATABASE_PASSWORD=penpot
## Redis is used for the websockets notifications.
PENPOT_REDIS_URI=redis://penpot-redis/0
## By default, files uploaded by users are stored in local
## filesystem. But it can be configured to store in AWS S3.
PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
## Telemetry. When enabled, a periodical process will send anonymous
## data about this instance. Telemetry data will enable us to learn on
## how the application is used, based on real scenarios. If you want
## to help us, please leave it enabled.
PENPOT_TELEMETRY_ENABLED=true
## Email sending configuration. By default, emails are printed in the
## console, but for production usage is recommended to setup a real
## SMTP provider. Emails are used to confirm user registrations.
PENPOT_SMTP_DEFAULT_FROM=no-reply@example.com
PENPOT_SMTP_DEFAULT_REPLY_TO=no-reply@example.com
# PENPOT_SMTP_HOST=
# PENPOT_SMTP_PORT=
# PENPOT_SMTP_USERNAME=
# PENPOT_SMTP_PASSWORD=
# PENPOT_SMTP_TLS=true
# PENPOT_SMTP_SSL=false
## Comma separated list of allowed domains to register. Empty to allow
## all.
# PENPOT_REGISTRATION_DOMAIN_WHITELIST=""
## Authentication providers
## Google
# PENPOT_GOOGLE_CLIENT_ID=
# PENPOT_GOOGLE_CLIENT_SECRET=
## GitHub
# PENPOT_GITHUB_CLIENT_ID=
# PENPOT_GITHUB_CLIENT_SECRET=
## GitLab
# PENPOT_GITLAB_BASE_URI=https://gitlab.com
# PENPOT_GITLAB_CLIENT_ID=
# PENPOT_GITLAB_CLIENT_SECRET=
## OpenID Connect (since 1.5.0)
# PENPOT_OIDC_BASE_URI=
# PENPOT_OIDC_CLIENT_ID=
# PENPOT_OIDC_CLIENT_SECRET=
## LDAP
##
## NOTE: to enable ldap, you will need to put 'enable-login-with-ldap'
## on the 'PENPOT_FLAGS' environment variable.
# PENPOT_LDAP_HOST=ldap
# PENPOT_LDAP_PORT=10389
# PENPOT_LDAP_SSL=false
# PENPOT_LDAP_STARTTLS=false
# PENPOT_LDAP_BASE_DN=ou=people,dc=planetexpress,dc=com
# PENPOT_LDAP_BIND_DN=cn=admin,dc=planetexpress,dc=com
# PENPOT_LDAP_BIND_PASSWORD=GoodNewsEveryone
# PENPOT_LDAP_ATTRS_USERNAME=uid
# PENPOT_LDAP_ATTRS_EMAIL=mail
# PENPOT_LDAP_ATTRS_FULLNAME=cn

View file

@ -1,78 +0,0 @@
version: '3.5'
services:
penpot-frontend:
image: 'penpotapp/frontend:latest'
restart: always
volumes:
- penpot_assets_data:/opt/data
env_file:
- config.env
depends_on:
- penpot-backend
- penpot-exporter
labels:
- "traefik.enable=true"
- "traefik.http.routers.penpot-secure.entrypoints=https"
- "traefik.http.routers.penpot-secure.rule=Host(`penpot.mrdev023.fr`)"
- "traefik.http.routers.penpot-secure.tls=true"
- "traefik.http.routers.penpot-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.whoami-secure.service=whoami"
# - "traefik.http.services.whoami.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
networks:
- internal
- proxy
penpot-backend:
image: 'penpotapp/backend:latest'
restart: always
volumes:
- penpot_assets_data:/opt/data
depends_on:
- penpot-postgres
- penpot-redis
env_file:
- config.env
networks:
- internal
penpot-exporter:
image: 'penpotapp/exporter:latest'
restart: always
env_file:
- config.env
environment:
# Don't touch it; this uses internal docker network to
# communicate with the frontend.
- PENPOT_PUBLIC_URI=http://penpot-frontend
networks:
- internal
penpot-postgres:
image: 'postgres:14'
restart: always
environment:
- POSTGRES_INITDB_ARGS=--data-checksums
- POSTGRES_DB=penpot
- POSTGRES_USER=penpot
- POSTGRES_PASSWORD=penpot
volumes:
- penpot_postgres_data:/var/lib/postgresql/data
networks:
- internal
penpot-redis:
image: redis:7
restart: always
networks:
- internal
volumes:
penpot_postgres_data:
penpot_assets_data:
networks:
internal:
proxy:
external: true

View file

@ -1,31 +0,0 @@
version: "3"
services:
pihole:
container_name: pihole
image: pihole/pihole:latest
networks:
- proxy
ports:
- "53:53/tcp"
- "53:53/udp"
environment:
TZ: 'Europe/Paris'
DNSMASQ_LISTENING: 'all'
WEBPASSWORD: 'ad89wahdw9d'
volumes:
- './etc-pihole:/etc/pihole'
- './etc-dnsmasq.d:/etc/dnsmasq.d'
labels:
- "traefik.enable=true"
- "traefik.http.routers.pihole-secure.entrypoints=https"
- "traefik.http.routers.pihole-secure.rule=Host(`pihole.mrdev023.fr`)"
- "traefik.http.routers.pihole-secure.tls=true"
- "traefik.http.routers.pihole-secure.tls.certresolver=http"
- "traefik.http.services.pihole.loadbalancer.server.port=80"
- "traefik.docker.network=proxy"
restart: always
networks:
proxy:
external: true

View file

@ -1,37 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Dnsmasq config for Pi-hole's FTLDNS
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
# #
# IF YOU WISH TO CHANGE THE UPSTREAM SERVERS, CHANGE THEM IN: #
# /etc/pihole/setupVars.conf #
# #
# ANY OTHER CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
addn-hosts=/etc/pihole/local.list
addn-hosts=/etc/pihole/custom.list
localise-queries
no-resolv
log-queries
log-facility=/var/log/pihole/pihole.log
log-async
cache-size=10000
server=8.8.8.8
server=8.8.4.4
except-interface=nonexisting

View file

@ -1,42 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2021 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# RFC 6761 config file for Pi-hole
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
###############################################################################
# FILE AUTOMATICALLY POPULATED BY PI-HOLE INSTALL/UPDATE PROCEDURE. #
# ANY CHANGES MADE TO THIS FILE AFTER INSTALL WILL BE LOST ON THE NEXT UPDATE #
# #
# CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
###############################################################################
# RFC 6761: Caching DNS servers SHOULD recognize
# test, localhost, invalid
# names as special and SHOULD NOT attempt to look up NS records for them, or
# otherwise query authoritative DNS servers in an attempt to resolve these
# names.
server=/test/
server=/localhost/
server=/invalid/
# The same RFC requests something similar for
# 10.in-addr.arpa. 21.172.in-addr.arpa. 27.172.in-addr.arpa.
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 28.172.in-addr.arpa.
# 17.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
# 18.172.in-addr.arpa. 24.172.in-addr.arpa. 30.172.in-addr.arpa.
# 19.172.in-addr.arpa. 25.172.in-addr.arpa. 31.172.in-addr.arpa.
# 20.172.in-addr.arpa. 26.172.in-addr.arpa. 168.192.in-addr.arpa.
# Pi-hole implements this via the dnsmasq option "bogus-priv" (see
# 01-pihole.conf) because this also covers IPv6.
# OpenWRT furthermore blocks bind, local, onion domains
# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
server=/bind/
server=/onion/

View file

@ -1 +0,0 @@
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts

View file

@ -1,9 +0,0 @@
Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
Level3;4.2.2.1;4.2.2.2;;
Comodo;8.26.56.26;8.20.247.20;;
DNS.WATCH (DNSSEC);84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b
Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9
Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10
Quad9 (filtered, ECS, DNSSEC);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11
Cloudflare (DNSSEC);1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large Load diff

View file

@ -1 +0,0 @@
ff54f3730e1efafd13cb49a132e3c41f4b2f7437 /etc/pihole/list.1.raw.githubusercontent.com.domains

File diff suppressed because it is too large Load diff

View file

@ -1 +0,0 @@
1caf2c98a1a2a147544eb5e023679eb0beed8f2a /etc/pihole/list.2.raw.githubusercontent.com.domains

View file

@ -1 +0,0 @@
### Do not modify this file, it will be overwritten by pihole -g

View file

@ -1,21 +0,0 @@
/var/log/pihole/pihole.log {
su root root
daily
copytruncate
rotate 5
compress
delaycompress
notifempty
nomail
}
/var/log/pihole/FTL.log {
su root root
weekly
copytruncate
rotate 3
compress
delaycompress
notifempty
nomail
}

View file

@ -1 +0,0 @@
/macvendor.db

View file

@ -1 +0,0 @@
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts

View file

@ -1,3 +0,0 @@
#; Pi-hole FTL config file
#; Comments should start with #; to avoid issues with PHP and bash reading this file
LOCAL_IPV4=0.0.0.0

Binary file not shown.

View file

@ -1,8 +0,0 @@
INSTALL_WEB_INTERFACE=true
WEBPASSWORD=3bbfdf71e1f0cb5be7f470af136acbd9c44821f1bf44a1c55b38b7f68d85ffe7
PIHOLE_DNS_1=8.8.8.8
PIHOLE_DNS_2=8.8.4.4
PIHOLE_INTERFACE=eth0
QUERY_LOGGING=true
BLOCKING_ENABLED=true
DNSMASQ_LISTENING=all

View file

@ -1,8 +0,0 @@
INSTALL_WEB_INTERFACE=true
WEBPASSWORD=3bbfdf71e1f0cb5be7f470af136acbd9c44821f1bf44a1c55b38b7f68d85ffe7
PIHOLE_DNS_1=8.8.8.8
PIHOLE_DNS_2=8.8.4.4
PIHOLE_INTERFACE=eth0
QUERY_LOGGING=true
BLOCKING_ENABLED=true
DNSMASQ_LISTENING=all

View file

@ -1,11 +0,0 @@
CORE_BRANCH=master
WEB_BRANCH=master
FTL_BRANCH=master
CORE_VERSION=v5.13-0-g17779ba
WEB_VERSION=v5.16-0-gc2afe42
FTL_VERSION=v5.18.2
DOCKER_VERSION=2022.10
GITHUB_CORE_VERSION=v5.14.2
GITHUB_WEB_VERSION=v5.18
GITHUB_FTL_VERSION=v5.20
GITHUB_DOCKER_VERSION=2022.12.1

17
playbook.yml Normal file
View file

@ -0,0 +1,17 @@
---
- name: Install servers
hosts: all
roles:
- { role: docker, tags: ["docker"] }
- { role: volume_restore, tags: ["volume_restore"] }
- { role: ssh, tags: ["ssh"] }
- { role: traefik, tags: ["traefik"] }
- { role: whoami, tags: ["whoami"] }
- { role: protonmail, tags: ["protonmail"] }
- { role: cloud, tags: ["cloud"] }
- { role: home_assistant, tags: ["home_assistant"] }
- { role: n8n, tags: ["n8n"] }
- { role: matrix, tags: ["matrix"] }
- { role: iptables, tags: ["iptables"] }
- { role: clean, tags: ["clean"] }
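To see which role tags are available without executing anything:
```bash
ansible-playbook playbook.yml --list-tags
```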

21
prepare_test_env.sh Executable file
View file

@ -0,0 +1,21 @@
#!/bin/sh
#echo "DOWNLOAD FEDORA IMAGE"
#mkdir -p work
#wget --show-progress -nc -c -O work/image.qcow2 https://download.fedoraproject.org/pub/fedora/linux/releases/38/Server/x86_64/images/Fedora-Server-KVM-38-1.6.x86_64.qcow2
#echo "PREPARE IMG"
#qemu-img resize work/image.qcow2 30G
echo "COPY BASE IMG"
cp work/base.qcow2 work/image.qcow2
# sudo systemctl enable --now libvirtd
# sudo virsh --connect qemu:///session net-start default
virt-install --connect qemu:///session --name FedoraServer \
--memory 8192 --cpu host --vcpus 4 --graphics none \
--os-variant fedora-unknown \
--import \
--disk work/image.qcow2,format=qcow2,bus=virtio \
--network bridge=virbr0,model=virtio
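The script boots a throwaway copy of base.qcow2; once testing is done, the guest can be removed so the next run starts clean again. A sketch using standard virsh commands on the same session connection:
```bash
# Tear down the test VM created above
virsh --connect qemu:///session destroy FedoraServer    # stop it if still running
virsh --connect qemu:///session undefine FedoraServer   # drop the libvirt domain definition
rm -f work/image.qcow2                                  # discard the throwaway disk
```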

View file

@ -1,18 +0,0 @@
version: '3'
services:
protonmail-bridge:
image: shenxn/protonmail-bridge
restart: always
container_name: protonmail-bridge
networks:
- protonmail
volumes:
- protonmail:/root
volumes:
protonmail:
networks:
protonmail:
external: true

View file

@ -1,29 +0,0 @@
version: '3'
services:
rhasspy:
image: rhasspy/rhasspy
restart: always
container_name: rhasspy
volumes:
- rhasspy_profiles:/profiles
- /etc/localtime:/etc/localtime:ro
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.rhasspy-secure.entrypoints=https"
- "traefik.http.routers.rhasspy-secure.rule=Host(`rhasspy.mrdev023.fr`)"
- "traefik.http.routers.rhasspy-secure.tls=true"
- "traefik.http.routers.rhasspy-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.rhasspy-secure.service=rhasspy"
- "traefik.http.services.rhasspy.loadbalancer.server.port=12101"
- "traefik.docker.network=proxy"
command: --user-profiles /profiles --profile fr
volumes:
rhasspy_profiles:
networks:
proxy:
external: true

View file

@ -0,0 +1,10 @@
---
- name: Prune everything
community.docker.docker_prune:
containers: true
images: true
networks: true
volumes: true
builder_cache: true
become: yes

View file

@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: docker.yml
name: docker

View file

@ -0,0 +1,37 @@
---
- name: Check cloud directory exists
ansible.builtin.file:
path: cloud
state: directory
- name: Copy cloud conf
ansible.builtin.copy:
backup: true
src: .
dest: cloud/
register: cloud_copy_files_results
- name: Ensure cron.sh has +x permission
ansible.builtin.file:
path: cloud/cron.sh
mode: u=rwx,g=rx,o=rx
modification_time: preserve
access_time: preserve
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "cloud/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: cloud_copy_templates_results
- name: Update and restart container
community.docker.docker_compose:
project_src: cloud
state: present
pull: true
restarted: "{{ cloud_copy_files_results.changed or cloud_copy_templates_results.changed }}"
become: true

View file

@ -0,0 +1,8 @@
---
- name: Ensure a cron job that runs every 5 minutes for the Nextcloud cron
ansible.builtin.cron:
name: "check dirs"
minute: "*/5"
job: "cd {{ ansible_env.HOME }}/cloud && ./cron.sh"
become: yes
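cron.sh is copied to the host together with the rest of the role's files but its content is not part of this diff; for a containerised Nextcloud it typically boils down to invoking cron.php inside the container. A hypothetical sketch:
```bash
#!/bin/sh
# Hypothetical cloud/cron.sh — run Nextcloud's background jobs inside the "nextcloud" container
docker compose exec -T -u www-data nextcloud php -f /var/www/html/cron.php
```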

View file

@ -0,0 +1,11 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: cron.yml
name: cron
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.cloud.nextcloud_archive and restore.cloud.db_archive

View file

@ -0,0 +1,43 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.cloud.nextcloud_archive }}', dest: 'cloud_nextcloud.tar.gz' }
- { src: '{{ restore.cloud.db_archive }}', dest: 'cloud_db.tar.gz' }
- name: Stop nextcloud container
community.docker.docker_compose:
project_src: cloud
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './cloud_nextcloud.tar.gz', volume: 'cloud_nextcloud' }
- { path: './cloud_db.tar.gz', volume: 'cloud_db' }
become: true
- name: Start nextcloud container
community.docker.docker_compose:
project_src: cloud
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'cloud_nextcloud.tar.gz' }
- { path: 'cloud_db.tar.gz' }

View file

@ -1,70 +1,97 @@
version: '3'
services:
db:
image: postgres:14
restart: always
container_name: nextcloud_db
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=nextcloud
- POSTGRES_DB=nextcloud
- POSTGRES_USER=nextcloud
nextcloud:
image: nextcloud
restart: always
container_name: nextcloud
networks:
- proxy
- protonmail
- internal
depends_on:
- db
volumes:
- nextcloud:/var/www/html
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.nextcloud-compress.compress=true"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.permanent=true"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.regex=https://(.*)/.well-known/(card|cal)dav"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.replacement=https://$$1/remote.php/dav/"
- "traefik.http.middlewares.nextcloud-headers.headers.frameDeny=true"
- "traefik.http.middlewares.nextcloud-headers.headers.sslRedirect=true"
- "traefik.http.middlewares.nextcloud-headers.headers.contentTypeNosniff=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsIncludeSubdomains=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsPreload=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsSeconds=31536000"
- "traefik.http.middlewares.nextcloud-headers.headers.referrerPolicy=same-origin"
- "traefik.http.middlewares.nextcloud-headers.headers.browserXssFilter=true"
- "traefik.http.middlewares.nextcloud-headers.headers.customRequestHeaders.X-Forwarded-Proto=https"
- "traefik.http.middlewares.nextcloud-headers.headers.customRequestHeaders.X-Robots-Tag=none"
- "traefik.http.middlewares.nextcloud-headers.headers.customFrameOptionsValue=SAMEORIGIN"
- "traefik.http.routers.nextcloud-secure.entrypoints=https"
- "traefik.http.routers.nextcloud-secure.rule=Host(`mycld.mrdev023.fr`)"
- "traefik.http.routers.nextcloud-secure.tls=true"
- "traefik.http.routers.nextcloud-secure.tls.certresolver=sslResolver"
- "traefik.http.routers.nextcloud-secure.middlewares=nextcloud-compress,nextcloud-regex-redirect,nextcloud-headers"
# - "traefik.http.routers.nextcloud-secure.service=nextcloud"
# - "traefik.http.services.nextcloud.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
environment:
- POSTGRES_PASSWORD=nextcloud
- POSTGRES_DATABASE=nextcloud
- POSTGRES_USER=nextcloud
- POSTGRES_HOST=db
- OVERWRITEPROTOCOL=https
volumes:
nextcloud:
db:
networks:
internal:
proxy:
external: true
protonmail:
external: true
version: '3'
services:
db:
image: postgres:14
restart: always
container_name: nextcloud_db
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=nextcloud
- POSTGRES_DB=nextcloud
- POSTGRES_USER=nextcloud
nextcloud:
image: nextcloud
restart: always
container_name: nextcloud
networks:
- proxy
- protonmail
- internal
depends_on:
- db
volumes:
- nextcloud:/var/www/html
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.nextcloud-compress.compress=true"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.permanent=true"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.regex=https://(.*)/.well-known/(card|cal)dav"
- "traefik.http.middlewares.nextcloud-regex-redirect.redirectregex.replacement=https://$$1/remote.php/dav/"
- "traefik.http.middlewares.nextcloud-headers.headers.frameDeny=true"
- "traefik.http.middlewares.nextcloud-headers.headers.sslRedirect=true"
- "traefik.http.middlewares.nextcloud-headers.headers.contentTypeNosniff=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsIncludeSubdomains=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsPreload=true"
- "traefik.http.middlewares.nextcloud-headers.headers.stsSeconds=31536000"
- "traefik.http.middlewares.nextcloud-headers.headers.referrerPolicy=same-origin"
- "traefik.http.middlewares.nextcloud-headers.headers.browserXssFilter=true"
- "traefik.http.middlewares.nextcloud-headers.headers.customRequestHeaders.X-Forwarded-Proto=https"
- "traefik.http.middlewares.nextcloud-headers.headers.customRequestHeaders.X-Robots-Tag=none"
- "traefik.http.middlewares.nextcloud-headers.headers.customFrameOptionsValue=SAMEORIGIN"
- "traefik.http.routers.nextcloud-secure.entrypoints=https"
- "traefik.http.routers.nextcloud-secure.rule=Host(`mycld.{{ server.domain }}`)"
- "traefik.http.routers.nextcloud-secure.tls=true"
- "traefik.http.routers.nextcloud-secure.tls.certresolver=sslResolver"
- "traefik.http.routers.nextcloud-secure.middlewares=nextcloud-compress,nextcloud-regex-redirect,nextcloud-headers"
# - "traefik.http.routers.nextcloud-secure.service=nextcloud"
# - "traefik.http.services.nextcloud.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
environment:
- POSTGRES_PASSWORD=nextcloud
- POSTGRES_DATABASE=nextcloud
- POSTGRES_USER=nextcloud
- POSTGRES_HOST=db
- OVERWRITEPROTOCOL=https
# BACKUP
backup_nextcloud:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-nextcloud.tar.gz"
BACKUP_LATEST_SYMLINK: nextcloud.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- nextcloud:/backup:ro
- {{ server.backup.folder }}/cloud:/archive
backup_db:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-db.tar.gz"
BACKUP_LATEST_SYMLINK: db.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- db:/backup:ro
- {{ server.backup.folder }}/cloud:/archive
volumes:
nextcloud:
db:
networks:
internal:
proxy:
external: true
protonmail:
external: true

View file

@ -0,0 +1,37 @@
---
- name: set mydistribution
ansible.builtin.set_fact:
mydistribution: "{{ 'rhel' if (ansible_distribution == 'Red Hat Enterprise Linux') else (ansible_distribution | lower) }}"
- name: Add signing key
ansible.builtin.rpm_key:
key: "https://download.docker.com/linux/{{ mydistribution }}/gpg"
state: present
- name: Add repository into repo.d list
ansible.builtin.yum_repository:
name: docker
description: docker repository
baseurl: "https://download.docker.com/linux/{{ mydistribution }}/$releasever/$basearch/stable"
enabled: true
gpgcheck: true
gpgkey: "https://download.docker.com/linux/{{ mydistribution }}/gpg"
- name: Install Docker
ansible.builtin.yum:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- docker-compose
- python-docker
state: latest
update_cache: true
- name: Start Docker
ansible.builtin.service:
name: "docker"
enabled: true
state: started

View file

@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: install.yml
name: base
become: true

View file

@ -0,0 +1,23 @@
---
- name: Check home_assistant directory exists
ansible.builtin.file:
path: home_assistant
state: directory
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "home_assistant/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: home_assistant_copy_templates_results
- name: Update and restart container
community.docker.docker_compose:
project_src: home_assistant
state: present
pull: true
restarted: "{{ home_assistant_copy_templates_results.changed }}"
become: true

View file

@ -0,0 +1,8 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.home_assistant.home_assistant_config_archive

View file

@ -0,0 +1,40 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.home_assistant.home_assistant_config_archive }}', dest: 'home_assistant_home_assistant_config.tar.gz' }
- name: Stop home_assistant container
community.docker.docker_compose:
project_src: home_assistant
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './home_assistant_home_assistant_config.tar.gz', volume: 'home_assistant_home_assistant_config' }
become: true
- name: Start home_assistant container
community.docker.docker_compose:
project_src: home_assistant
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'home_assistant_home_assistant_config.tar.gz' }

View file

@ -0,0 +1,40 @@
version: '3'
services:
home_assistant:
image: homeassistant/home-assistant
restart: always
container_name: home_assistant
environment:
- TZ=Europe/Paris
volumes:
- home_assistant_config:/config
- /etc/localtime:/etc/localtime:ro
network_mode: host
expose:
- 8123
labels:
- "traefik.enable=true"
- "traefik.http.routers.homeassistant-secure.entrypoints=https"
- "traefik.http.routers.homeassistant-secure.rule=Host(`domo.{{ server.domain }}`)"
- "traefik.http.routers.homeassistant-secure.tls=true"
- "traefik.http.routers.homeassistant-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.homeassistant-secure.service=homeassistant"
- "traefik.http.services.homeassistant.loadbalancer.server.port=8123"
# BACKUP
backup_home_assistant_config:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-home-assistant-config.tar.gz"
BACKUP_LATEST_SYMLINK: home-assistant-config.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- home_assistant_config:/backup:ro
- {{ server.backup.folder }}/home_assistant:/archive
volumes:
home_assistant_config:

View file

@ -0,0 +1,19 @@
---
- name: Copy conf
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
group: root
mode: u=rwx,g=rx,o=rx
loop:
- { src: 'firewall.j2', dest: '/usr/bin/firewall' }
register: iptables_templates_results
become: yes
- name: Ensure firewall is loaded at startup
ansible.builtin.cron:
name: "Firewall"
special_time: "reboot"
job: "/usr/bin/firewall"
become: yes

View file

@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: base.yml
name: base

View file

@ -0,0 +1,85 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: firewall rules
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
# Flush previous rules
/sbin/iptables -F
/sbin/iptables -X
########
# DROP #
########
# Set the default policy to drop everything
/sbin/iptables -P INPUT DROP
/sbin/iptables -P OUTPUT DROP
/sbin/iptables -P FORWARD DROP
# Drop XMAS and NULL scans
/sbin/iptables -A INPUT -p tcp --tcp-flags FIN,URG,PSH FIN,URG,PSH -j DROP
/sbin/iptables -A INPUT -p tcp --tcp-flags ALL ALL -j DROP
/sbin/iptables -A INPUT -p tcp --tcp-flags ALL NONE -j DROP
/sbin/iptables -A INPUT -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
##########
# ACCEPT #
##########
# Keep already established connections
/sbin/iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# Allow loopback (127.0.0.1)
/sbin/iptables -A INPUT -i lo -j ACCEPT
/sbin/iptables -A OUTPUT -o lo -j ACCEPT
# Allow traffic to/from the DNS server (53)
/sbin/iptables -A OUTPUT -p udp -m udp --dport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A INPUT -p udp -m udp --sport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A OUTPUT -p tcp -m tcp --dport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A INPUT -p tcp -m tcp --sport 53 -m conntrack --ctstate NEW,RELATED,ESTABLISHED -j ACCEPT
# NTP (123)
/sbin/iptables -A INPUT -p udp --sport 123 -j ACCEPT
/sbin/iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
# HTTP (80)
/sbin/iptables -A INPUT -p tcp --dport 80 -j ACCEPT
/sbin/iptables -A OUTPUT -p tcp --dport 80 -j ACCEPT
# HTTP MATRIX FEDERATION (8448)
/sbin/iptables -A INPUT -p tcp --dport 8448 -j ACCEPT
/sbin/iptables -A OUTPUT -p tcp --dport 8448 -j ACCEPT
# HTTPS (443)
/sbin/iptables -A INPUT -p tcp --dport 443 -j ACCEPT
/sbin/iptables -A OUTPUT -p tcp --dport 443 -j ACCEPT
# SSH
/sbin/iptables -A INPUT -p tcp --dport {{ server.ssh_port }} -j ACCEPT
/sbin/iptables -A OUTPUT -p tcp --dport 22 -j ACCEPT # ACCEPT SSH OUTPUT LIKE GIT
# ICMP (Ping)
/sbin/iptables -A INPUT -p icmp -j ACCEPT
/sbin/iptables -A OUTPUT -p icmp -j ACCEPT
# Mitigate denial-of-service attacks
/sbin/iptables -A FORWARD -p tcp --syn -m limit --limit 1/second -j ACCEPT
/sbin/iptables -A FORWARD -p udp -m limit --limit 1/second -j ACCEPT
/sbin/iptables -A FORWARD -p icmp --icmp-type echo-request -m limit --limit 1/second -j ACCEPT
# Mitigate port scans
/sbin/iptables -A FORWARD -p tcp --tcp-flags SYN,ACK,FIN,RST RST -m limit --limit 1/s -j ACCEPT
# Allow all from private network and docker network
/sbin/iptables -A INPUT -j ACCEPT -d 172.17.0.0/16
/sbin/iptables -A OUTPUT -j ACCEPT -d 172.17.0.0/16
/sbin/iptables -A INPUT -j ACCEPT -d 192.168.1.0/24
/sbin/iptables -A OUTPUT -j ACCEPT -d 192.168.1.0/24
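The iptables role installs this template as /usr/bin/firewall and schedules it via an @reboot cron entry, so the rules only apply after the next reboot unless the script is run by hand. To apply and inspect them immediately:
```bash
sudo /usr/bin/firewall     # apply the rules without waiting for the @reboot cron job
sudo iptables -L -n -v     # list the resulting chains, rules and packet counters
```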

View file

@ -0,0 +1,30 @@
---
- name: Check matrix directory exists
ansible.builtin.file:
path: matrix
state: directory
- name: Copy matrix conf
ansible.builtin.copy:
backup: true
src: .
dest: matrix/
register: matrix_copy_files_results
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "matrix/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: matrix_copy_templates_results
- name: Update and restart container
community.docker.docker_compose:
project_src: matrix
state: present
pull: true
restarted: "{{ matrix_copy_files_results.changed or matrix_copy_templates_results.changed }}"
become: true

View file

@ -0,0 +1,8 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.matrix.db_archive and restore.matrix.matrix_archive and restore.matrix.mautrix_facebook_archive and restore.matrix.mautrix_instagram_archive and restore.matrix.mautrix_discord_archive

View file

@ -0,0 +1,52 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.matrix.db_archive }}', dest: 'matrix_db.tar.gz' }
- { src: '{{ restore.matrix.matrix_archive }}', dest: 'matrix_matrix.tar.gz' }
- { src: '{{ restore.matrix.mautrix_facebook_archive }}', dest: 'matrix_mautrix_facebook.tar.gz' }
- { src: '{{ restore.matrix.mautrix_instagram_archive }}', dest: 'matrix_mautrix_instagram.tar.gz' }
- { src: '{{ restore.matrix.mautrix_discord_archive }}', dest: 'matrix_mautrix_discord.tar.gz' }
- name: Stop matrix container
community.docker.docker_compose:
project_src: matrix
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './matrix_db.tar.gz', volume: 'matrix_db' }
- { path: './matrix_matrix.tar.gz', volume: 'matrix_matrix' }
- { path: './matrix_mautrix_facebook.tar.gz', volume: 'matrix_mautrix_facebook' }
- { path: './matrix_mautrix_instagram.tar.gz', volume: 'matrix_mautrix_instagram' }
- { path: './matrix_mautrix_discord.tar.gz', volume: 'matrix_mautrix_discord' }
become: true
- name: Start matrix container
community.docker.docker_compose:
project_src: matrix
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'matrix_db.tar.gz' }
- { path: 'matrix_matrix.tar.gz' }
- { path: 'matrix_mautrix_facebook.tar.gz' }
- { path: 'matrix_mautrix_instagram.tar.gz' }
- { path: 'matrix_mautrix_discord.tar.gz' }

View file

@ -0,0 +1,164 @@
version: '3'
services:
postgres:
image: postgres:latest
restart: unless-stopped
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_DB=synapse
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=WRyu2kuArNjRxojstqpg7EfcoUP9zoka
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
synapse:
image: matrixdotorg/synapse:latest
restart: unless-stopped
ports:
- 8448:8448
networks:
- internal
- proxy
volumes:
- matrix:/data
- mautrix_facebook:/facebook_data
- mautrix_instagram:/instagram_data
- mautrix_discord:/discord_data
labels:
- "traefik.enable=true"
- "traefik.http.routers.matrix-secure.entrypoints=https"
- "traefik.http.routers.matrix-secure.rule=Host(`matrix.{{ server.domain }}`)"
- "traefik.http.routers.matrix-secure.tls=true"
- "traefik.http.routers.matrix-secure.tls.certresolver=sslResolver"
- "traefik.port=8008"
- "traefik.docker.network=proxy"
well-kwown:
image: nginx:latest
restart: unless-stopped
networks:
- internal
- proxy
volumes:
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf
- ./nginx/www:/var/www/
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
- "traefik.http.routers.matrix-wellknown.rule=Host(`matrix.{{ server.domain }}`) && PathPrefix(`/.well-known/matrix`)"
- "traefik.http.routers.matrix-wellknown.tls=true"
- "traefik.http.routers.matrix-wellknown.tls.certresolver=sslResolver"
- "traefik.http.routers.matrix-wellknown.middlewares=cors-headers@docker"
- "traefik.docker.network=proxy"
# BRIDGE
mautrix-facebook:
container_name: mautrix-facebook
image: dock.mau.dev/mautrix/facebook:v0.4.1
restart: unless-stopped
networks:
- internal
volumes:
- mautrix_facebook:/data
mautrix-instagram:
container_name: mautrix-instagram
image: dock.mau.dev/mautrix/instagram:v0.2.3
restart: unless-stopped
networks:
- internal
volumes:
- mautrix_instagram:/data
mautrix-discord:
container_name: mautrix-discord
image: dock.mau.dev/mautrix/discord:v0.2.0
restart: unless-stopped
networks:
- internal
volumes:
- mautrix_discord:/data
# BACKUP
backup_db:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-db.tar.gz"
BACKUP_LATEST_SYMLINK: db.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- db:/backup:ro
- {{ server.backup.folder }}/matrix:/archive
backup_matrix:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-matrix.tar.gz"
BACKUP_LATEST_SYMLINK: matrix.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- matrix:/backup:ro
- {{ server.backup.folder }}/matrix:/archive
backup_mautrix_facebook:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-mautrix-facebook.tar.gz"
BACKUP_LATEST_SYMLINK: mautrix-facebook.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- mautrix_facebook:/backup:ro
- {{ server.backup.folder }}/matrix:/archive
backup_mautrix_instagram:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-mautrix-instagram.tar.gz"
BACKUP_LATEST_SYMLINK: mautrix-instagram.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- mautrix_instagram:/backup:ro
- {{ server.backup.folder }}/matrix:/archive
backup_mautrix_discord:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-mautrix-discord.tar.gz"
BACKUP_LATEST_SYMLINK: mautrix-discord.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- mautrix_discord:/backup:ro
- {{ server.backup.folder }}/matrix:/archive
volumes:
db:
matrix:
mautrix_facebook:
mautrix_instagram:
mautrix_discord:
networks:
internal:
proxy:
external: true

23
roles/n8n/tasks/base.yml Normal file
View file

@ -0,0 +1,23 @@
---
- name: Check n8n directory exists
ansible.builtin.file:
path: n8n
state: directory
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "n8n/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: n8n_copy_templates_results
- name: Update and restart container
community.docker.docker_compose:
project_src: n8n
state: present
pull: true
restarted: "{{ n8n_copy_templates_results.changed }}"
become: true

8
roles/n8n/tasks/main.yml Normal file
View file

@ -0,0 +1,8 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.n8n.n8n_archive and restore.n8n.db_archive

View file

@ -0,0 +1,43 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.n8n.n8n_archive }}', dest: 'n8n_n8n.tar.gz' }
- { src: '{{ restore.n8n.db_archive }}', dest: 'n8n_db.tar.gz' }
- name: Stop n8n container
community.docker.docker_compose:
project_src: n8n
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './n8n_n8n.tar.gz', volume: 'n8n_n8n' }
- { path: './n8n_db.tar.gz', volume: 'n8n_db' }
become: true
- name: Start n8n container
community.docker.docker_compose:
project_src: n8n
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'n8n_n8n.tar.gz' }
- { path: 'n8n_db.tar.gz' }

View file

@ -1,50 +1,77 @@
version: '3'
services:
db:
image: postgres:14
restart: always
container_name: n8n_db
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=n8n
- POSTGRES_DB=n8n
- POSTGRES_USER=n8n
n8n:
image: n8nio/n8n
restart: always
container_name: n8n
networks:
- internal
- proxy
volumes:
- n8n:/home/node/.n8n
labels:
- "traefik.enable=true"
- "traefik.http.routers.n8n-secure.entrypoints=https"
- "traefik.http.routers.n8n-secure.rule=Host(`n8n.mrdev023.fr`)"
- "traefik.http.routers.n8n-secure.tls=true"
- "traefik.http.routers.n8n-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.whoami-secure.service=whoami"
# - "traefik.http.services.whoami.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
environment:
- DB_TYPE=postgresdb
- DB_POSTGRESDB_PORT=5432
- DB_POSTGRESDB_HOST=db
- DB_POSTGRESDB_DATABASE=n8n
- DB_POSTGRESDB_USER=n8n
- DB_POSTGRESDB_PASSWORD=n8n
volumes:
n8n:
db:
networks:
internal:
proxy:
external: true
version: '3'
services:
db:
image: postgres:14
restart: always
container_name: n8n_db
networks:
- internal
volumes:
- db:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=n8n
- POSTGRES_DB=n8n
- POSTGRES_USER=n8n
n8n:
image: n8nio/n8n
restart: always
container_name: n8n
networks:
- internal
- proxy
volumes:
- n8n:/home/node/.n8n
labels:
- "traefik.enable=true"
- "traefik.http.routers.n8n-secure.entrypoints=https"
- "traefik.http.routers.n8n-secure.rule=Host(`n8n.{{ server.domain }}`)"
- "traefik.http.routers.n8n-secure.tls=true"
- "traefik.http.routers.n8n-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.whoami-secure.service=whoami"
# - "traefik.http.services.whoami.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
environment:
- DB_TYPE=postgresdb
- DB_POSTGRESDB_PORT=5432
- DB_POSTGRESDB_HOST=db
- DB_POSTGRESDB_DATABASE=n8n
- DB_POSTGRESDB_USER=n8n
- DB_POSTGRESDB_PASSWORD=n8n
# BACKUP
backup_n8n:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-n8n.tar.gz"
BACKUP_LATEST_SYMLINK: n8n.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- n8n:/backup:ro
- {{ server.backup.folder }}/n8n:/archive
backup_db:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-db.tar.gz"
BACKUP_LATEST_SYMLINK: db.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- db:/backup:ro
- {{ server.backup.folder }}/n8n:/archive
volumes:
n8n:
db:
networks:
internal:
proxy:
external: true

View file

@ -0,0 +1,41 @@
---
- name: Check protonmail directory exists
ansible.builtin.file:
path: protonmail
state: directory
- name: Copy protonmail conf
ansible.builtin.copy:
backup: true
src: .
dest: protonmail/
register: protonmail_copy_files_results
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "protonmail/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: protonmail_copy_templates_results
- name: Create protonmail network
community.docker.docker_network:
name: protonmail
state: present
become: true
- name: Show message if not initialized
ansible.builtin.debug:
msg: Please run init.sh in the protonmail folder, set protonmail.initialized to true, then re-run the protonmail tasks
when: not protonmail.initialized
- name: Update and restart container
community.docker.docker_compose:
project_src: protonmail
state: present
pull: true
restarted: "{{ protonmail.initialized and (protonmail_copy_templates_results.changed or protonmail_copy_files_results.changed) }}"
become: true
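The debug task above points at init.sh, which is copied with the role's files but not shown in this diff; it handles the interactive ProtonMail Bridge login. After that one-time step, the flag can be flipped from the control machine and only this role re-run. A sketch (the exact behaviour of init.sh is assumed, not shown here):
```bash
# On the target host: one-time interactive bridge login (init.sh ships with the role's files)
cd ~/protonmail && ./init.sh

# From the control machine: mark the bridge as initialized and re-run just this role
ansible-playbook playbook.yml --ask-become-pass --tags protonmail \
  -e '{"protonmail": {"initialized": true}}'
```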

View file

@ -0,0 +1,8 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.protonmail.protonmail_archive

View file

@ -0,0 +1,40 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.protonmail.protonmail_archive }}', dest: 'protonmail_protonmail.tar.gz' }
- name: Stop protonmail container
community.docker.docker_compose:
project_src: protonmail
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './protonmail_protonmail.tar.gz', volume: 'protonmail_protonmail' }
become: true
- name: Start protonmail container
community.docker.docker_compose:
project_src: protonmail
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'protonmail_protonmail.tar.gz' }

View file

@ -0,0 +1,32 @@
version: '3'
services:
protonmail-bridge:
image: shenxn/protonmail-bridge
restart: always
container_name: protonmail-bridge
networks:
- protonmail
volumes:
- protonmail:/root
# BACKUP
backup_protonmail:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-protonmail.tar.gz"
BACKUP_LATEST_SYMLINK: protonmail.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- protonmail:/backup:ro
- {{ server.backup.folder }}/protonmail:/archive
volumes:
protonmail:
networks:
protonmail:
external: true

14
roles/ssh/tasks/base.yml Normal file
View file

@ -0,0 +1,14 @@
---
- name: Copy conf
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
group: root
mode: u=rwx,g=rx,o=rx
loop:
- { src: 'port.conf.j2', dest: '/etc/ssh/sshd_config.d/99-port.conf' }
register: ssh_templates_results
become: yes
# TODO: Reload ssh configuration and reconnect to new port
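A minimal sketch of what the TODO above could look like once implemented; restarting sshd drops the current connection when the port actually changes, which is presumably why it is left out of this commit (the task below is illustrative, not committed code):
```yaml
- name: Restart sshd to pick up the new port
  ansible.builtin.service:
    name: sshd
    state: restarted
  when: ssh_templates_results.changed
  become: yes
```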

4
roles/ssh/tasks/main.yml Normal file
View file

@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: base.yml
name: base

View file

@ -0,0 +1,3 @@
# Change the default SSH server listening port
Port {{ server.ssh_port }}

View file

@ -0,0 +1,37 @@
---
- name: Check traefik directory exists
ansible.builtin.file:
path: traefik
state: directory
- name: Copy traefik conf
ansible.builtin.copy:
backup: true
src: .
dest: traefik/
register: traefik_copy_files_results
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "traefik/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
- { src: 'config/traefik.yml.j2', dest: 'config/traefik.yml' }
register: traefik_copy_templates_results
- name: Create proxy network
community.docker.docker_network:
name: proxy
state: present
become: true
- name: Update and restart container
community.docker.docker_compose:
project_src: traefik
state: present
pull: true
restarted: "{{ traefik_copy_files_results.changed or traefik_copy_templates_results.changed }}"
become: true

View file

@ -0,0 +1,8 @@
---
- ansible.builtin.import_tasks: base.yml
name: base
- ansible.builtin.import_tasks: restore.yml
name: restore
when: restore.traefik.prometheus_archive and restore.traefik.grafana_archive

View file

@ -0,0 +1,43 @@
---
- name: Copy archives
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: '{{ restore.traefik.prometheus_archive }}', dest: 'traefik_prometheus.tar.gz' }
- { src: '{{ restore.traefik.grafana_archive }}', dest: 'traefik_grafana.tar.gz' }
- name: Stop traefik container
community.docker.docker_compose:
project_src: traefik
state: absent
become: true
- name: Restore backup
community.docker.docker_container:
name: volume_restore
image: volume_restore:latest
auto_remove: yes
tty: true
volumes:
- "{{ item.path }}:/backup/archive.tar.gz"
- "{{ item.volume }}:/backup/dest"
loop:
- { path: './traefik_prometheus.tar.gz', volume: 'traefik_prometheus' }
- { path: './traefik_grafana.tar.gz', volume: 'traefik_grafana' }
become: true
- name: Start traefik container
community.docker.docker_compose:
project_src: traefik
state: present
become: true
- name: Remove archive
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop:
- { path: 'traefik_prometheus.tar.gz' }
- { path: 'traefik_grafana.tar.gz' }

roles/traefik/templates/config/traefik.yml.j2
View file

@@ -1,51 +1,53 @@
log:
level: DEBUG
api:
dashboard: true
accessLog:
filePath: "/var/log/traefik/access.log"
format: json
entryPoints:
http:
address: ":80"
http:
redirections:
entryPoint:
to: https
scheme: https
https:
address: ":443"
metrics:
address: ":8080"
metrics:
prometheus:
entryPoint: metrics
buckets:
- 0.1
- 0.3
- 1.2
- 5.0
addEntryPointsLabels: true
addServicesLabels: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
http:
endpoint: "http://http_provider/dynamic_conf.yaml"
certificatesResolvers:
sslResolver:
acme:
email: florian.richer.97@outlook.com
tlschallenge: {}
storage: acme.json
keyType: RSA4096
#caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
httpChallenge:
entryPoint: http
log:
level: DEBUG
api:
dashboard: true
accessLog:
filePath: "/var/log/traefik/access.log"
format: json
entryPoints:
http:
address: ":80"
http:
redirections:
entryPoint:
to: https
scheme: https
https:
address: ":443"
metrics:
address: ":8080"
metrics:
prometheus:
entryPoint: metrics
buckets:
- 0.1
- 0.3
- 1.2
- 5.0
addEntryPointsLabels: true
addServicesLabels: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
http:
endpoint: "http://http_provider/dynamic_conf.yaml"
certificatesResolvers:
sslResolver:
acme:
email: {{ acme.email }}
tlschallenge: {}
storage: acme.json
keyType: RSA4096
{% if acme.debug %}
caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
{% endif %}
httpChallenge:
entryPoint: http
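The templated ACME block relies on an `acme` variable carrying an email address and a debug switch that points Traefik at the Let's Encrypt staging CA. A minimal sketch of that variable (values are placeholders, not the repository's actual settings) could look like:

```yaml
# Assumed structure backing the template above; values are illustrative only.
acme:
  email: admin@example.org
  debug: false   # set to true to use the ACME staging endpoint while testing
```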

roles/traefik/templates/docker-compose.yml.j2
View file

@@ -26,7 +26,7 @@ services:
- "traefik.http.middlewares.traefik-auth.basicauth.users=mrdev023:$$2y$$05$$t51tXUW6zO9dndSK1JEFS.utJ3th/RYVSgDlouOZhUigjbkTX1zQC$$"
- "traefik.http.middlewares.traefik-stripprefix.stripprefix.prefixes=/traefik"
- "traefik.http.routers.traefik-secure.entrypoints=https"
- "traefik.http.routers.traefik-secure.rule=Host(`dash.mrdev023.fr`) && (PathPrefix(`/traefik`) || PathPrefix(`/api`))"
- "traefik.http.routers.traefik-secure.rule=Host(`dash.{{ server.domain }}`) && (PathPrefix(`/traefik`) || PathPrefix(`/api`))"
- "traefik.http.middlewares.tls-rep.redirectregex.permanent=true"
- "traefik.http.middlewares.tls-header.headers.SSLRedirect=true"
- "traefik.http.middlewares.tls-header.headers.forceSTSHeader=true"
@@ -56,7 +56,7 @@ services:
- /etc/localtime:/etc/localtime:ro
command:
- "--web.route-prefix=/"
- "--web.external-url=https://dash.mrdev023.fr/prometheus"
- "--web.external-url=https://dash.{{ server.domain }}/prometheus"
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/usr/share/prometheus/console_libraries"
@@ -69,7 +69,7 @@ services:
- "traefik.http.middlewares.prometheus-auth.basicauth.users=mrdev023:$$2y$$05$$t51tXUW6zO9dndSK1JEFS.utJ3th/RYVSgDlouOZhUigjbkTX1zQC$$"
- "traefik.http.middlewares.prometheus-stripprefix.stripprefix.prefixes=/prometheus"
- "traefik.http.routers.prometheus-secure.entrypoints=https"
- "traefik.http.routers.prometheus-secure.rule=Host(`dash.mrdev023.fr`) && PathPrefix(`/prometheus`)"
- "traefik.http.routers.prometheus-secure.rule=Host(`dash.{{ server.domain }}`) && PathPrefix(`/prometheus`)"
- "traefik.http.routers.prometheus-secure.middlewares=tls-chain,prometheus-stripprefix,prometheus-auth"
- "traefik.http.routers.prometheus-secure.tls=true"
- "traefik.http.routers.prometheus-secure.tls.certresolver=sslResolver"
@@ -98,7 +98,7 @@ services:
- "traefik.http.middlewares.grafana-auth.basicauth.users=mrdev023:$$2y$$05$$t51tXUW6zO9dndSK1JEFS.utJ3th/RYVSgDlouOZhUigjbkTX1zQC$$"
- "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/grafana"
- "traefik.http.routers.grafana-secure.entrypoints=https"
- "traefik.http.routers.grafana-secure.rule=Host(`dash.mrdev023.fr`) && PathPrefix(`/grafana`)"
- "traefik.http.routers.grafana-secure.rule=Host(`dash.{{ server.domain }}`) && PathPrefix(`/grafana`)"
- "traefik.http.routers.grafana-secure.middlewares=tls-chain,grafana-stripprefix,grafana-auth"
- "traefik.http.routers.grafana-secure.tls=true"
- "traefik.http.routers.grafana-secure.tls.certresolver=http"
@@ -115,6 +115,33 @@ services:
volumes:
- ./config/dynamic_conf.yaml:/usr/local/apache2/htdocs/dynamic_conf.yaml
# BACKUP
backup_prometheus:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-prometheus.tar.gz"
BACKUP_LATEST_SYMLINK: prometheus.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- prometheus:/backup:ro
- {{ server.backup.folder }}/traefik:/archive
backup_grafana:
image: offen/docker-volume-backup:latest
restart: always
environment:
BACKUP_CRON_EXPRESSION: "{{ server.backup.cron_expression }}"
BACKUP_FILENAME: "{{ server.backup.filename_date_format }}-grafana.tar.gz"
BACKUP_LATEST_SYMLINK: grafana.latest.tar.gz
BACKUP_EXCLUDE_REGEXP: "\\.log$$"
BACKUP_RETENTION_DAYS: "{{ server.backup.retention_days }}"
volumes:
- grafana:/backup:ro
- {{ server.backup.folder }}/traefik:/archive
networks:
providers:
internal:

11
roles/volume_restore/files/Dockerfile Normal file
View file

@@ -0,0 +1,11 @@
FROM alpine:latest
RUN apk add --update --no-cache tar
RUN mkdir -p /backup/dest
COPY ./extract.sh /usr/bin/extract
RUN chmod +x /usr/bin/extract
CMD ["restore"]

5
roles/volume_restore/files/README.md Normal file
View file

@@ -0,0 +1,5 @@
# HOW TO USE IT
```bash
docker run -v [PATH_TO_ARCHIVE]:/backup/archive.tar.gz -v [DIST_VOLUME]:/backup/dest volume_restore:latest
```

3
roles/volume_restore/files/extract.sh Normal file
View file

@@ -0,0 +1,3 @@
#!/bin/sh
# Clear any previous contents of the destination volume, then unpack the
# archive into it, dropping the tarball's top-level directory.
rm -rf /backup/dest/*
tar -xf /backup/archive.tar.gz --strip-components=1 -C /backup/dest

18
roles/volume_restore/tasks/base.yml Normal file
View file

@@ -0,0 +1,18 @@
---
- name: Copy conf
ansible.builtin.copy:
backup: true
src: .
dest: volume_restore/
register: volume_restore_copy
- name: Build image
community.docker.docker_image:
name: volume_restore
tag: latest
build:
path: volume_restore
source: build
become: yes
when: volume_restore_copy.changed
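Note that `community.docker.docker_image` with `source: build` skips the build when an image with that name and tag already exists, so a changed build context alone may not trigger a rebuild. A hedged variant of the task above, reusing the registered copy result, would force the rebuild only when the files changed:

```yaml
# Sketch: rebuild the local image whenever the copied build context changed.
- name: Build image
  community.docker.docker_image:
    name: volume_restore
    tag: latest
    build:
      path: volume_restore
    source: build
    force_source: "{{ volume_restore_copy.changed }}"
  become: yes
```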

4
roles/volume_restore/tasks/main.yml Normal file
View file

@@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: base.yml
name: base

23
roles/whoami/tasks/base.yml Normal file
View file

@@ -0,0 +1,23 @@
---
- name: Ensure whoami directory exists
ansible.builtin.file:
path: whoami
state: directory
- name: Copy template conf
ansible.builtin.template:
backup: true
src: "{{ item.src }}"
dest: "whoami/{{ item.dest }}"
loop:
- { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' }
register: whoami_copy_templates_results
- name: Update and restart container
community.docker.docker_compose:
project_src: whoami
state: present
pull: true
restarted: "{{ whoami_copy_templates_results.changed }}"
become: true

4
roles/whoami/tasks/main.yml Normal file
View file

@@ -0,0 +1,4 @@
---
- ansible.builtin.import_tasks: base.yml
name: base

roles/whoami/templates/docker-compose.yml.j2
View file

@@ -1,22 +1,22 @@
version: '3'
services:
whoami:
image: "containous/whoami"
restart: always
container_name: "whoami"
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.whoami-secure.entrypoints=https"
- "traefik.http.routers.whoami-secure.rule=Host(`whoami.mrdev023.fr`)"
- "traefik.http.routers.whoami-secure.tls=true"
- "traefik.http.routers.whoami-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.whoami-secure.service=whoami"
# - "traefik.http.services.whoami.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true
version: '3'
services:
whoami:
image: "containous/whoami"
restart: always
container_name: "whoami"
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.whoami-secure.entrypoints=https"
- "traefik.http.routers.whoami-secure.rule=Host(`whoami.{{ server.domain }}`)"
- "traefik.http.routers.whoami-secure.tls=true"
- "traefik.http.routers.whoami-secure.tls.certresolver=sslResolver"
# - "traefik.http.routers.whoami-secure.service=whoami"
# - "traefik.http.services.whoami.loadbalancer.server.port=9002"
- "traefik.docker.network=proxy"
networks:
proxy:
external: true

4
run.sh Executable file
View file

@@ -0,0 +1,4 @@
#!/bin/sh
echo "START VM"
virsh --connect qemu:///session start FedoraServer --console

3
work/base.qcow2 Normal file
View file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c49f4048c39f537795e51934cbf8fc751c4f535326dc528aa52563421f8ec6a
size 1340997632
