Compare commits


23 Commits

Author SHA1 Message Date
Gleb Koval 15acf69833  Fix unifi VM networking
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m23s
    2024-07-13 18:32:11 +01:00
Gleb Koval 9d2eec5c8f  Set IP of unifi
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m34s
    2024-07-13 18:29:09 +01:00
Gleb Koval fcac9954f2  :-)
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m13s
    2024-07-13 18:03:08 +01:00
Gleb Koval b8f1fa1ee3  Re-deploy unifi
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 9m56s
    2024-07-13 17:53:03 +01:00
Gleb Koval 102bb6ae4b  pain
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 3m52s
    2024-07-13 17:41:12 +01:00
Gleb Koval a8a1b0dac3  Unifi still not working
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 7m32s
    2024-07-13 17:21:52 +01:00
Gleb Koval 5163f3906f  Wait for longer for unifi VM initial
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 9m46s
    Because this stupid network doesn't work at all
    2024-07-13 17:07:22 +01:00
Gleb Koval 504939ce0d  unfi VM
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 7m25s
    2024-07-13 16:13:39 +01:00
Gleb Koval 73ccc218e8  infisical-python dependency
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 9m45s
    2024-06-22 00:47:33 +01:00
Gleb Koval 41967af509  Update infisical ansible
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m11s
    2024-06-22 00:44:08 +01:00
Gleb Koval eedd640d27  Upgrade immich to 1.106
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 3m22s
    2024-06-22 00:19:42 +01:00
Gleb Koval 1a35bc2de5  Backups VM: Fix labels
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m22s
    2024-06-08 01:28:56 +01:00
Gleb Koval 30413ef525  Backups VM: Remove bad handler
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m52s
    2024-06-08 01:25:26 +01:00
Gleb Koval a35d8ae32c  Backups VM
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m12s
    2024-06-08 01:22:08 +01:00
Gleb Koval c36095851b  Fix #53: Trust all proxies
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m32s
    2024-03-30 02:03:39 +00:00
Gleb Koval 6b227ab45a  Use older infisical library
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m48s
    2024-03-30 01:55:13 +00:00
Gleb Koval 899f8614a0  Fix #53: Use newer infisical-python library (#59)
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m6s
    Reviewed-on: #59
    2024-03-30 01:50:12 +00:00
Gleb Koval b532c40bd1  Fix #53: New docker compose down in Ansible fix (#58)
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 1m59s
    Only run docker compose down when project files exist.
    Reviewed-on: #58
    2024-03-30 01:36:20 +00:00
Gleb Koval 94a836f135  Fix #53: Typo (#57)
    Infrastructure / Check and run Ansbile playbooks (push): Has been cancelled
    Reviewed-on: #57
    2024-03-30 01:19:27 +00:00
Gleb Koval 3b22a7c7d2  Fix #53: Fix Ansible handlers (again) (#56)
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m12s
    Reviewed-on: #56
    2024-03-30 01:14:34 +00:00
Gleb Koval 621e634798  Fix #53: Trigger all playbooks (#55)
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m9s
    Reviewed-on: #55
    2024-03-30 00:29:59 +00:00
Gleb Koval fe3be248ad  Fix #53: Incorrect usage of Ansible handlers (#54)
    Infrastructure / Check and run Ansbile playbooks (push): Successful in 2m15s
    Reviewed-on: #54
    2024-03-30 00:23:13 +00:00
Gleb Koval 22b5241751  Finance VM (#53)
    Infrastructure / Check and run Ansbile playbooks (push): Failing after 2m3s
    Finance manager service using Firefly III.
    Reviewed-on: #53
    2024-03-30 00:17:11 +00:00
37 changed files with 1435 additions and 209 deletions

View File

@@ -1,4 +0,0 @@
strict: true
use_default_rules: true
skip_list:
- args[module]

View File

@@ -29,14 +29,13 @@ jobs:
          apt update
          apt install -y python3-pip
          pip3 install -r requirements.txt
-          ansible-galaxy collection install -r requirements.yml --force
-      - name: Run ansible-lint
-        uses: ansible/ansible-lint@v6
+          ansible-galaxy collection install community.general infisical.vault
      - name: Check playbooks
        run: |
-          ansible-playbook --inventory ./inventory --syntax-check infra/**/*playbook.yaml
+          for file in $(find . -wholename "*/infra/*playbook.yaml" -type f); do
+            ansible-playbook --inventory ./inventory --syntax-check "$file"
+          done
      - name: Get changed playbooks
        id: files
@@ -74,8 +73,7 @@ jobs:
          PROXMOX_TOKEN_SECRET: ${{ secrets.PROXMOX_TOKEN_SECRET }}
          SSH_PUBLIC: ${{ secrets.SSH_PUBLIC }}
          SMTP_PASSWORD: ${{ secrets.SMTP_PASSWORD }}
-          INFISICAL_ENCRYPTION_KEY: ${{ secrets.INFISICAL_ENCRYPTION_KEY }}
-          INFISICAL_AUTH_SECRET: ${{ secrets.INFISICAL_AUTH_SECRET }}
-          INFISICAL_TOKEN: ${{ secrets.INFISICAL_TOKEN }}
+          UNIVERSAL_AUTH_MACHINE_IDENTITY_CLIENT_ID: ${{ secrets.INFISICAL_CLIENT_ID }}
+          UNIVERSAL_AUTH_MACHINE_IDENTITY_CLIENT_SECRET: ${{ secrets.INFISICAL_CLIENT_SECRET }}
          INFISICAL_URL: https://secrets.koval.net
        run: ansible-playbook --inventory ./inventory ${{ steps.playbooks.outputs.to_run }} -vv
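Note: the playbooks in this compare call the infisical.vault.read_secrets lookup without passing credentials explicitly, so the universal-auth machine identity variables exported above are presumably picked up from the environment by the collection. A minimal sketch of such a task, with a placeholder path and secret name rather than values from this repository:

    - name: Read an example secret via the machine identity
      ansible.builtin.debug:
        msg: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/example', secret_name='EXAMPLE')['value'] }}"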

View File

@@ -0,0 +1,86 @@
- name: Provision backups Proxmox VM
hosts: backups
connection: ansible.builtin.local
gather_facts: false
vars:
api_user: "{{ lookup('ansible.builtin.env', 'PROXMOX_USER') }}"
api_host: "{{ lookup('ansible.builtin.env', 'PROXMOX_HOST' ) }}"
api_token_id: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_ID') }}"
api_token_secret: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_SECRET') }}"
ssh_public: "{{ lookup('ansible.builtin.env', 'SSH_PUBLIC') }}"
vmname: "{{ inventory_hostname | regex_replace('^([^\\.]+)\\..+$', '\\1') }}"
node: pve
module_defaults:
community.general.proxmox_kvm:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
node: "{{ node }}"
community.general.proxmox_nic:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
community.general.proxmox_disk:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
tasks:
# Initial setup
- name: Create VM
community.general.proxmox_kvm:
clone: "{{ node }}-base"
storage: nvme
- name: Wait for status
community.general.proxmox_kvm:
state: current
register: vm
retries: 30
delay: 10
until: vm.status is defined
# Networking and initial config
- name: Add PUB NIC
community.general.proxmox_nic:
interface: net0
firewall: false
bridge: PUB
- name: Add SRV NIC
community.general.proxmox_nic:
interface: net1
firewall: false
bridge: SRV
# VM Configuration
- name: Create CD disk
community.general.proxmox_disk:
disk: ide2
media: cdrom
iso_image: none
- name: Create root disk
community.general.proxmox_disk:
disk: scsi0
backup: true
storage: nvme
size: 32
- name: Create data disk
community.general.proxmox_disk:
disk: scsi1
backup: false
storage: zfs
size: 2048
- name: Update VM
community.general.proxmox_kvm:
update: true
boot: order=scsi0;ide2
agent: enabled=1
tags:
- provisioned
onboot: true
cores: 4
memory: 4096

View File

@@ -36,7 +36,7 @@
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
-      notify: Initial boot
+      register: create
    - name: Wait for status
      community.general.proxmox_kvm:
        state: current
@@ -65,8 +65,21 @@
        ipconfig0: ip=dhcp,ip6=auto
        ipconfig1: ip=dhcp
-    - name: Force all notified handlers to run
-      ansible.builtin.meta: flush_handlers
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Initial boot
+      when: create.changed is true
+      block:
+        - name: Start
+          community.general.proxmox_kvm:
+            state: started
+          register: start
+        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
+          ansible.builtin.wait_for:
+            timeout: 90
    # VM Configuration
    - name: Resize root disk
@@ -95,18 +108,3 @@
      community.general.proxmox_kvm:
        state: restarted
        timeout: 60
-  handlers:
-    # Initial boot
-    # For some reason debian cloud images don't use
-    # cloud-init for networking on first boot (cloud-init files
-    # are regenerated AFTER networking starts). But we need the
-    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
-    - name: Initial boot
-      block:
-        - name: Start
-          community.general.proxmox_kvm:
-            state: started
-          register: start
-        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
-          ansible.builtin.wait_for:
-            timeout: 90
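For reference, a minimal sketch of the first-boot pattern this diff introduces: register the result of the clone, then run the one-off start-and-wait block only when a VM was actually created. The Proxmox API parameters are assumed to be supplied via module_defaults, as in the provisioning playbooks shown elsewhere in this compare; other task parameters are abbreviated.

    - name: Create VM
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
      register: create
    - name: Initial boot
      when: create.changed is true
      block:
        - name: Start
          community.general.proxmox_kvm:
            state: started
        - name: Wait 1.5 min # initial apt update, apt upgrade, cloud-init
          ansible.builtin.wait_for:
            timeout: 90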

View File

@@ -1,4 +1,4 @@
-- name: Install docker
+- name: Install software
  hosts: cloud
  gather_facts: false
  tasks:

View File

@@ -1,13 +0,0 @@
- name: Cleanup old ~/nextcloud directory
hosts: cloud
gather_facts: false
vars:
app: nextcloud
tasks:
- name: Wait for connection
ansible.builtin.wait_for_connection:
timeout: 300
- name: Delete nextcloud directory
ansible.builtin.file:
path: "$HOME/{{ app }}"
state: absent

View File

@@ -2,29 +2,21 @@
  hosts: cloud
  gather_facts: false
  vars:
-    container: nextcloud-aio-mastercontainer
+    app: nextcloud
  tasks:
    - name: Wait for connection
      ansible.builtin.wait_for_connection:
        timeout: 300
-    - name: Deploy master container
-      community.docker.docker_container:
-        image: nextcloud/all-in-one:latest
-        recreate: true
-        state: started
-        restart_policy: unless-stopped
-        init: true
-        name: "{{ container }}"
-        published_ports:
-          - 8080:8080
-        env:
-          NEXTCLOUD_UPLOAD_LIMIT: 16G
-          NEXTCLOUD_MAX_TIME: "7200"
-          NEXTCLOUD_ADDITIONAL_APKS: imagemagick ffmpeg
-          APACHE_PORT: "11000"
-          APACHE_IP_BINDING: "0.0.0.0"
-          TZ: Europe/London
-          AIO_DISABLE_BACKUP_SECTION: "true"
-        volumes:
-          - nextcloud_aio_mastercontainer:/mnt/docker-aio-config
-          - /var/run/docker.sock:/var/run/docker.sock:ro
+    - name: Get user
+      ansible.builtin.user:
+        name: debian
+      register: user
+    - name: Copy project
+      ansible.builtin.copy:
+        src: "./{{ app }}"
+        dest: "{{ user.home }}"
+        mode: "0744"
+    - name: Re-deploy
+      ansible.builtin.command: bash all-in-one.sh
+      args:
+        chdir: "{{ user.home }}/{{ app }}"

View File

@@ -0,0 +1,71 @@
- name: Decommission Immich
hosts: cloud
gather_facts: false
vars:
app: immich
api_user: "{{ lookup('ansible.builtin.env', 'PROXMOX_USER') }}"
api_host: "{{ lookup('ansible.builtin.env', 'PROXMOX_HOST' ) }}"
api_token_id: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_ID') }}"
api_token_secret: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_SECRET') }}"
vmname: "{{ inventory_hostname | regex_replace('^([^\\.]+)\\..+$', '\\1') }}"
node: pve
module_defaults:
community.general.proxmox_kvm:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
node: "{{ node }}"
community.general.proxmox_disk:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
tasks:
- name: Wait for connection
ansible.builtin.wait_for_connection:
timeout: 300
- name: Get user
ansible.builtin.user:
name: debian
register: user
- name: Docker compose down
ansible.builtin.command: docker compose down
args:
chdir: "{{ user.home }}/{{ app }}"
ignore_errors: true
- name: Remove docker volumes
ansible.builtin.command: docker compose down --volumes
args:
chdir: "{{ user.home }}/{{ app }}"
ignore_errors: true
- name: Remove config directory
ansible.builtin.file:
path: "{{ user.home }}/{{ app }}"
state: absent
- name: Destroy media disk
community.general.proxmox_disk:
disk: scsi2
state: absent
delegate_to: localhost
- name: Remove media mount
ansible.posix.mount:
src: /dev/disk/by-path/pci-0000:00:05.0-scsi-0:0:0:2-part1
path: /mnt/media
fstype: ext4
opts: rw,errors=remount-ro,x-systemd.growfs
state: absent
become: true
- name: Remove media directory
ansible.builtin.file:
path: /mnt/media
state: absent
become: true
- name: Restart VM
community.general.proxmox_kvm:
state: restarted
timeout: 60
delegate_to: localhost

View File

@@ -0,0 +1,21 @@
#!/bin/bash
docker stop nextcloud-aio-mastercontainer || true
docker rm nextcloud-aio-mastercontainer || true
docker run \
--init \
--sig-proxy=false \
--name nextcloud-aio-mastercontainer \
--restart unless-stopped \
--publish 8080:8080 \
--env NEXTCLOUD_UPLOAD_LIMIT=16G \
--env NEXTCLOUD_MAX_TIME=7200 \
--env NEXTCLOUD_ADDITIONAL_APKS="imagemagick ffmpeg" \
--env APACHE_PORT=11000 \
--env APACHE_IP_BINDING=0.0.0.0 \
--env TZ=Europe/London \
--env AIO_DISABLE_BACKUP_SECTION=true \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
-d nextcloud/all-in-one:latest

View File

@@ -0,0 +1,113 @@
- name: Provision finance Proxmox VM
hosts: finance
connection: ansible.builtin.local
gather_facts: false
vars:
api_user: "{{ lookup('ansible.builtin.env', 'PROXMOX_USER') }}"
api_host: "{{ lookup('ansible.builtin.env', 'PROXMOX_HOST' ) }}"
api_token_id: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_ID') }}"
api_token_secret: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_SECRET') }}"
ssh_public: "{{ lookup('ansible.builtin.env', 'SSH_PUBLIC') }}"
vmname: "{{ inventory_hostname | regex_replace('^([^\\.]+)\\..+$', '\\1') }}"
node: pve2
module_defaults:
community.general.proxmox_kvm:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
node: "{{ node }}"
community.general.proxmox_nic:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
community.general.proxmox_disk:
api_user: "{{ api_user }}"
api_host: "{{ api_host }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
name: "{{ vmname }}"
tasks:
# Initial setup
- name: Create VM
community.general.proxmox_kvm:
clone: "{{ node }}-debian-12"
storage: nvme
notify:
- Start VM
- Wait
- name: Wait for status
community.general.proxmox_kvm:
state: current
register: vm
retries: 30
delay: 10
until: vm.status is defined
# Networking and initial config
- name: Add PUB NIC
community.general.proxmox_nic:
interface: net0
firewall: false
bridge: PUB
- name: Add SRV NIC
community.general.proxmox_nic:
interface: net1
firewall: false
bridge: SRV
- name: Configure cloud-init
community.general.proxmox_kvm:
update: true
ciuser: debian
sshkeys: "{{ ssh_public }}"
ipconfig:
ipconfig0: ip=dhcp,ip6=auto
ipconfig1: ip=dhcp
- name: Force all notified handlers to run
ansible.builtin.meta: flush_handlers
# VM Configuration
- name: Resize root disk
community.general.proxmox_disk:
disk: scsi0
size: 16G
state: resized
- name: Create data disk
community.general.proxmox_disk:
disk: scsi1
backup: true
storage: nvme
size: 64
- name: Update VM
community.general.proxmox_kvm:
update: true
agent: enabled=1
tags:
- debian-12
- managed
onboot: true
cores: 4
memory: 8192
- name: Restart VM
community.general.proxmox_kvm:
state: restarted
timeout: 60
handlers:
# Initial boot
# For some reason debian cloud images don't use
# cloud-init for networking on first boot (cloud-init files
# are regenerated AFTER networking starts). But we need the
# hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
- name: Start VM
community.general.proxmox_kvm:
state: started
register: start
- name: Wait # Initial apt update, apt upgrade, cloud-init
ansible.builtin.wait_for:
timeout: 90

View File

@@ -0,0 +1,43 @@
- name: Initialise VM
hosts: finance
gather_facts: false
tasks:
- name: Wait for connection
ansible.builtin.wait_for_connection:
timeout: 300
- name: Install system packages
ansible.builtin.apt:
update_cache: true
pkg:
- qemu-guest-agent
- parted
become: true
- name: Enable qemu-guest-agent
ansible.builtin.systemd:
name: qemu-guest-agent
state: started
enabled: true
become: true
- name: Create data partition
community.general.parted:
device: /dev/disk/by-path/pci-0000:00:05.0-scsi-0:0:0:1
label: gpt
name: data
number: 1
state: present
become: true
- name: Create data filesystem
community.general.filesystem:
dev: /dev/disk/by-path/pci-0000:00:05.0-scsi-0:0:0:1-part1
fstype: ext4
become: true
- name: Mount data partition
ansible.posix.mount:
src: /dev/disk/by-path/pci-0000:00:05.0-scsi-0:0:0:1-part1
path: /var/lib/docker
fstype: ext4
opts: rw,errors=remount-ro,x-systemd.growfs
state: mounted
become: true

View File

@@ -0,0 +1,48 @@
- name: Install docker
hosts: finance
gather_facts: false
tasks:
- name: Wait for connection
ansible.builtin.wait_for_connection:
timeout: 300
- name: Install dependencies
ansible.builtin.apt:
update_cache: true
pkg:
- curl
- python3-apt
- gpg
become: true
- name: Add docker key
ansible.builtin.apt_key:
url: https://download.docker.com/linux/debian/gpg
keyring: /etc/apt/keyrings/docker.gpg
become: true
- name: Add docker repo
ansible.builtin.apt_repository:
repo: deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian bookworm stable
become: true
- name: Install docker
ansible.builtin.apt:
update_cache: true
pkg:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
become: true
- name: Add user to docker group
ansible.builtin.user:
user: debian
groups:
- docker
append: true
become: true
- name: Enable docker
ansible.builtin.systemd:
name: docker
state: started
enabled: true
become: true

View File

@@ -0,0 +1,64 @@
- name: Deploy app
hosts: finance
gather_facts: false
vars:
app: firefly-iii
tasks:
- name: Wait for connection
ansible.builtin.wait_for_connection:
timeout: 300
- name: Check if project exists
ansible.builtin.stat:
path: "$HOME/{{ app }}"
register: project
- name: Docker compose down
when: project.stat.exists
community.docker.docker_compose_v2:
project_src: "$HOME/{{ app }}"
state: absent
- name: Copy project
ansible.builtin.copy:
src: "./{{ app }}"
dest: "$HOME"
mode: "0744"
- name: Replace APP_KEY secret
ansible.builtin.replace:
path: "$HOME/{{ app }}/.env"
regexp: "APP_KEY_VALUE"
replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/finance', secret_name='APP_KEY')['value'] }}"
- name: Replace DB secret
ansible.builtin.replace:
path: "$HOME/{{ app }}/.env"
regexp: "DB_PASSWORD_VALUE"
replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/finance', secret_name='DB_PASSWORD')['value'] }}"
- name: Replace cron token secret
ansible.builtin.replace:
path: "$HOME/{{ app }}/.env"
regexp: "STATIC_CRON_TOKEN_VALUE"
replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/finance', secret_name='STATIC_CRON_TOKEN')['value'] }}"
- name: Replace SMTP Password secret (app)
ansible.builtin.replace:
path: "$HOME/{{ app }}/.env"
regexp: "SMTP_PASSWORD_VALUE"
replace: "{{ lookup('ansible.builtin.env', 'SMTP_PASSWORD') }}"
- name: Replace Nordigen ID secret
ansible.builtin.replace:
path: "$HOME/{{ app }}/.importer.env"
regexp: "NORDIGEN_ID_VALUE"
replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/finance', secret_name='NORDIGEN_ID')['value'] }}"
- name: Replace Nordigen Key secret
ansible.builtin.replace:
path: "$HOME/{{ app }}/.importer.env"
regexp: "NORDIGEN_KEY_VALUE"
replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/finance', secret_name='NORDIGEN_KEY')['value'] }}"
- name: Replace SMTP Password secret (importer)
ansible.builtin.replace:
path: "$HOME/{{ app }}/.importer.env"
regexp: "SMTP_PASSWORD_VALUE"
replace: "{{ lookup('ansible.builtin.env', 'SMTP_PASSWORD') }}"
- name: Docker compose up -d
community.docker.docker_compose_v2:
project_src: "$HOME/{{ app }}"

View File

@@ -0,0 +1,349 @@
# You can leave this on "local". If you change it to production most console commands will ask for extra confirmation.
# Never set it to "testing".
APP_ENV=production
# Set to true if you want to see debug information in error screens.
APP_DEBUG=false
# This should be your email address.
# If you use Docker or similar, you can set this variable from a file by using SITE_OWNER_FILE
# The variable is used in some errors shown to users who aren't admin.
SITE_OWNER=gleb@koval.net
# The encryption key for your sessions. Keep this very secure.
# Change it to a string of exactly 32 chars or use something like `php artisan key:generate` to generate it.
# If you use Docker or similar, you can set this variable from a file by using APP_KEY_FILE
#
# Avoid the "#" character in your APP_KEY, it may break things.
#
APP_KEY=APP_KEY_VALUE
# Firefly III will launch using this language (for new users and unauthenticated visitors)
# For a list of available languages: https://github.com/firefly-iii/firefly-iii/tree/main/resources/lang
#
# If text is still in English, remember that not everything may have been translated.
DEFAULT_LANGUAGE=en_GB
# The locale defines how numbers are formatted.
# by default this value is the same as whatever the language is.
DEFAULT_LOCALE=equal
# Change this value to your preferred time zone.
# Example: Europe/Amsterdam
# For a list of supported time zones, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Europe/London
# TRUSTED_PROXIES is a useful variable when using Docker and/or a reverse proxy.
# Set it to ** and reverse proxies work just fine.
TRUSTED_PROXIES=*
# The log channel defines where your log entries go to.
# Several other options exist. You can use 'single' for one big fat error log (not recommended).
# Also available are 'syslog', 'errorlog' and 'stdout' which will log to the system itself.
# A rotating log option is 'daily', creates 5 files that (surprise) rotate.
# A cool option is 'papertrail' for cloud logging
# Default setting 'stack' will log to 'daily' and to 'stdout' at the same time.
LOG_CHANNEL=stack
# Log level. You can set this from least severe to most severe:
# debug, info, notice, warning, error, critical, alert, emergency
# If you set it to debug your logs will grow large, and fast. If you set it to emergency probably
# nothing will get logged, ever.
APP_LOG_LEVEL=notice
# Audit log level.
# The audit log is used to log notable Firefly III events on a separate channel.
# These log entries may contain sensitive financial information.
# The audit log is disabled by default.
#
# To enable it, set AUDIT_LOG_LEVEL to "info"
# To disable it, set AUDIT_LOG_LEVEL to "emergency"
AUDIT_LOG_LEVEL=emergency
#
# If you want, you can redirect the audit logs to another channel.
# Set 'audit_stdout', 'audit_syslog', 'audit_errorlog' to log to the system itself.
# Use audit_daily to log to a rotating file.
# Use audit_papertrail to log to papertrail.
#
# If you do this, the audit logs may be mixed with normal logs because the settings for these channels
# are often the same as the settings for the normal logs.
AUDIT_LOG_CHANNEL=
#
# Used when logging to papertrail:
# Also used when audit logs log to papertrail:
#
PAPERTRAIL_HOST=
PAPERTRAIL_PORT=
# Database credentials. Make sure the database exists. I recommend a dedicated user for Firefly III
# For other database types, please see the FAQ: https://docs.firefly-iii.org/references/faq/install/#i-want-to-use-sqlite
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
# Use "pgsql" for PostgreSQL
# Use "mysql" for MySQL and MariaDB.
# Use "sqlite" for SQLite.
DB_CONNECTION=mysql
DB_HOST=db
DB_PORT=3306
DB_DATABASE=firefly
DB_USERNAME=firefly
DB_PASSWORD=DB_PASSWORD_VALUE
# leave empty or omit when not using a socket connection
DB_SOCKET=
# MySQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MYSQL_USE_SSL=false
MYSQL_SSL_VERIFY_SERVER_CERT=true
# You need to set at least one of these options
MYSQL_SSL_CAPATH=/etc/ssl/certs/
MYSQL_SSL_CA=
MYSQL_SSL_CERT=
MYSQL_SSL_KEY=
MYSQL_SSL_CIPHER=
# PostgreSQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
PGSQL_SSL_MODE=prefer
PGSQL_SSL_ROOT_CERT=null
PGSQL_SSL_CERT=null
PGSQL_SSL_KEY=null
PGSQL_SSL_CRL_FILE=null
# more PostgreSQL settings
PGSQL_SCHEMA=public
# If you're looking for performance improvements, you could install memcached or redis
CACHE_DRIVER=file
SESSION_DRIVER=file
# If you set either of the options above to 'redis', you might want to update these settings too
# If you use Docker or similar, you can set REDIS_HOST_FILE, REDIS_PASSWORD_FILE or
# REDIS_PORT_FILE to set the value from a file instead of from an environment variable
# can be tcp or unix. http is not supported
REDIS_SCHEME=tcp
# use only when using 'unix' for REDIS_SCHEME. Leave empty otherwise.
REDIS_PATH=
# use only when using 'tcp' or 'http' for REDIS_SCHEME. Leave empty otherwise.
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
# Use only with Redis 6+ with proper ACL set. Leave empty otherwise.
REDIS_USERNAME=
REDIS_PASSWORD=
# always use quotes and make sure redis db "0" and "1" exists. Otherwise change accordingly.
REDIS_DB="0"
REDIS_CACHE_DB="1"
# Cookie settings. Should not be necessary to change these.
# If you use Docker or similar, you can set COOKIE_DOMAIN_FILE to set
# the value from a file instead of from an environment variable
# Setting samesite to "strict" may give you trouble logging in.
COOKIE_PATH="/"
COOKIE_DOMAIN=
COOKIE_SECURE=false
COOKIE_SAMESITE=lax
# If you want Firefly III to email you, update these settings
# For instructions, see: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/notifications/#email
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAIL_MAILER=smtp
MAIL_HOST=mx.koval.net
MAIL_PORT=587
MAIL_FROM=no-reply@koval.net
MAIL_USERNAME=no-reply@koval.net
MAIL_PASSWORD=SMTP_PASSWORD_VALUE
MAIL_ENCRYPTION=tls
MAIL_SENDMAIL_COMMAND=
# Other mail drivers:
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAILGUN_DOMAIN=
MAILGUN_SECRET=
# If you are on EU region in mailgun, use api.eu.mailgun.net, otherwise use api.mailgun.net
# If you use Docker or similar, you can set this variable from a file by appending it with _FILE
MAILGUN_ENDPOINT=api.mailgun.net
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MANDRILL_SECRET=
SPARKPOST_SECRET=
# Firefly III can send you the following messages.
SEND_ERROR_MESSAGE=true
# These messages contain (sensitive) transaction information:
SEND_REPORT_JOURNALS=true
# Set this value to true if you want to set the location of certain things, like transactions.
# Since this involves an external service, it's optional and disabled by default.
ENABLE_EXTERNAL_MAP=false
#
# Enable or disable exchange rate conversion. This function isn't used yet by Firefly III
#
ENABLE_EXCHANGE_RATES=false
# Set this value to true if you want Firefly III to download currency exchange rates
# from the internet. These rates are hosted by the creator of Firefly III inside
# an Azure Storage Container.
# Not all currencies may be available. Rates may be wrong.
ENABLE_EXTERNAL_RATES=true
# The map will default to this location:
MAP_DEFAULT_LAT=51
MAP_DEFAULT_LONG=0
MAP_DEFAULT_ZOOM=6
#
# Some objects have room for an URL, like transactions and webhooks.
# By default, the following protocols are allowed:
# http, https, ftp, ftps, mailto
#
# To change this, set your preferred comma separated set below.
# Be sure to include http, https and other default ones if you need to.
#
VALID_URL_PROTOCOLS=
#
# Firefly III authentication settings
#
#
# Firefly III supports a few authentication methods:
# - 'web' (default, uses built in DB)
# - 'remote_user_guard' for Authelia etc
# Read more about these settings in the documentation.
# https://docs.firefly-iii.org/how-to/firefly-iii/advanced/authentication/
#
# LDAP is no longer supported :(
#
AUTHENTICATION_GUARD=web
#
# Remote user guard settings
#
AUTHENTICATION_GUARD_HEADER=REMOTE_USER
AUTHENTICATION_GUARD_EMAIL=
#
# Firefly III generates a basic keypair for your OAuth tokens.
# If you want, you can overrule the key with your own (secure) value.
# It's also possible to set PASSPORT_PUBLIC_KEY_FILE or PASSPORT_PRIVATE_KEY_FILE
# if you're using Docker secrets or similar solutions for secret management
#
PASSPORT_PRIVATE_KEY=
PASSPORT_PUBLIC_KEY=
#
# Extra authentication settings
#
CUSTOM_LOGOUT_URL=
# You can disable the X-Frame-Options header if it interferes with tools like
# Organizr. This is at your own risk. Applications running in frames run the risk
# of leaking information to their parent frame.
DISABLE_FRAME_HEADER=false
# You can disable the Content Security Policy header when you're using an ancient browser
# or any version of Microsoft Edge / Internet Explorer (which amounts to the same thing really)
# This leaves you with the risk of not being able to stop XSS bugs should they ever surface.
# This is at your own risk.
DISABLE_CSP_HEADER=false
# If you wish to track your own behavior over Firefly III, set valid analytics tracker information here.
# Nobody uses this except for me on the demo site. But hey, feel free to use this if you want to.
# Do not prepend the TRACKER_URL with http:// or https://
# The only tracker supported is Matomo.
# You can set the following variables from a file by appending them with _FILE:
TRACKER_SITE_ID=
TRACKER_URL=
#
# Firefly III supports webhooks. These are security sensitive and must be enabled manually first.
#
ALLOW_WEBHOOKS=false
#
# The static cron job token can be useful when you use Docker and wish to manage cron jobs.
# 1. Set this token to any 32-character value (this is important!).
# 2. Use this token in the cron URL instead of a user's command line token that you can find in /profile
#
# For more info: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/cron/
#
# You can set this variable from a file by appending it with _FILE
#
STATIC_CRON_TOKEN=STATIC_CRON_TOKEN_VALUE
# You can fine tune the start-up of a Docker container by editing these environment variables.
# Use this at your own risk. Disabling certain checks and features may result in lots of inconsistent data.
# However if you know what you're doing you can significantly speed up container start times.
# Set each value to true to enable, or false to disable.
# Set this to true to build all locales supported by Firefly III.
# This may take quite some time (several minutes) and is generally not recommended.
# If you wish to change or alter the list of locales, start your Docker container with
# `docker run -v locale.gen:/etc/locale.gen -e DKR_BUILD_LOCALE=true`
# and make sure your preferred locales are in your own locale.gen.
DKR_BUILD_LOCALE=false
# Check if the SQLite database exists. Can be skipped if you're not using SQLite.
# Won't significantly speed up things.
DKR_CHECK_SQLITE=true
# Run database creation and migration commands. Disable this only if you're 100% sure the DB exists
# and is up to date.
DKR_RUN_MIGRATION=true
# Run database upgrade commands. Disable this only when you're 100% sure your DB is up-to-date
# with the latest fixes (outside of migrations!)
DKR_RUN_UPGRADE=true
# Verify database integrity. Includes all data checks and verifications.
# Disabling this makes Firefly III assume your DB is intact.
DKR_RUN_VERIFY=true
# Run database reporting commands. When disabled, Firefly III won't go over your data to report current state.
# Disabling this should have no impact on data integrity or safety but it won't warn you of possible issues.
DKR_RUN_REPORT=true
# Generate OAuth2 keys.
# When disabled, Firefly III won't attempt to generate OAuth2 Passport keys. This won't be an issue, IFF (if and only if)
# you had previously generated keys already and they're stored in your database for restoration.
DKR_RUN_PASSPORT_INSTALL=true
# Leave the following configuration vars as is.
# Unless you like to tinker and know what you're doing.
APP_NAME=FireflyIII
BROADCAST_DRIVER=log
QUEUE_DRIVER=sync
CACHE_PREFIX=firefly
PUSHER_KEY=
IPINFO_TOKEN=
PUSHER_SECRET=
PUSHER_ID=
DEMO_USERNAME=
DEMO_PASSWORD=
#
# The v2 layout is very experimental. If it breaks you get to keep both parts.
# Be wary of data loss.
#
FIREFLY_III_LAYOUT=v1
#
# Please make sure this URL matches the external URL of your Firefly III installation.
# It is used to validate specific requests and to generate URLs in emails.
#
APP_URL=https://finance.koval.net
# MySQL
MYSQL_RANDOM_ROOT_PASSWORD=yes
MYSQL_USER=$DB_USERNAME
MYSQL_PASSWORD=$DB_PASSWORD
MYSQL_DATABASE=$DB_DATABASE

infra/finance/firefly-iii/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
!*.env

View File

@@ -0,0 +1,255 @@
# Firefly Data Importer (FIDI) configuration file
# Where is Firefly III?
#
# 1) Make sure you ADD http:// or https://
# 2) Make sure you REMOVE any trailing slash from the end of the URL.
# 3) In case of Docker, refer to the internal IP of your Firefly III installation.
#
# Setting this value is not mandatory. But it is very useful.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_URL=http://app:8080
#
# Imagine Firefly III can be reached at "http://172.16.0.2:8082" (internal Docker network or something).
# But you have a fancy URL: "https://personal-finances.bill.microsoft.com/"
#
# In those cases, you can overrule the URL so when the data importer links back to Firefly III, it uses the correct URL.
#
# 1) Make sure you ADD http:// or https://
# 2) Make sure you REMOVE any trailing slash from the end of the URL.
#
# IF YOU SET THIS VALUE, YOU MUST ALSO SET THE FIREFLY_III_URL
#
# This variable can be set from a file if you append it with _FILE
#
VANITY_URL=https://finance.koval.net
#
# Set your Firefly III Personal Access Token (OAuth)
# You can create a Personal Access Token on the /profile page:
# go to the OAuth tab, then Personal Access Token and "Create token".
#
# - Do not use the "command line token". That's the WRONG one.
# - Do not use "APP_KEY" value from your Firefly III installation. That's the WRONG one.
#
# Setting this value is not mandatory. Instructions will follow if you omit this field.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_ACCESS_TOKEN=
#
# You can also use a public client ID. This is available in Firefly III 5.4.0-alpha.3 and higher.
# This is a number (1, 2, 3). If you use the client ID, you can leave the access token empty and vice versa.
#
# This value is not mandatory to set. Instructions will follow if you omit this field.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_CLIENT_ID=
#
# Nordigen information.
# The key and ID can be set from a file if you append it with _FILE
#
NORDIGEN_ID=NORDIGEN_ID_VALUE
NORDIGEN_KEY=NORDIGEN_KEY_VALUE
NORDIGEN_SANDBOX=false
#
# Spectre information
#
# The ID and secret can be set from a file if you append it with _FILE
SPECTRE_APP_ID=
SPECTRE_SECRET=
#
# Use cache. No need to do this.
#
USE_CACHE=false
#
# If set to true, the data import will not complain about running into duplicates.
# This will give you cleaner import mails if you run regular imports.
#
# This means that the data importer will not import duplicates, but it will not complain about them either.
#
# This setting has no influence on the settings in your configuration(.json).
#
# Of course, if something goes wrong *because* the transaction is a duplicate you will
# NEVER know unless you start digging in your log files. So be careful with this.
#
IGNORE_DUPLICATE_ERRORS=false
#
# Auto import settings. Due to security constraints, you MUST enable each feature individually.
# You must also set a secret. The secret is used for the web routes.
#
# The auto-import secret must be a string of at least 16 characters.
# Visit this page for inspiration: https://www.random.org/passwords/?num=1&len=16&format=html&rnd=new
#
# Submit it using ?secret=X
#
# This variable can be set from a file if you append it with _FILE
#
AUTO_IMPORT_SECRET=
#
# Is the /autoimport even endpoint enabled?
# By default it's disabled, and the secret alone will not enable it.
#
CAN_POST_AUTOIMPORT=false
#
# Is the /autoupload endpoint enabled?
# By default it's disabled, and the secret alone will not enable it.
#
CAN_POST_FILES=false
#
# Import directory white list. You need to set this before the auto importer will accept a directory to import from.
#
# This variable can be set from a file if you append it with _FILE
#
IMPORT_DIR_ALLOWLIST=
#
# If you import from a directory, you can save a fallback configuration file in the directory.
# This file must be called "_fallback.json" and will be used when your CSV or CAMT.053 file is not accompanied
# by a configuration file.
#
# This fallback configuration will only be used if this variable is set to true.
# https://docs.firefly-iii.org/how-to/data-importer/advanced/post/#importing-a-local-directory
#
FALLBACK_IN_DIR=false
#
# When you're running Firefly III under a (self-signed) certificate,
# the data importer may have trouble verifying the TLS connection.
#
# You have a few options to make sure the data importer can connect
# to Firefly III:
# - 'true': will verify all certificates. The most secure option and the default.
# - 'file.pem': refer to a file (you must provide it) to your custom root or intermediate certificates.
# - 'false': will verify NO certificates. Not very secure.
VERIFY_TLS_SECURITY=true
#
# If you want, you can set a directory here where the data importer will look for import configurations.
# This is a separate setting from the /import directory that the auto-import uses.
# Setting this variable isn't necessary. The default value is "storage/configurations".
#
# This variable can be set from a file if you append it with _FILE
#
JSON_CONFIGURATION_DIR=
#
# Time out when connecting with Firefly III.
# π*10 seconds is usually fine.
#
CONNECTION_TIMEOUT=31.41
# The following variables can be useful when debugging the application
APP_ENV=local
APP_DEBUG=false
LOG_CHANNEL=stack
#
# If you turn this on, expect massive logs with lots of privacy sensitive data
#
LOG_RETURN_JSON=false
# Log level. You can set this from least severe to most severe:
# debug, info, notice, warning, error, critical, alert, emergency
# If you set it to debug your logs will grow large, and fast. If you set it to emergency probably
# nothing will get logged, ever.
LOG_LEVEL=debug
# TRUSTED_PROXIES is a useful variable when using Docker and/or a reverse proxy.
# Set it to ** and reverse proxies work just fine.
TRUSTED_PROXIES=*
#
# Time zone
#
TZ=Europe/London
#
# Use ASSET_URL when you're running the data importer in a sub-directory.
#
ASSET_URL=
#
# Email settings.
# The data importer can send you a message with all errors, warnings and messages
# after a successful import. This is disabled by default
#
ENABLE_MAIL_REPORT=false
#
# Force Firefly III URL to be secure?
#
#
EXPECT_SECURE_URL=false
# If enabled, define which mailer you want to use.
# Options include: smtp, mailgun, postmark, sendmail, log, array
# Amazon SES is not supported.
# log = drop mails in the logs instead of sending them
# array = debug mailer that does nothing.
MAIL_MAILER=smtp
# where to send the report?
MAIL_DESTINATION=gleb+fireflyiii@koval.net
# other mail settings
# These variables can be set from a file if you append it with _FILE
MAIL_FROM_ADDRESS=no-reply@koval.net
MAIL_HOST=smtp.mailtrap.io
MAIL_PORT=587
MAIL_USERNAME=no-reply@koval.net
MAIL_PASSWORD=SMTP_PASSWORD_VALUE
MAIL_ENCRYPTION=tls
# Extra settings depending on your mail configuration above.
# These variables can be set from a file if you append it with _FILE
MAILGUN_DOMAIN=
MAILGUN_SECRET=
MAILGUN_ENDPOINT=
POSTMARK_TOKEN=
#
# You probably won't need to change these settings.
#
BROADCAST_DRIVER=log
CACHE_DRIVER=file
QUEUE_CONNECTION=sync
SESSION_DRIVER=file
SESSION_LIFETIME=120
IS_EXTERNAL=false
REDIS_HOST=127.0.0.1
REDIS_PASSWORD=null
REDIS_PORT=6379
# always use quotes
REDIS_DB="0"
REDIS_CACHE_DB="1"
# The only tracker supported is Matomo.
# This is used on the public instance over at https://data-importer.firefly-iii.org
TRACKER_SITE_ID=
TRACKER_URL=
APP_NAME=DataImporter
#
# The APP_URL environment variable is NOT used anywhere.
# Don't bother setting it to fix your reverse proxy problems. It won't help.
# Don't open issues telling me it doesn't help because it's not supposed to.
# Laravel uses this to generate links on the command line, which is a feature the data importer does not use.
#
APP_URL=http://localhost

View File

@@ -0,0 +1,42 @@
version: "3"
services:
app:
image: fireflyiii/core:latest
restart: unless-stopped
volumes:
- firefly_iii_upload:/var/www/html/storage/upload
env_file: .env
ports:
- 80:8080
depends_on:
- db
db:
image: mariadb:lts
restart: unless-stopped
env_file: .env
volumes:
- firefly_iii_db:/var/lib/mysql
importer:
image: fireflyiii/data-importer:latest
restart: unless-stopped
ports:
- 81:8080
depends_on:
- app
env_file: .importer.env
cron:
#
# To make this work, set STATIC_CRON_TOKEN in your .env file or as an environment variable and replace REPLACEME below
# The STATIC_CRON_TOKEN must be *exactly* 32 characters long
#
image: alpine
restart: unless-stopped
env_file: .env
command: sh -c "echo \"0 3 * * * wget -qO- http://app:8080/api/v1/cron/$STATIC_CRON_TOKEN\" | crontab - && crond -f -L /dev/stdout"
volumes:
firefly_iii_upload:
firefly_iii_db:

View File

@@ -36,7 +36,7 @@
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
-      notify: Initial boot
+      register: create
    - name: Wait for status
      community.general.proxmox_kvm:
        state: current
@@ -65,8 +65,21 @@
        ipconfig0: ip=dhcp,ip6=auto
        ipconfig1: ip=dhcp
-    - name: Force all notified handlers to run
-      ansible.builtin.meta: flush_handlers
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Initial boot
+      when: create.changed is true
+      block:
+        - name: Start
+          community.general.proxmox_kvm:
+            state: started
+          register: start
+        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
+          ansible.builtin.wait_for:
+            timeout: 90
    # VM Configuration
    - name: Resize root disk
@@ -101,18 +114,3 @@
      community.general.proxmox_kvm:
        state: restarted
        timeout: 60
-  handlers:
-    # Initial boot
-    # For some reason debian cloud images don't use
-    # cloud-init for networking on first boot (cloud-init files
-    # are regenerated AFTER networking starts). But we need the
-    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
-    - name: Initial boot
-      block:
-        - name: Start
-          community.general.proxmox_kvm:
-            state: started
-          register: start
-        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
-          ansible.builtin.wait_for:
-            timeout: 90

View File

@@ -1,4 +1,4 @@
-- name: Install docker
+- name: Install software
  hosts: music
  gather_facts: false
  tasks:

View File

@@ -7,39 +7,46 @@
    - name: Wait for connection
      ansible.builtin.wait_for_connection:
        timeout: 300
+    - name: Get user
+      ansible.builtin.user:
+        name: debian
+      register: user
    - name: Docker compose down
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
-        state: absent
+      ansible.builtin.command: docker compose down
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
+      ignore_errors: true
    - name: Copy project
      ansible.builtin.copy:
        src: "./{{ app }}"
-        dest: "$HOME"
+        dest: "{{ user.home }}"
        mode: "0744"
    - name: Replace LastFM API key secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "LASTFM_APIKEY_VALUE"
        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/music', secret_name='LASTFM_APIKEY')['value'] }}"
    - name: Replace LastFM secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "LASTFM_SECRET_VALUE"
        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/music', secret_name='LASTFM_SECRET')['value'] }}"
    - name: Replace Mongo Password secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "SPOTIFY_ID_VALUE"
        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/music', secret_name='SPOTIFY_ID')['value'] }}"
    - name: Replace SMTP Password secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "SPOTIFY_SECRET_VALUE"
        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/music', secret_name='SPOTIFY_SECRET')['value'] }}"
-    - name: Docker compose up
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
+    - name: Docker compose up -d
+      ansible.builtin.command: docker compose up -d
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
    - name: Update data permissions
      ansible.builtin.file:
View File

@@ -49,4 +49,4 @@ services:
    volumes:
      - /mnt/nvme/filebrowser:/config
      - /mnt/media/downloads:/srv/downloads
      - /mnt/media/music:/srv/music

View File

@@ -36,7 +36,7 @@
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
-      notify: Initial boot
+      register: create
    - name: Wait for status
      community.general.proxmox_kvm:
        state: current
@@ -65,8 +65,21 @@
        ipconfig0: ip=dhcp,ip6=auto
        ipconfig1: ip=dhcp
-    - name: Force all notified handlers to run
-      ansible.builtin.meta: flush_handlers
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Initial boot
+      when: create.changed is true
+      block:
+        - name: Start
+          community.general.proxmox_kvm:
+            state: started
+          register: start
+        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
+          ansible.builtin.wait_for:
+            timeout: 90
    # VM Configuration
    - name: Resize root disk
@@ -101,18 +114,3 @@
      community.general.proxmox_kvm:
        state: restarted
        timeout: 60
-  handlers:
-    # Initial boot
-    # For some reason debian cloud images don't use
-    # cloud-init for networking on first boot (cloud-init files
-    # are regenerated AFTER networking starts). But we need the
-    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
-    - name: Initial boot
-      block:
-        - name: Start
-          community.general.proxmox_kvm:
-            state: started
-          register: start
-        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
-          ansible.builtin.wait_for:
-            timeout: 90

View File

@@ -1,4 +1,4 @@
-- name: Install docker
+- name: Install software
  hosts: photos
  gather_facts: false
  tasks:

View File

@@ -7,25 +7,40 @@
    - name: Wait for connection
      ansible.builtin.wait_for_connection:
        timeout: 300
+    - name: Get user
+      ansible.builtin.user:
+        name: debian
+      register: user
    - name: Docker compose down
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
-        state: absent
+      ansible.builtin.command: docker compose down
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
+      ignore_errors: true
    - name: Copy project
      ansible.builtin.copy:
        src: "./{{ app }}"
-        dest: "$HOME"
+        dest: "{{ user.home }}"
        mode: "0744"
    - name: Replace Typesense secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "TYPESENSE_API_KEY_VALUE"
-        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/photos', secret_name='TYPESENSE_API_KEY')['value'] }}"
+        replace:
+          "{{ lookup('infisical.vault.read_secrets', project_id=infisical_project, env_slug='prod',
+          path='/photos', secret_name='TYPESENSE_API_KEY')['value'] }}"
    - name: Replace DB secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
        regexp: "DB_PASSWORD_VALUE"
-        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/photos', secret_name='DB_PASSWORD')['value'] }}"
+        replace:
+          "{{ lookup('infisical.vault.read_secrets', project_id=infisical_project, env_slug='prod',
+          path='/photos', secret_name='DB_PASSWORD')['value'] }}"
+    - name: Docker compose pull
+      ansible.builtin.command: docker compose pull
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
    - name: Docker compose up -d
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
+      ansible.builtin.command: docker compose up -d
+      args:
+        chdir: "{{ user.home }}/{{ app }}"

View File

@@ -4,9 +4,9 @@ services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
-    command: ["start.sh", "immich"]
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
+      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
@@ -16,22 +16,6 @@ services:
      - database
    restart: always
-  immich-microservices:
-    container_name: immich_microservices
-    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
-    # extends:
-    #   file: hwaccel.yml
-    #   service: hwaccel
-    command: ["start.sh", "microservices"]
-    volumes:
-      - ${UPLOAD_LOCATION}:/usr/src/app/upload
-    env_file:
-      - .env
-    depends_on:
-      - redis
-      - database
-    restart: always
  immich-machine-learning:
    container_name: immich_machine_learning
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}

View File

@@ -36,7 +36,7 @@
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
-      notify: Initial boot
+      register: create
    - name: Wait for status
      community.general.proxmox_kvm:
        state: current
@@ -65,8 +65,21 @@
        ipconfig0: ip=dhcp,ip6=auto
        ipconfig1: ip=dhcp
-    - name: Force all notified handlers to run
-      ansible.builtin.meta: flush_handlers
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Initial boot
+      when: create.changed is true
+      block:
+        - name: Start
+          community.general.proxmox_kvm:
+            state: started
+          register: start
+        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
+          ansible.builtin.wait_for:
+            timeout: 90
    # VM Configuration
    - name: Resize root disk
@@ -100,18 +113,3 @@
      community.general.proxmox_kvm:
        state: restarted
        timeout: 60
-  handlers:
-    # Initial boot
-    # For some reason debian cloud images don't use
-    # cloud-init for networking on first boot (cloud-init files
-    # are regenerated AFTER networking starts). But we need the
-    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
-    - name: Initial boot
-      block:
-        - name: Start
-          community.general.proxmox_kvm:
-            state: started
-          register: start
-        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
-          ansible.builtin.wait_for:
-            timeout: 90

View File

@@ -1,4 +1,4 @@
-- name: Install docker
+- name: Install software
  hosts: samba
  gather_facts: false
  tasks:

View File

@@ -7,25 +7,32 @@
    - name: Wait for connection
      ansible.builtin.wait_for_connection:
        timeout: 300
+    - name: Get user
+      ansible.builtin.user:
+        name: debian
+      register: user
    - name: Docker compose down
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
-        state: absent
+      ansible.builtin.command: docker compose down
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
+      ignore_errors: true
    - name: Copy project
      ansible.builtin.copy:
        src: "./{{ app }}"
-        dest: "$HOME"
+        dest: "{{ user.home }}"
        mode: "0744"
    - name: Replace KVK Password secret
      ansible.builtin.replace:
-        path: "$HOME/{{ app }}/config.yml"
+        path: "{{ user.home }}/{{ app }}/config.yml"
        regexp: "KVK_PASSWORD"
        replace: "{{ lookup('infisical.vault.read_secrets', env_slug='prod', path='/samba', secret_name='KVK_PASSWORD')['value'] }}"
-    - name: Docker compose up
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
+    - name: Docker compose up -d
+      ansible.builtin.command: docker compose up -d
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
    - name: Update samba permissions
      ansible.builtin.file:
View File

@@ -15,12 +15,12 @@ global:
    - "force group = debian"
  share:
    - name: kvkbackups
      comment: KVK Backups
      path: /samba/kvkbackups
      validusers: kvk
      writelist: kvk
-      browsable: true
-      readonly: false
-      guestok: false
-      veto: false
+      browsable: yes
+      readonly: no
+      guestok: no
+      veto: no

View File

@@ -36,7 +36,7 @@
      community.general.proxmox_kvm:
        clone: "{{ node }}-debian-12"
        storage: nvme
-      notify: Initial boot
+      register: create
    - name: Wait for status
      community.general.proxmox_kvm:
        state: current
@@ -65,8 +65,21 @@
        ipconfig0: ip=dhcp,ip6=auto
        ipconfig1: ip=dhcp
-    - name: Force all notified handlers to run
-      ansible.builtin.meta: flush_handlers
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Initial boot
+      when: create.changed is true
+      block:
+        - name: Start
+          community.general.proxmox_kvm:
+            state: started
+          register: start
+        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
+          ansible.builtin.wait_for:
+            timeout: 90
    # VM Configuration
    - name: Resize root disk
@@ -95,18 +108,3 @@
      community.general.proxmox_kvm:
        state: restarted
        timeout: 60
-  handlers:
-    # Initial boot
-    # For some reason debian cloud images don't use
-    # cloud-init for networking on first boot (cloud-init files
-    # are regenerated AFTER networking starts). But we need the
-    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
-    - name: Initial boot
-      block:
-        - name: Start
-          community.general.proxmox_kvm:
-            state: started
-          register: start
-        - name: Wait 1.5 min # Initial apt update, apt upgrade, cloud-init
-          ansible.builtin.wait_for:
-            timeout: 90


@@ -1,4 +1,4 @@
-- name: Install docker
+- name: Install software
   hosts: secrets
   gather_facts: false
   tasks:


@@ -7,31 +7,38 @@
     - name: Wait for connection
      ansible.builtin.wait_for_connection:
         timeout: 300
+    - name: Get user
+      ansible.builtin.user:
+        name: debian
+      register: user
     - name: Docker compose down
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
-        state: absent
+      ansible.builtin.command: docker compose down
+      args:
+        chdir: "{{ user.home }}/{{ app }}"
+      ignore_errors: true
     - name: Copy project
       ansible.builtin.copy:
         src: "./{{ app }}"
-        dest: "$HOME"
+        dest: "{{ user.home }}"
         mode: "0744"
     - name: Replace Encryption Key secret
       ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
         regexp: "ENCRYPTION_KEY_VALUE"
         replace: "{{ lookup('ansible.builtin.env', 'INFISICAL_ENCRYPTION_KEY') }}"
     - name: Replace Auth secret
       ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
         regexp: "AUTH_SECRET_VALUE"
         replace: "{{ lookup('ansible.builtin.env', 'INFISICAL_AUTH_SECRET') }}"
     - name: Replace SMTP Password secret
       ansible.builtin.replace:
-        path: "$HOME/{{ app }}/.env"
+        path: "{{ user.home }}/{{ app }}/.env"
         regexp: "SMTP_PASSWORD_VALUE"
         replace: "{{ lookup('ansible.builtin.env', 'SMTP_PASSWORD') }}"
     - name: Docker compose up -d
-      community.docker.docker_compose_v2:
-        project_src: "$HOME/{{ app }}"
+      ansible.builtin.command: docker compose up -d
+      args:
+        chdir: "{{ user.home }}/{{ app }}"


@@ -0,0 +1,114 @@
+- name: Provision Proxmox VM
+  hosts: unifi
+  connection: ansible.builtin.local
+  gather_facts: false
+  vars:
+    api_user: "{{ lookup('ansible.builtin.env', 'PROXMOX_USER') }}"
+    api_host: "{{ lookup('ansible.builtin.env', 'PROXMOX_HOST' ) }}"
+    api_token_id: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_ID') }}"
+    api_token_secret: "{{ lookup('ansible.builtin.env', 'PROXMOX_TOKEN_SECRET') }}"
+    ssh_public: "{{ lookup('ansible.builtin.env', 'SSH_PUBLIC') }}"
+    vmname: "{{ inventory_hostname | regex_replace('^([^\\.]+)\\..+$', '\\1') }}"
+    node: pve2
+  module_defaults:
+    community.general.proxmox_kvm:
+      api_user: "{{ api_user }}"
+      api_host: "{{ api_host }}"
+      api_token_id: "{{ api_token_id }}"
+      api_token_secret: "{{ api_token_secret }}"
+      name: "{{ vmname }}"
+      node: "{{ node }}"
+    community.general.proxmox_nic:
+      api_user: "{{ api_user }}"
+      api_host: "{{ api_host }}"
+      api_token_id: "{{ api_token_id }}"
+      api_token_secret: "{{ api_token_secret }}"
+      name: "{{ vmname }}"
+    community.general.proxmox_disk:
+      api_user: "{{ api_user }}"
+      api_host: "{{ api_host }}"
+      api_token_id: "{{ api_token_id }}"
+      api_token_secret: "{{ api_token_secret }}"
+      name: "{{ vmname }}"
+  tasks:
+    # Initial setup
+    - name: Create VM
+      community.general.proxmox_kvm:
+        clone: "{{ node }}-debian-12"
+        storage: nvme
+      notify:
+        - Start VM
+        - Wait
+    - name: Wait for status
+      community.general.proxmox_kvm:
+        state: current
+      register: vm
+      retries: 30
+      delay: 10
+      until: vm.status is defined
+    # Networking and initial config
+    - name: Add PUB NIC
+      community.general.proxmox_nic:
+        interface: net0
+        firewall: false
+        bridge: PUB
+    - name: Add SRV NIC
+      community.general.proxmox_nic:
+        interface: net1
+        firewall: false
+        bridge: SRV
+    - name: Add LAN NIC
+      community.general.proxmox_nic:
+        interface: net2
+        firewall: false
+        bridge: vmbr101
+    - name: Configure cloud-init
+      community.general.proxmox_kvm:
+        update: true
+        ciuser: debian
+        sshkeys: "{{ ssh_public }}"
+        ipconfig:
+          ipconfig0: ip=dhcp,ip6=auto
+          ipconfig1: ip=dhcp
+          ipconfig2: ip=192.168.1.2/24
+    - name: Force all notified handlers to run
+      ansible.builtin.meta: flush_handlers
+    # VM Configuration
+    - name: Resize root disk
+      community.general.proxmox_disk:
+        disk: scsi0
+        size: 48G
+        state: resized
+    - name: Update VM
+      community.general.proxmox_kvm:
+        update: true
+        agent: enabled=1
+        tags:
+          - debian-12
+          - provisioned
+        onboot: true
+        cores: 4
+        memory: 8192
+    - name: Restart VM
+      community.general.proxmox_kvm:
+        state: restarted
+        timeout: 60
+  handlers:
+    # Initial boot
+    # For some reason debian cloud images don't use
+    # cloud-init for networking on first boot (cloud-init files
+    # are regenerated AFTER networking starts). But we need the
+    # hostname to be registered with DHCP later on so ¯\_(ツ)_/¯
+    - name: Start VM
+      community.general.proxmox_kvm:
+        state: started
+      register: start
+    - name: Wait # Initial apt update, apt upgrade, cloud-init
+      ansible.builtin.wait_for:
+        timeout: 120
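
Unlike the older provisioning playbooks above (which now inline the first-boot block), this new play still uses handlers: `Create VM` notifies `Start VM` and `Wait`, which only fire when the clone reports a change, and the `flush_handlers` meta task forces them to run before the disk resize and reconfiguration rather than at the end of the play. A condensed sketch of that control flow, with names taken from the playbook above:

    tasks:
      - name: Create VM
        community.general.proxmox_kvm:
          clone: "{{ node }}-debian-12"
          storage: nvme
        notify:                           # queued only when the clone reports "changed"
          - Start VM
          - Wait
      - name: Force all notified handlers to run
        ansible.builtin.meta: flush_handlers   # handlers run here instead of at end of play
    handlers:
      - name: Start VM
        community.general.proxmox_kvm:
          state: started
      - name: Wait                        # initial apt update/upgrade, cloud-init
        ansible.builtin.wait_for:
          timeout: 120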


@@ -0,0 +1,25 @@
+- name: Initialise VM
+  hosts: unifi
+  gather_facts: false
+  tasks:
+    - name: Wait for connection
+      ansible.builtin.wait_for_connection:
+        timeout: 300
+    - name: Install system packages
+      ansible.builtin.apt:
+        update_cache: true
+        pkg:
+          - qemu-guest-agent
+          - parted
+          # For unifi
+          - ca-certificates
+          - curl
+      become: true
+    - name: Enable qemu-guest-agent
+      ansible.builtin.systemd:
+        name: qemu-guest-agent
+        state: started
+        enabled: true
+      become: true


@@ -23,9 +23,24 @@ proxmox:
     samba:
       hosts:
         samba.srv.home.local.koval.net:
+    finance:
+      hosts:
+        finance.srv.home.local.koval.net:
   vars:
     ansible_user: debian
     ansible_ssh_private_key_file: ~/.ssh/id_rsa
     ansible_ssh_common_args: -o StrictHostKeyChecking=accept-new # TODO: Improve this
 unmanaged:
-  hosts:
+  children:
+    backups:
+      hosts:
+        backups.srv.home.local.koval.net:
+    unifi:
+      hosts:
+        unifi.srv.home.local.koval.net:
+  vars:
+    ansible_user: debian
+    ansible_ssh_private_key_file: ~/.ssh/id_rsa
+    ansible_ssh_common_args: -o StrictHostKeyChecking=accept-new # TODO: Improve this
+vars:
+  infisical_project: d102ada3-7d49-4138-9759-033ca79fe731
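
The inventory change turns `unmanaged` from a flat `hosts:` group into a parent with `children:` (`backups` and `unifi`), so both child groups inherit the SSH settings declared once under the group's `vars:`, and plays such as `Initialise VM` above can keep targeting just `hosts: unifi`. A minimal, hypothetical inventory snippet (placeholder hostname) showing the same parent/child inheritance:

    unmanaged:
      children:
        unifi:
          hosts:
            unifi.example.net:            # placeholder host, not from this repo
      vars:
        ansible_user: debian              # inherited by every host under unmanaged's children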


@@ -1,4 +1,4 @@
 ansible
 proxmoxer
 requests
-infisical
+infisical-python


@@ -1,4 +0,0 @@
-collections:
-  - name: community.general
-  - name: community.docker
-  - name: infisical.vault