run all backup tasks in a slice

Author: Jef Roosens, 2026-04-20 21:53:11 +02:00
parent d351573a3b
commit 4c54a80122
Signed by: Jef Roosens
GPG key ID: 21FD3D77D56BAF49
6 changed files with 61 additions and 3 deletions


@@ -14,6 +14,12 @@ receivers:
filesystem:
network:
load:
# Record backup script outputs
journald:
matches:
- _SYSTEMD_SLICE: backup.slice
prometheus:
config:
scrape_configs:
@@ -22,13 +28,17 @@ receivers:
static_configs:
- targets: ['localhost:2019']
- job_name: 'miniflux'
scrape_interval: 30s
scrape_interval: 1m
static_configs:
- targets: ['localhost:8002']
- job_name: 'restic-rest'
scrape_interval: 30s
scrape_interval: 1m
static_configs:
- targets: ['localhost:8000']
- job_name: 'forgejo'
scrape_interval: 1m
static_configs:
- targets: ['localhost:8027']
# Processors specify what happens with the received data
processors:
@@ -59,6 +69,15 @@ exporters:
# x-greptime-pipeline-name: '<pipeline_name>'
tls:
insecure: true
otlphttp/logs_journald:
endpoint: '{{ otel_logs_endpoint }}'
headers:
# x-greptime-db-name: '<your_db_name>'
x-greptime-log-table-name: 'journald_logs'
x-greptime-pipeline-name: 'journald_logs'
# x-greptime-pipeline-name: 'greptime_identity'
tls:
insecure: true
otlphttp/metrics:
endpoint: '{{ otel_metrics_endpoint }}'
@@ -69,6 +88,8 @@ exporters:
# x-greptime-db-name: '<your_db_name>'
tls:
insecure: true
debug:
verbosity: normal
# Service pipelines pull the configured receivers, processors, and exporters together
# into pipelines that process data
@@ -88,3 +109,7 @@ service:
receivers: [otlp]
processors: [batch, resourcedetection]
exporters: [otlphttp/logs]
logs/journald:
receivers: [journald]
processors: [batch, resourcedetection]
exporters: [debug, otlphttp/logs_journald]
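Pieced back together with the indentation restored, the journald additions above amount to roughly this collector fragment (a sketch reconstructed from the hunks, not the complete file; the endpoint stays templated exactly as in the role):

receivers:
  # Record backup script outputs emitted under the backup.slice cgroup
  journald:
    matches:
      - _SYSTEMD_SLICE: backup.slice

exporters:
  otlphttp/logs_journald:
    endpoint: '{{ otel_logs_endpoint }}'
    headers:
      x-greptime-log-table-name: 'journald_logs'
      x-greptime-pipeline-name: 'journald_logs'
    tls:
      insecure: true
  debug:
    verbosity: normal

service:
  pipelines:
    logs/journald:
      receivers: [journald]
      processors: [batch, resourcedetection]
      exporters: [debug, otlphttp/logs_journald]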


@@ -3,7 +3,7 @@
#
# All types:
# name: (required) unique identifier, used in unit and script filenames
# type: (required) backup template to use: btrfs-subvolume, podman-postgres, postgres
# type: (required) backup template to use: btrfs-subvolume, podman-mysql, podman-postgres, postgres, echo-test
# user: (optional) user to run the backup as; defaults to root
# group: (optional) group to run the backup as; defaults to backups
# timer_delay_sec: (optional) RandomizedDelaySec for the timer; defaults to 30 minutes
@@ -11,6 +11,12 @@
# btrfs-subvolume:
# path: (required) path to the btrfs subvolume to back up
#
# podman-mysql:
# container: (required) name of the podman container running mysql/mariadb
# mysql_user: (required) mysql user to connect as
# mysql_password: (required) mysql password for the user
# database: (required) mysql database to dump
#
# podman-postgres:
# container: (required) name of the podman container running postgres
# pg_user: (required) postgres user to connect as
@@ -20,6 +26,10 @@
# pwd: (required) working directory for podman compose
# user: (required) postgres user to connect as
# database: (required) postgres database to dump
#
# echo-test:
# lines: (optional) number of log lines to emit; defaults to 10
# interval_sec: (optional) seconds to sleep between lines; defaults to 1
backups: []
# Restic REST server URL to publish backups to
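As a worked example of the schema documented above, a populated list could look like the following (all names and values here are made up for illustration; the role default remains the empty list):

backups:
  - name: gitea-data              # hypothetical btrfs-subvolume backup
    type: btrfs-subvolume
    path: /mnt/data/gitea
  - name: nextcloud-db            # hypothetical podman-mysql backup
    type: podman-mysql
    container: nextcloud-db
    mysql_user: nextcloud
    mysql_password: changeme
    database: nextcloud
    timer_delay_sec: 900
  - name: logging-smoke-test      # exercises the echo-test template
    type: echo-test
    user: backup
    lines: 5
    interval_sec: 2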


@@ -0,0 +1,5 @@
[Unit]
Description=Backup services slice
[Slice]
CPUQuota=25%


@@ -35,6 +35,15 @@
loop: "{{ backups }}"
when: item.user is defined
- name: Ensure backup slice unit is present
ansible.builtin.copy:
src: "backup.slice"
dest: "/etc/systemd/system/backup.slice"
owner: root
group: root
mode: "0644"
notify: Reload systemd
- name: Ensure systemd service unit is present for each backup
ansible.builtin.template:
src: "backup.service.j2"


@@ -4,6 +4,7 @@ After=network.target
[Service]
Type=oneshot
Slice=backup.slice
User={{ item.user | default('root') }}
Group={{ item.group | default('backups') }}


@@ -0,0 +1,8 @@
#!/usr/bin/env bash
echo "log line 1"
echo "log line 2"
echo "log line 3"
echo "log line 4"
echo "log line 5"
echo "log line 6"