Compare commits

...
Sign in to create a new pull request.

7 commits

32 changed files with 596 additions and 136 deletions

View file

@ -1,49 +1,52 @@
$ANSIBLE_VAULT;1.1;AES256 $ANSIBLE_VAULT;1.1;AES256
65626333343266643235663938663438356638613431393864666264636364363431316436636234 33383364343639356334353035346237343135633831643837633539663433313431616130623862
3065623230376661396633643138633766633563393461380a636664373666646435643235653232 3638363236326362373564663134383266353634343861370a363239653062656634663139616338
30313935623961366634656134643834636239623836633864643961376237653531336238363135 32653965643465316364633161343264323763363066303833656661303464623866643437303664
3662316535303637640a363863353263633661343635346238616335353232303261326163323233 3465663461663361370a353835653064343433356463333231643831643139303562656435653436
32373237303864353037643966656563323331326161623334636238666237383735643532626566 64303735613233386137363765393935396666343362616638306263643732623763613462303535
64363931363932383263666434393139396137613934663134616430396537616566333835333865 30633364656562666534316233306462373139316631613931396430313631623131313365383033
66653239363539363432363735353930393239333063623339623330666432323635356363376337 33356637653038636261326264653866313432363233646636653762386366323838353164313438
39643938653737343633663665343132613236326666336434613966343134613035343562356133 33316664363038346466653566636633303461633433633461353533643633323661353536623937
64613630613037663638633439306433633261373731306564363133633832326632623733313434 33623461623562343831333166306337393538373032353133303935666161613839303766343332
64376538313634333564343263636436323230663935363964396636666532333331313535323962 64353632666265363134386563343237353361333435363539626663363531663835363033353438
34623764666362643031643339356163366132336239366639333939633965383736383839646261 38363332663063393832353866303562643435646637663339653031643563396165613939323164
30343331626434366662613139306335336231643066356465363763383237636466636162393266 33356631666330643861373534343432313636663764636265663939663965376532356230653763
31613432643835306230386536323438366537313137626361326338363539303031326439303065 33306530386234643434613130393838616138306461386539343333643739343165633234316263
66343634653034643964636333383131333530636330346462653336633435356430663234376539 36303566613365653662363434643963656366663730643831346637336461623165343834373938
33633963616630396134366632613139366134313430363764303738636263623362373332336266 32646539346135326536363939353232396239643630326564396463336537613639393961663064
35616461306635343364636634396664316635383164323933396233613539353436373264616137 63376663303364616162393031346662303731336334626634396535323933373864373861326331
38373335333631303133363730626365643765366462373337386132343361303230626661613431 63343235663338363731343936313963636234326639633631323438656363626637363131653932
32363334636563613333646633323261316534386138616133663539393864353863353431396563 34643239646263383130336632343166396636646433363061366639333439343961306134633765
37386166326133653734666266383932633638333930623835333164303366633432303563386661 33643064356331646334316537346566626531653537336530653037333665303439663936356166
63313032643733643738383731623838623939316330613465653165666166356366646537313431 31316130336363386637656330313437316339626365386266356137616435613334306233306236
35613662363331323530323563613438616362353838616463623963616231653730613264383439 33383534663730396530353035626136633762316565336133366663616337323465646338323936
30386164356537326639313636303636386631613363323863653566363730366664633935376236 34656466316462633037626133303237363638323961363134303434646636613063353832356632
36646539653865383633643733383038313032356433623434343666386231633537646638376436 38353866396331383832666565303438323965396565356631353761653839356332363132643438
61636464353565336131396231643433353063303934326533306565623533303466633631363737 31356139633165663033626465623531396234396634393764616536346136323036663133303630
61636464393931636461343038323434346464363438373039346338666536323363366533636535 34653835363436356236303362333635623031663563323634343732646631383133666235623366
31346336393162653232323766323962373039373236353862383266313238386634343333343461 38656334373365363837343933313935663533303263346134316463393530303830663536323739
64393633656361313635343764373564623039396634626332323664326464626631646562623930 61303632613836353965663461623064636562653035323330653034623732303961343665666264
31396566353366393362623432376635366165353064653830333736373630353563323836346430 63373935373361363731626237353066663038346564613066323631376462373630303931306463
33326132366365616265626137383235353838653634393366313233343033626334383339663535 65363866646439393730383562376637386262646665646564386332356539343264333464303735
39333531353734653235323730633363613938303765633637373765663737633536313237626565 39393932653566363463616436313335656534303433656132643333633434313966313739333061
65336335633233626137643339386362313534393336656637326335643137333330656330386362 39643730326536666530333735373766373566663731346531653439346434623133613336383363
30656265356232343638393761303765396363656437316339396637306264623830373761363962 62626165653335643934653463653765636661323562313363333866393361366466343833653536
37663865303833366165623934343963666633616366376435393239373862646562383462393964 66376532306331373861393234356234363834326266353532663736353462333038353531346538
62373636633436643636346666663339313338646534383135316462346366373462346637313662 61626633303838303962336134376230626635616237303438636235393338623563373038376362
64363433666137643734393338326132393865343135663435323566666530363561343766646435 37333061313231643036303833373333336265313233326230373139626364663234313534623537
63653735623564323661333734643236646534663133633331616565353039626364366337333834 64623661366338323638656135613333353361353634643533393030393532363032313961393632
64366161636662616639613464396563623231386230636561666134383139323431383933613937 62356561353064663234396335383737613963386566613064393136313364303338346133353565
62613838383332343438313939333434646632353435643832376363353539333530306530323165 39633737663164636665626534346265633831613835343862316230653530346533346133326435
39303533393762353138623537363461333138383066383838376663636339626632643534303961 38323462666564666435633331353436373434313834613266656638343161623339656464306232
63646163333533623536663565623833303238623235633239613763653930363065666435376437 32616464623537366264363839383034346333323034663665326434343738306562303537363932
31383030313831643965386531396664363035306439626266353030363738376232366138306436 36346333373333316334336131386436633562656136353134656563663137316665656639646463
30336663313335313233313235653133313866353666336463376264393965636633636436643235 39353564366539636531303066623138613931323130306130363433323162313237346238323464
36653363363533343037353632646439366130396638343362626434376637313533383166356231 39633235313335353734323738356231613636643661343165616136343633333065643765633466
61646161303430396264376433363161313032366265666133333566616463636431643035393763 37363161653933646536343131656561313966306436336334313962376630373039373938303535
63653437353839393665643138663562633864633662343935313634386466366535326361633737 65343735613164656639383331623265656466656534663163383937303763626639373233646461
38363963386334376538626365363362663833376139363163636332313231666565393532646533 63393665653132316364363562316136383633343365623630613536653536326138376334396562
64386230313436316138643834373462643330336366323863336463356265376461346261356464 63613432356531386230393363383861323663353832373265303765616435303436356361393365
35643230353939333830 65386132333938333939353561303362346235343231383035313761366330363532623337386463
35623937303533613364383831343764653631333936313361386234323634383664356262313137
33643130343961396335623033346434373735303663376331346534613338386130633436346462
303936363639633134386435653639656334

View file

@ -10,8 +10,6 @@ rclone_obf_pass2: "{{ vault_rclone_obf_pass2 }}"
lander_commit_sha: 'e438bd045ca2ee64e3d9ab98f416027b5417c3f6' lander_commit_sha: 'e438bd045ca2ee64e3d9ab98f416027b5417c3f6'
lander_api_key: "{{ vault_lander_api_key }}" lander_api_key: "{{ vault_lander_api_key }}"
restic_rest_version: '0.12.1'
ntfy_user_pi_pass: "{{ vault_ntfy_user_pi_pass }}" ntfy_user_pi_pass: "{{ vault_ntfy_user_pi_pass }}"
nefarious_admin_user: "{{ vault_nefarious_admin_user }}" nefarious_admin_user: "{{ vault_nefarious_admin_user }}"

View file

@ -26,14 +26,17 @@
roles: roles:
- role: any.tools.restic - role: any.tools.restic
vars: vars:
# renovate: datasource=github-releases depName=restic/restic
restic_version: "0.18.1" restic_version: "0.18.1"
- role: any.tools.otel-cli - role: any.tools.otel-cli
vars: vars:
# renovate: datasource=github-releases depName=equinix-labs/otel-cli
otel_cli_version: "0.4.5" otel_cli_version: "0.4.5"
# TODO restic-rest subvolumes # TODO restic-rest subvolumes
- role: any.software.restic-rest - role: any.software.restic-rest
vars: vars:
restic_rest_data_dir: "/mnt/data1/restic-rest" restic_rest_data_dir: "/mnt/data1/restic-rest"
# renovate: datasource=github-releases depName=restic/rest-server
restic_rest_version: "0.12.1" restic_rest_version: "0.12.1"
- name: Set up OTEL collector - name: Set up OTEL collector
@ -63,7 +66,7 @@
name: "/@rootfs/data/miniflux/postgres" name: "/@rootfs/data/miniflux/postgres"
- role: any.software.miniflux-podman - role: any.software.miniflux-podman
vars: vars:
postgres_data_dir: '/data/miniflux/postgres' postgres_data_dir: "/data/miniflux/postgres"
- role: any.tools.backup-scripts - role: any.tools.backup-scripts
become: true become: true
vars: vars:
@ -88,6 +91,7 @@
name: "/webdav/data" name: "/webdav/data"
- role: any.software.webdav - role: any.software.webdav
vars: vars:
# renovate: datasource=github-releases depName=hacdias/webdav
webdav_version: "5.7.4" webdav_version: "5.7.4"
data_dir: "{{ btrfs_raid.path }}/webdav/data" data_dir: "{{ btrfs_raid.path }}/webdav/data"
@ -101,7 +105,6 @@
type: "btrfs-subvolume" type: "btrfs-subvolume"
path: "{{ btrfs_raid.path }}/webdav/data" path: "{{ btrfs_raid.path }}/webdav/data"
- name: Set up Forgejo - name: Set up Forgejo
hosts: emma hosts: emma
tags: forgejo tags: forgejo
@ -129,6 +132,7 @@
- role: any.software.forgejo-podman - role: any.software.forgejo-podman
vars: vars:
# General # General
# renovate: datasource=forgejo-releases depName=forgejo/forgejo
forgejo_version: '11.0.11' forgejo_version: '11.0.11'
forgejo_postgres_version: '14.8' forgejo_postgres_version: '14.8'
@ -187,9 +191,9 @@
- role: any.tools.backup-scripts - role: any.tools.backup-scripts
vars: vars:
backups: backups:
- name: 'otter-data' - name: "otter-data"
type: 'btrfs-subvolume' type: "btrfs-subvolume"
path: '/data/otter/data' path: "/data/otter/data"
- name: Set up Nefarious - name: Set up Nefarious
hosts: emma hosts: emma
@ -227,14 +231,14 @@
become: true become: true
- role: any.software.actual-podman - role: any.software.actual-podman
vars: vars:
data_dir: '/data/actual/data' data_dir: "/data/actual/data"
- role: any.tools.backup-scripts - role: any.tools.backup-scripts
become: true become: true
vars: vars:
backups: backups:
- name: 'actual-data' - name: "actual-data"
type: 'btrfs-subvolume' type: "btrfs-subvolume"
path: '/data/actual/data' path: "/data/actual/data"
- name: Set up Baikal - name: Set up Baikal
hosts: emma hosts: emma
@ -262,12 +266,12 @@
become: true become: true
vars: vars:
backups: backups:
- name: 'baikal-config' - name: "baikal-config"
type: 'btrfs-subvolume' type: "btrfs-subvolume"
path: '/data/baikal/config' path: "/data/baikal/config"
- name: 'baikal-Specific' - name: "baikal-Specific"
type: 'btrfs-subvolume' type: "btrfs-subvolume"
path: '/data/baikal/Specific' path: "/data/baikal/Specific"
- name: Set up Syncthing - name: Set up Syncthing
hosts: emma hosts: emma
@ -315,17 +319,17 @@
become: true become: true
vars: vars:
backups: backups:
- name: 'monica-data' - name: "monica-data"
type: 'btrfs-subvolume' type: "btrfs-subvolume"
path: '/data/monica/config' path: "/data/monica/config"
- name: 'monica-mariadb' - name: "monica-mariadb"
type: 'podman-mysql' type: "podman-mysql"
user: 'debian' user: "debian"
container: 'systemd-monica-mariadb' container: "systemd-monica-mariadb"
mysql_user: 'monica' mysql_user: "monica"
mysql_password: 'monica' mysql_password: "monica"
database: 'monica' database: "monica"
- name: Set up Recipya - name: Set up Recipya
hosts: emma hosts: emma
@ -371,6 +375,7 @@
immich_postgres_dir: "/data/immich/postgres" immich_postgres_dir: "/data/immich/postgres"
immich_upload_dir: "/mnt/data1/photos/immich-upload" immich_upload_dir: "/mnt/data1/photos/immich-upload"
immich_model_cache_dir: "/data/immich/model-cache" immich_model_cache_dir: "/data/immich/model-cache"
immich_hw_accel: "nvidia"
immich_libraries: immich_libraries:
- path: "/mnt/data1/photos/jef" - path: "/mnt/data1/photos/jef"
name: "jef" name: "jef"
@ -400,7 +405,7 @@
name: "/@rootfs/data/matrix-tuwunel/data" name: "/@rootfs/data/matrix-tuwunel/data"
- role: any.software.tuwunel - role: any.software.tuwunel
vars: vars:
tuwunel_data_dir: '/data/matrix-tuwunel/data' tuwunel_data_dir: "/data/matrix-tuwunel/data"
- role: any.tools.backup-scripts - role: any.tools.backup-scripts
vars: vars:
backups: backups:
@ -408,6 +413,17 @@
type: "btrfs-subvolume" type: "btrfs-subvolume"
path: "/data/matrix-tuwunel/data" path: "/data/matrix-tuwunel/data"
- name: Set up log pipeline test
hosts: emma
tags: [log-test, otel]
become: true
roles:
- role: any.tools.backup-scripts
vars:
backups:
- name: "log-test"
type: "echo-test"
- name: Set up GreptimeDB - name: Set up GreptimeDB
hosts: emma hosts: emma
tags: [greptimedb, otel] tags: [greptimedb, otel]
@ -423,6 +439,9 @@
vars: vars:
greptimedb_data_dir: "/data/greptimedb/data" greptimedb_data_dir: "/data/greptimedb/data"
# renovate: datasource=gitea-releases depName=GreptimeTeam/greptimedb
greptimedb_version: 'v1.0.0'
- name: Set up Grafana - name: Set up Grafana
hosts: emma hosts: emma
tags: [grafana, otel] tags: [grafana, otel]

20
renovate.json Normal file
View file

@ -0,0 +1,20 @@
{
"platform": "forgejo",
"endpoint": "https://git.rustybever.be",
"repositories": [
"Chewing_Bever/homelab"
],
"enabledManagers": [
"custom.regex"
],
"customManagers": [
{
"customType": "regex",
"managerFilePatterns": ["/plays/.*\\.yml$/"],
"matchStrings": [
"#\\s*renovate:\\s*(datasource=(?<datasource>.*?) )?depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\s*\\w*:\\s*[\"']?(?<currentValue>[^\"']*)[\"']?\\s"
],
"versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
}
]
}

View file

@ -123,4 +123,4 @@ ENABLED = true
SCHEDULE = @weekly SCHEDULE = @weekly
[metrics] [metrics]
enabled = true ENABLED = true

View file

@ -2,4 +2,18 @@
reverse_proxy localhost:{{ forgejo_http_port }} { reverse_proxy localhost:{{ forgejo_http_port }} {
header_down +X-Robots-Tag "none" header_down +X-Robots-Tag "none"
} }
route /metrics {
@local {
remote_ip 127.0.0.1 ::1
}
handle @local {
reverse_proxy localhost:{{ forgejo_http_port }}
}
handle {
respond "Not Found" 404
}
}
} }

View file

@ -110,3 +110,6 @@ JWT_SECRET = {{ gitea_jwt_secret }}
[other] [other]
SHOW_FOOTER_VERSION = false SHOW_FOOTER_VERSION = false
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
[metrics]
enabled = true

View file

@ -0,0 +1,10 @@
# `any.software.greptimedb-podman`
## Description
* Installs GreptimeDB inside a Podman container
## Configuration
* `greptimedb_version`: version of GreptimeDB to install
* `greptimedb_data_dir`: directory to mount as the data directory

View file

@ -0,0 +1,143 @@
# GreptimeDB Pipeline OTel journald receiver
#
# Input: NDJSON log records produced by the OpenTelemetry Collector's
# journald receiver. The OTel OTLP exporter wraps the journald
# JSON entry as a string under the top-level "body" key, so the
# pipeline first parses that string into an object before doing
# anything else.
#
# Timestamp: __MONOTONIC_TIMESTAMP (microseconds since boot) is used as the
# time-index column. If you prefer wall-clock time, swap this for
# __REALTIME_TIMESTAMP with the same resolution.
#
# Apply this pipeline by setting the HTTP export header in the OTel config:
# x-greptime-pipeline-name: journald
#
# Upload via the GreptimeDB API:
# curl -X POST 'http://<host>:4000/v1/events/pipelines/journald' \
# -H 'Content-Type: application/x-yaml' \
# --data-binary @journald.yaml
version: 2
processors:
# ------------------------------------------------------------------
# 1. The OTel OTLP exporter encodes the journald entry as a JSON string
# in the "body" field. Parse it in-place so subsequent steps can
# address individual keys as .body.<key>.
# ------------------------------------------------------------------
- json_parse:
fields:
- Body, body
ignore_missing: false
# ------------------------------------------------------------------
# 2. Flatten every journald / systemd field from .body.* to the top
# level with clean snake_case names, cast numeric fields to integers,
# strip the trailing newline journald appends to _SELINUX_CONTEXT,
# lift __MONOTONIC_TIMESTAMP as a plain string for the epoch processor
# in step 3, and finally drop the now-empty .body object.
#
# del(.body.<key>) returns the value AND removes the key in one step.
# ------------------------------------------------------------------
- vrl:
source: |
.transport = del(.body._TRANSPORT)
.hostname = del(.body._HOSTNAME)
.exe = del(.body._EXE)
.cmdline = del(.body._CMDLINE)
.runtime_scope = del(.body._RUNTIME_SCOPE)
.systemd_cgroup = del(.body._SYSTEMD_CGROUP)
.comm = del(.body._COMM)
.message = del(.body.MESSAGE)
.systemd_invocation_id = del(.body._SYSTEMD_INVOCATION_ID)
.gid = to_int!(del(.body._GID))
.uid = to_int!(del(.body._UID))
.priority = to_int!(del(.body.PRIORITY))
.boot_id = del(.body._BOOT_ID)
.pid = to_int!(del(.body._PID))
.seqnum_id = del(.body.__SEQNUM_ID)
.seqnum = to_int!(del(.body.__SEQNUM))
.syslog_identifier = del(.body.SYSLOG_IDENTIFIER)
.stream_id = del(.body._STREAM_ID)
.selinux_context = strip_whitespace(string!(del(.body._SELINUX_CONTEXT)))
.systemd_slice = del(.body._SYSTEMD_SLICE)
.syslog_facility = to_int!(del(.body.SYSLOG_FACILITY))
.cursor = del(.body.__CURSOR)
.systemd_unit = del(.body._SYSTEMD_UNIT)
.cap_effective = del(.body._CAP_EFFECTIVE)
.machine_id = del(.body._MACHINE_ID)
# Lift the raw timestamp string so the epoch processor (step 3)
# can consume it from the top level.
.monotonic_timestamp = to_int!(del(.body.__MONOTONIC_TIMESTAMP))
del(.body)
.
# ------------------------------------------------------------------
# 3. Parse the monotonic timestamp (µs since boot) into a typed value
# and rename it to `timestamp` so it becomes the time-index column.
# ------------------------------------------------------------------
# - epoch:
# fields:
# - __MONOTONIC_TIMESTAMP, timestamp
# resolution: microsecond
# ignore_missing: false
# ------------------------------------------------------------------
# Transform
#
# In version 2, only fields that require a specific type, index, or
# tag annotation need to be listed here. All remaining fields from the
# pipeline context are auto-detected and persisted by the engine.
#
# Resulting schema (auto-detected fields shown as comments):
# timestamp TimestampMicrosecond PRIMARY KEY (time index)
# message String fulltext index
# systemd_unit String inverted index
# hostname String inverted index
# comm String inverted index
# syslog_identifier String inverted index
# transport String inverted index
# systemd_slice String inverted index
# priority Int64 (auto)
# syslog_facility Int64 (auto)
# uid Int64 (auto)
# gid Int64 (auto)
# pid Int64 (auto)
# seqnum Int64 (auto)
# exe String (auto)
# cmdline String (auto)
# runtime_scope String (auto)
# systemd_cgroup String (auto)
# systemd_invocation_id String (auto)
# boot_id String (auto)
# seqnum_id String (auto)
# stream_id String (auto)
# selinux_context String (auto)
# cursor String (auto)
# cap_effective String (auto)
# machine_id String (auto)
# ------------------------------------------------------------------
transform:
# Time index — microsecond precision monotonic clock
- fields:
- Timestamp
type: epoch, us
index: timestamp
# Full-text search on the human-readable log body
- fields:
- message
type: string
index: fulltext
# Inverted indexes on the fields most commonly used in WHERE / GROUP BY
- fields:
- systemd_unit
- hostname
- comm
- syslog_identifier
- transport
- systemd_slice
type: string
index: inverted

View file

@ -23,17 +23,3 @@
owner: 'debian' owner: 'debian'
group: 'debian' group: 'debian'
notify: 'restart greptimedb' notify: 'restart greptimedb'
# - name: Ensure stack is deployed
# ansible.builtin.shell:
# chdir: '/etc/miniflux'
# cmd: 'docker compose up -d --remove-orphans'
# when: 'res.changed'
# - name: Ensure backup script is present
# ansible.builtin.copy:
# src: 'miniflux.backup.sh'
# dest: '/etc/backups/miniflux.backup.sh'
# owner: 'root'
# group: 'root'
# mode: '0644'

View file

@ -1,6 +1,6 @@
# vim: ft=systemd # vim: ft=systemd
[Container] [Container]
Image=docker.io/greptime/greptimedb:v1.0.0-rc.1 Image=docker.io/greptime/greptimedb:{{ greptimedb_version }}
Exec=standalone start --http-addr 0.0.0.0:4000 --rpc-bind-addr 0.0.0.0:4001 --mysql-addr 0.0.0.0:4002 --postgres-addr 0.0.0.0:4003 Exec=standalone start --http-addr 0.0.0.0:4000 --rpc-bind-addr 0.0.0.0:4001 --mysql-addr 0.0.0.0:4002 --postgres-addr 0.0.0.0:4003

View file

@ -0,0 +1,16 @@
# Immich version to deploy, used across all container images
immich_version: "v2.5.6"
# Hardware acceleration backend for the machine learning container.
# Supported values: intel, nvidia
immich_hw_accel: "intel"
# Port the machine learning container listens on, published to host loopback.
immich_ml_port: 3003
# URL the immich server uses to reach the machine learning container.
# Since the ML container runs as a system (root) container with Network=host,
# it binds directly to the host network. From within the rootless pod, this
# address may need to be the host's LAN IP or bridge IP rather than 127.0.0.1
# depending on the rootless network backend in use (pasta/slirp4netns).
immich_ml_url: "http://127.0.0.1:3003"

View file

@ -1,8 +1,19 @@
--- ---
- name: 'restart immich' - name: restart immich
ansible.builtin.systemd_service: ansible.builtin.systemd_service:
name: 'immich-server' name: immich-app
state: 'restarted' state: restarted
scope: user
scope: 'user'
daemon_reload: true daemon_reload: true
- name: restart immich-ml
ansible.builtin.systemd_service:
name: immich-ml
state: restarted
daemon_reload: true
become: true
- name: reload caddy
ansible.builtin.systemd_service:
name: caddy
state: reloaded

View file

@ -1,33 +1,66 @@
- name: Ensure Quadlet files are present - name: Ensure immich directories have correct permissions
ansible.builtin.template: ansible.builtin.file:
src: "{{ item }}.j2" path: "{{ item }}"
dest: "/home/debian/.config/containers/systemd/{{ item }}" state: directory
mode: '0755' mode: "0755"
owner: 'debian' owner: "debian"
group: 'debian' group: "debian"
become: true
loop: loop:
- 'immich-app.container' - "{{ immich_upload_dir }}"
- 'immich-postgres.container' - "{{ immich_postgres_dir }}"
# notify: 'restart immich'
- name: Ensure Quadlet files are present - name: Ensure system Quadlet directory is present
ansible.builtin.copy: ansible.builtin.file:
src: "{{ item }}" path: /etc/containers/systemd
dest: "/home/debian/.config/containers/systemd/{{ item }}" state: directory
mode: '0755'
owner: 'debian'
group: 'debian'
loop:
- 'immich-redis.container'
- 'immich.pod'
# notify: 'restart immich'
- name: Ensure Caddyfile is present
ansible.builtin.copy:
src: 'immich.Caddyfile'
dest: '/etc/caddy/immich.Caddyfile'
owner: root owner: root
group: root group: root
mode: '0644' mode: "0755"
become: true become: true
# notify: 'reload caddy'
- name: Ensure ML container system Quadlet is present
ansible.builtin.template:
src: immich-ml.container.j2
dest: /etc/containers/systemd/immich-ml.container
owner: root
group: root
mode: "0644"
become: true
notify: restart immich-ml
# - name: Ensure user Quadlet files are present (templates)
# ansible.builtin.template:
# src: "{{ item }}.j2"
# dest: "/home/debian/.config/containers/systemd/{{ item }}"
# mode: "0644"
# owner: "debian"
# group: "debian"
# become: true
# loop:
# - immich-app.container
# - immich-postgres.container
# notify: restart immich
# - name: Ensure user Quadlet files are present (static)
# ansible.builtin.copy:
# src: "{{ item }}"
# dest: "/home/debian/.config/containers/systemd/{{ item }}"
# mode: "0644"
# owner: "debian"
# group: "debian"
# become: true
# loop:
# - immich-redis.container
# - immich.pod
# notify: restart immich
# - name: Ensure Caddyfile is present
# ansible.builtin.copy:
# src: immich.Caddyfile
# dest: /etc/caddy/immich.Caddyfile
# owner: root
# group: root
# mode: "0644"
# become: true
# notify: reload caddy

View file

@ -4,7 +4,7 @@ Requires=immich-redis.service immich-postgres.service
After=immich-redis.service immich-postgres.service After=immich-redis.service immich-postgres.service
[Container] [Container]
Environment=IMMICH_VERSION=v2.5.6 DB_HOSTNAME=localhost DB_DATABASE_NAME=immich DB_USERNAME=immich DB_PASSWORD=immich REDIS_HOSTNAME=localhost Environment=IMMICH_VERSION=v2.5.6 DB_HOSTNAME=localhost DB_DATABASE_NAME=immich DB_USERNAME=immich DB_PASSWORD=immich REDIS_HOSTNAME=localhost MACHINE_LEARNING_URL={{ immich_ml_url }}
Image=ghcr.io/immich-app/immich-server:v2.5.6 Image=ghcr.io/immich-app/immich-server:v2.5.6
Pod=immich.pod Pod=immich.pod

View file

@ -0,0 +1,36 @@
# vim: ft=systemd
[Unit]
Description=Immich machine learning container
After=network.target
[Container]
Environment=IMMICH_VERSION={{ immich_version }}
{% if immich_hw_accel == 'nvidia' %}
Image=ghcr.io/immich-app/immich-machine-learning:{{ immich_version }}-cuda
{% elif immich_hw_accel == 'intel' %}
Image=ghcr.io/immich-app/immich-machine-learning:{{ immich_version }}-openvino
{% else %}
Image=ghcr.io/immich-app/immich-machine-learning:{{ immich_version }}
{% endif %}
Volume={{ immich_model_cache_dir }}:/cache
{% if immich_hw_accel == 'nvidia' %}
# Nvidia GPU access via CDI - requires nvidia-container-toolkit with CDI configured:
# nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml
AddDevice=nvidia.com/gpu=all
{% elif immich_hw_accel == 'intel' %}
# Intel GPU and OpenVINO device access
AddDevice=/dev/dri
Volume=/dev/bus/usb:/dev/bus/usb
{% endif %}
PublishPort=0.0.0.0:8028:3003
User=0
[Service]
Restart=always
[Install]
WantedBy=default.target

View file

@ -2,4 +2,17 @@ nws.roosens.me {
reverse_proxy localhost:8002 { reverse_proxy localhost:8002 {
header_down +X-Robots-Tag "none" header_down +X-Robots-Tag "none"
} }
route /metrics {
@local {
remote_ip 127.0.0.1 ::1
}
handle @local {
reverse_proxy localhost:8002
}
handle {
respond "Not Found" 404
}
}
} }

View file

@ -0,0 +1,8 @@
---
- name: 'restart miniflux-app'
ansible.builtin.service:
name: 'miniflux-app'
state: 'restarted'
scope: 'user'
daemon_reload: true

View file

@ -1,3 +1,4 @@
--- ---
dependencies: dependencies:
- role: any.tools.caddy - role: any.tools.caddy
become: true

View file

@ -27,12 +27,14 @@
loop: loop:
- 'miniflux-app.container' - 'miniflux-app.container'
- 'miniflux.pod' - 'miniflux.pod'
notify: 'restart miniflux-app'
- name: Ensure configuration directory is present - name: Ensure configuration directory is present
ansible.builtin.file: ansible.builtin.file:
path: '/etc/miniflux' path: '/etc/miniflux'
state: directory state: directory
mode: '0755' mode: '0755'
become: true
- name: Ensure environment file is present - name: Ensure environment file is present
ansible.builtin.template: ansible.builtin.template:
@ -41,7 +43,8 @@
mode: '0644' mode: '0644'
owner: 'root' owner: 'root'
group: 'root' group: 'root'
register: res become: true
notify: 'restart miniflux-app'
- name: Ensure Caddyfile is present - name: Ensure Caddyfile is present
copy: copy:
@ -50,7 +53,8 @@
owner: root owner: root
group: root group: root
mode: '0644' mode: '0644'
notify: reload caddy become: true
notify: 'reload caddy'
# - name: Ensure stack is deployed # - name: Ensure stack is deployed
# ansible.builtin.shell: # ansible.builtin.shell:

View file

@ -9,3 +9,7 @@ BASE_URL=https://nws.roosens.me
CLEANUP_ARCHIVE_UNREAD_DAYS=-1 CLEANUP_ARCHIVE_UNREAD_DAYS=-1
CLEANUP_ARCHIVE_READ_DAYS=-1 CLEANUP_ARCHIVE_READ_DAYS=-1
METRICS_ALLOWED_NETWORKS=0.0.0.0/0
METRICS_COLLECTOR=1
METRICS_REFRESH_INTERVAL=30s

View file

@ -14,6 +14,12 @@ receivers:
filesystem: filesystem:
network: network:
load: load:
# Record backup script outputs
journald:
matches:
- _SYSTEMD_SLICE: backup.slice
prometheus: prometheus:
config: config:
scrape_configs: scrape_configs:
@ -22,13 +28,17 @@ receivers:
static_configs: static_configs:
- targets: ['localhost:2019'] - targets: ['localhost:2019']
- job_name: 'miniflux' - job_name: 'miniflux'
scrape_interval: 30s scrape_interval: 1m
static_configs: static_configs:
- targets: ['localhost:8002'] - targets: ['localhost:8002']
- job_name: 'restic-rest' - job_name: 'restic-rest'
scrape_interval: 30s scrape_interval: 1m
static_configs: static_configs:
- targets: ['localhost:8000'] - targets: ['localhost:8000']
- job_name: 'forgejo'
scrape_interval: 1m
static_configs:
- targets: ['localhost:8027']
# Processors specify what happens with the received data # Processors specify what happens with the received data
processors: processors:
@ -59,6 +69,15 @@ exporters:
# x-greptime-pipeline-name: '<pipeline_name>' # x-greptime-pipeline-name: '<pipeline_name>'
tls: tls:
insecure: true insecure: true
otlphttp/logs_journald:
endpoint: '{{ otel_logs_endpoint }}'
headers:
# x-greptime-db-name: '<your_db_name>'
x-greptime-log-table-name: 'journald_logs'
x-greptime-pipeline-name: 'journald_logs'
# x-greptime-pipeline-name: 'greptime_identity'
tls:
insecure: true
otlphttp/metrics: otlphttp/metrics:
endpoint: '{{ otel_metrics_endpoint }}' endpoint: '{{ otel_metrics_endpoint }}'
@ -69,6 +88,8 @@ exporters:
# x-greptime-db-name: '<your_db_name>' # x-greptime-db-name: '<your_db_name>'
tls: tls:
insecure: true insecure: true
debug:
verbosity: normal
# Service pipelines pull the configured receivers, processors, and exporters together # Service pipelines pull the configured receivers, processors, and exporters together
# into pipelines that process data # into pipelines that process data
@ -88,3 +109,7 @@ service:
receivers: [otlp] receivers: [otlp]
processors: [batch, resourcedetection] processors: [batch, resourcedetection]
exporters: [otlphttp/logs] exporters: [otlphttp/logs]
logs/journald:
receivers: [journald]
processors: [batch, resourcedetection]
exporters: [debug, otlphttp/logs_journald]

View file

@ -0,0 +1,7 @@
---
- name: 'restart restic-rest-server'
ansible.builtin.service:
name: 'restic-rest-server'
state: 'restarted'
daemon_reload: true

View file

@ -0,0 +1,58 @@
---
# Install restic's rest-server as a systemd service.
# Expects: restic_rest_version, restic_rest_data_dir;
# notifies the 'restart restic-rest-server' handler.
- name: Ensure download directory is present
  ansible.builtin.file:
    path: "/opt/restic-rest-{{ restic_rest_version }}"
    state: directory
    mode: '0755'

- name: Ensure binary is downloaded
  ansible.builtin.unarchive:
    src: "https://github.com/restic/rest-server/releases/download/v{{ restic_rest_version }}/rest-server_{{ restic_rest_version }}_linux_amd64.tar.gz"
    remote_src: true
    # BUG FIX: dest/creates were relative ("opt/restic-rest-…") and did not
    # match the "/opt/restic-rest-…" directory created above — extraction
    # landed in (or failed from) the task's working directory instead.
    dest: "/opt/restic-rest-{{ restic_rest_version }}"
    creates: "/opt/restic-rest-{{ restic_rest_version }}/rest-server_{{ restic_rest_version }}_linux_amd64/rest-server"
    include:
      - "rest-server_{{ restic_rest_version }}_linux_amd64/rest-server"
  register: res

- name: Ensure binary is copied to correct location
  ansible.builtin.copy:
    src: "/opt/restic-rest-{{ restic_rest_version }}/rest-server_{{ restic_rest_version }}_linux_amd64/rest-server"
    remote_src: true
    dest: '/usr/local/bin/restic-rest-server'
    owner: 'root'
    group: 'root'
    mode: '0755'
  # Only re-install (and restart) when a new archive was actually fetched.
  when: res is changed
  notify: 'restart restic-rest-server'

- name: Ensure system group exists
  ansible.builtin.group:
    name: 'restic'
    gid: 202
    system: true
    state: present

- name: Ensure system user exists
  ansible.builtin.user:
    name: 'restic'
    group: 'restic'
    uid: 202
    system: true
    create_home: false
    state: present

# NOTE(review): assumes restic_rest_data_dir already exists as a btrfs
# subvolume; this task only enforces ownership/permissions on it.
- name: Ensure data subvolume permissions are correct
  ansible.builtin.file:
    path: '{{ restic_rest_data_dir }}'
    state: directory
    mode: '0755'
    owner: 'restic'
    group: 'restic'

- name: Ensure service file is present
  ansible.builtin.template:
    src: 'restic-rest-server.service.j2'
    dest: '/lib/systemd/system/restic-rest-server.service'
    owner: 'root'
    group: 'root'
    mode: '0644'
  notify: 'restart restic-rest-server'

View file

@ -0,0 +1,14 @@
[Unit]
Description=Restic REST server
# Start only once the network is up.
# NOTE(review): systemd docs recommend Wants=network-online.target over
# Requires= (Requires= fails/stops this unit together with the target) —
# confirm the hard dependency is intended.
After=network.target network-online.target
Requires=network-online.target
[Service]
# The ExecStart binary is the main process (no forking).
Type=exec
# Run unprivileged as the dedicated restic user/group created by the role.
User=restic
Group=restic
# --no-auth disables HTTP authentication; --prometheus exposes /metrics.
# NOTE(review): --no-auth serves the repositories unauthenticated — confirm
# the listener is reachable only from localhost/trusted networks.
ExecStart=/usr/local/bin/restic-rest-server --path {{ restic_rest_data_dir }} --no-auth --prometheus
Restart=always
[Install]
WantedBy=multi-user.target

View file

@ -3,7 +3,7 @@
# #
# All types: # All types:
# name: (required) unique identifier, used in unit and script filenames # name: (required) unique identifier, used in unit and script filenames
# type: (required) backup template to use: btrfs-subvolume, podman-postgres, postgres # type: (required) backup template to use: btrfs-subvolume, podman-mysql, podman-postgres, postgres, echo-test
# user: (optional) user to run the backup as; defaults to root # user: (optional) user to run the backup as; defaults to root
# group: (optional) group to run the backup as; defaults to backups # group: (optional) group to run the backup as; defaults to backups
# timer_delay_sec: (optional) RandomizedDelaySec for the timer; defaults to 30 minutes # timer_delay_sec: (optional) RandomizedDelaySec for the timer; defaults to 30 minutes
@ -11,6 +11,12 @@
# btrfs-subvolume: # btrfs-subvolume:
# path: (required) path to the btrfs subvolume to back up # path: (required) path to the btrfs subvolume to back up
# #
# podman-mysql:
# container: (required) name of the podman container running mysql/mariadb
# mysql_user: (required) mysql user to connect as
# mysql_password: (required) mysql password for the user
# database: (required) mysql database to dump
#
# podman-postgres: # podman-postgres:
# container: (required) name of the podman container running postgres # container: (required) name of the podman container running postgres
# pg_user: (required) postgres user to connect as # pg_user: (required) postgres user to connect as
@ -20,6 +26,10 @@
# pwd: (required) working directory for podman compose # pwd: (required) working directory for podman compose
# user: (required) postgres user to connect as # user: (required) postgres user to connect as
# database: (required) postgres database to dump # database: (required) postgres database to dump
#
# echo-test:
# lines: (optional) number of log lines to emit; defaults to 10
# interval_sec: (optional) seconds to sleep between lines; defaults to 1
backups: [] backups: []
# Restic REST server URL to publish backups to # Restic REST server URL to publish backups to

View file

@ -0,0 +1,5 @@
[Unit]
# Slice grouping all backup service units (Slice=backup.slice in
# backup.service.j2) so they share one resource budget.
Description=Backup services slice
[Slice]
# Cap the combined CPU usage of every unit in this slice at 25% of one CPU,
# keeping backups from starving foreground services.
CPUQuota=25%

View file

@ -35,6 +35,15 @@
loop: "{{ backups }}" loop: "{{ backups }}"
when: item.user is defined when: item.user is defined
- name: Ensure backup slice unit is present
ansible.builtin.copy:
src: "backup.slice"
dest: "/etc/systemd/system/backup.slice"
owner: root
group: root
mode: "0644"
notify: Reload systemd
- name: Ensure systemd service unit is present for each backup - name: Ensure systemd service unit is present for each backup
ansible.builtin.template: ansible.builtin.template:
src: "backup.service.j2" src: "backup.service.j2"

View file

@ -4,6 +4,7 @@ After=network.target
[Service] [Service]
Type=oneshot Type=oneshot
Slice=backup.slice
User={{ item.user | default('root') }} User={{ item.user | default('root') }}
Group={{ item.group | default('backups') }} Group={{ item.group | default('backups') }}

View file

@ -0,0 +1,8 @@
#!/usr/bin/env bash
# echo-test backup script: emits numbered log lines to stdout so the backup
# pipeline (systemd unit -> journald -> log shipping) can be smoke-tested.
#
# Usage: echo-test.sh [count] [interval_sec]
#   count         number of lines to emit; default 6 (the original output)
#   interval_sec  seconds to sleep between lines; default 0 (no sleep)
set -euo pipefail

count="${1:-6}"
interval="${2:-0}"

# Generalized from six hard-coded echo statements; with no arguments the
# output is byte-identical to the original ("log line 1" .. "log line 6").
for ((i = 1; i <= count; i++)); do
  echo "log line ${i}"
  # Sleep only between lines, and only when an interval was requested.
  if (( i < count )) && [[ "${interval}" != "0" ]]; then
    sleep "${interval}"
  fi
done

View file

@ -7,13 +7,14 @@
- name: Ensure compressed binary is downloaded - name: Ensure compressed binary is downloaded
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://github.com/restic/restic/releases/download/v{{ restic_version }}/restic_{{ restic_version }}_linux_arm64.bz2" url: "https://github.com/restic/restic/releases/download/v{{ restic_version }}/restic_{{ restic_version }}_linux_amd64.bz2"
dest: "/opt/restic/{{ restic_version }}/restic-{{ restic_version }}.bz2" dest: "/opt/restic/{{ restic_version }}/restic-{{ restic_version }}.bz2"
register: res register: res
- name: Ensure binary is decompressed - name: Ensure binary is decompressed
ansible.builtin.shell: ansible.builtin.shell:
cmd: "bunzip2 -k /opt/restic/{{ restic_version }}/restic-{{ restic_version }}.bz2" cmd: "bunzip2 -k /opt/restic/{{ restic_version }}/restic-{{ restic_version }}.bz2"
creates: "/opt/restic/{{ restic_version }}/restic-{{ restic_version }}"
when: 'res.changed' when: 'res.changed'
- name: Ensure binary is copied to correct location - name: Ensure binary is copied to correct location

View file

@ -1,24 +1,24 @@
--- ---
- name: Ensure download directory is present - name: Ensure download directory is present
ansible.builtin.file: ansible.builtin.file:
path: "/home/debian/restic-{{ restic_version }}" path: "/opt/restic-{{ restic_version }}"
state: directory state: directory
mode: '0755' mode: '0755'
- name: Ensure compressed binary is downloaded - name: Ensure compressed binary is downloaded
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://github.com/restic/restic/releases/download/v{{ restic_version }}/restic_{{ restic_version }}_linux_arm64.bz2" url: "https://github.com/restic/restic/releases/download/v{{ restic_version }}/restic_{{ restic_version }}_linux_arm64.bz2"
dest: "/home/debian/restic-{{ restic_version }}/restic-{{ restic_version }}.bz2" dest: "/opt/restic-{{ restic_version }}/restic-{{ restic_version }}.bz2"
register: res register: res
- name: Ensure binary is decompressed - name: Ensure binary is decompressed
ansible.builtin.shell: ansible.builtin.shell:
cmd: "bunzip2 -k /home/debian/restic-{{ restic_version }}/restic-{{ restic_version }}.bz2" cmd: "bunzip2 -k /opt/restic-{{ restic_version }}/restic-{{ restic_version }}.bz2"
when: 'res.changed' when: 'res.changed'
- name: Ensure binary is copied to correct location - name: Ensure binary is copied to correct location
ansible.builtin.copy: ansible.builtin.copy:
src: "/home/debian/restic-{{ restic_version }}/restic-{{ restic_version }}" src: "/opt/restic-{{ restic_version }}/restic-{{ restic_version }}"
remote_src: true remote_src: true
dest: '/usr/local/bin/restic' dest: '/usr/local/bin/restic'
owner: 'root' owner: 'root'