Compare commits


1 commit

Author SHA1 Message Date
Jef Roosens 42d73911a1
feat: add basis for signal handling
ci/woodpecker/push/release unknown status
ci/woodpecker/push/clippy Pipeline failed
ci/woodpecker/push/lint Pipeline failed
ci/woodpecker/push/build Pipeline failed
2023-06-06 16:57:59 +02:00
33 changed files with 391 additions and 2714 deletions


@@ -1,3 +1,2 @@
[alias]
runs = "run -- --config data/config --backup data/backups --world data/worlds --layers 2min,2,4,4;3min,3,2,2"
runrs = "run --release -- --config data/config --backup data/backups --world data/worlds --layers 2min,2,4,4;3min,3,2,2"
runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"


@@ -1,5 +0,0 @@
*
!Cargo.toml
!Cargo.lock
!src/

.gitignore

@@ -19,4 +19,4 @@ target/
# testing files
*.jar
data*/
data/


@@ -5,17 +5,17 @@ matrix:
platform: "linux/${ARCH}"
when:
branch:
branches:
exclude: [main]
event: push
steps:
pipeline:
build:
image: 'rust:1.71-alpine3.18'
image: 'rust:1.70-alpine3.18'
commands:
- apk add --no-cache build-base
- cargo build --verbose
- cargo test --verbose
# Binaries, even debug ones, should be statically compiled
- '[ "$(readelf -d target/debug/alex | grep NEEDED | wc -l)" = 0 ]'
when:
event: [push]


@@ -1,13 +1,13 @@
platform: 'linux/amd64'
when:
branch:
exclude: [ main ]
event: push
branches:
exclude: [main]
steps:
pipeline:
clippy:
image: 'rust:1.71'
image: 'rust:1.70'
commands:
- rustup component add clippy
- cargo clippy -- --no-deps -Dwarnings
when:
event: [push]


@@ -1,13 +1,13 @@
platform: 'linux/amd64'
when:
branch:
exclude: [ main ]
event: push
branches:
exclude: [main]
steps:
pipeline:
lint:
image: 'rust:1.71'
image: 'rust:1.70'
commands:
- rustup component add rustfmt
- cargo fmt -- --check
when:
event: [push]


@@ -4,19 +4,19 @@ matrix:
- 'linux/arm64'
platform: ${PLATFORM}
branches: [ main ]
when:
event: tag
steps:
pipeline:
build:
image: 'rust:1.71-alpine3.18'
image: 'rust:1.70-alpine3.18'
commands:
- apk add --no-cache build-base
- cargo build --release --verbose
# Ensure the release binary is also statically compiled
- '[ "$(readelf -d target/release/alex | grep NEEDED | wc -l)" = 0 ]'
- du -h target/release/alex
when:
event: tag
publish:
image: 'curlimages/curl'
@@ -28,3 +28,5 @@ steps:
--user "Chewing_Bever:$GITEA_PASSWORD"
--upload-file target/release/alex
https://git.rustybever.be/api/packages/Chewing_Bever/generic/alex/"${CI_COMMIT_TAG}"/alex-"$(echo '${PLATFORM}' | sed 's:/:-:g')"
when:
event: tag


@@ -7,85 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/alex/src/branch/dev)
## [0.4.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.4.1)
### Changed
* Moved PKGBUILD to separate repo
* Properly update lock file
## [0.4.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.4.0)
### Added
* Extract command for working with the output of export
* Arch packages are now published to my bur repo
* Allow passing configuration variables from TOML file
### Changed
* Export command no longer reads backups that do not contribute to the final
state
* Running backups no longer block stdin input or shutdown
* Env vars `ALEX_CONFIG_DIR`, `ALEX_WORLD_DIR` and `ALEX_BACKUP_DIR` renamed to
`ALEX_CONFIG`, `ALEX_WORLD` and `ALEX_BACKUP` respectively
## [0.3.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.3.1)
### Added
* Export command to export any backup as a new full backup
## [0.3.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.3.0)
### Added
* Incremental backups
* Chain length describes how many incremental backups to create from the
same full backup
* "backups to keep" has been replaced by "chains to keep"
* Server type & version and backup size are now stored as metadata in the
metadata file
* Backup layers
* Store multiple chains of backups in parallel, configuring each with
different parameters (son-father-grandfather principle)
* CLI commands for creating, restoring & listing backups
### Changed
* Running the server now uses the `run` CLI subcommand
* `server_type` and `server_version` arguments are now optional flags
### Removed
* `max_backups` setting
## [0.2.2](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.2)
### Fixed
* Use correct env var for backup directory
## [0.2.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.1)
### Added
* `--dry` flag to inspect command that will be run
### Changed
* JVM flags now narrowly follow Aikar's specifications
## [0.2.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.0)
### Added
* Rudimentary signal handling for gently stopping server
* A single stop signal will trigger the Java process to shut down, but Alex
still expects to be run from a utility such as dumb-init
* Properly back up entire config directory
* Inject Java optimisation flags
## [0.1.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.1.1)
### Changed

Cargo.lock

@@ -10,14 +10,11 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "alex"
version = "0.4.1"
version = "0.1.0"
dependencies = [
"chrono",
"clap",
"figment",
"flate2",
"serde",
"serde_json",
"signal-hook",
"tar",
]
@@ -54,15 +51,15 @@ dependencies = [
[[package]]
name = "anstyle"
version = "1.0.1"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d"
[[package]]
name = "anstyle-parse"
version = "0.2.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee"
dependencies = [
"utf8parse",
]
@@ -78,20 +75,14 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
version = "1.0.2"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
"anstyle",
"windows-sys",
]
[[package]]
name = "atomic"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
[[package]]
name = "autocfg"
version = "1.1.0"
@@ -104,12 +95,6 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
[[package]]
name = "bumpalo"
version = "3.13.0"
@@ -118,12 +103,9 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "cc"
version = "1.0.82"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01"
dependencies = [
"libc",
]
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
[[package]]
name = "cfg-if"
@@ -141,7 +123,6 @@ dependencies = [
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"time",
"wasm-bindgen",
"winapi",
@@ -149,9 +130,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.3.21"
version = "4.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
checksum = "b4ed2379f8603fa2b7509891660e802b88c70a79a6427a70abb5968054de2c28"
dependencies = [
"clap_builder",
"clap_derive",
@@ -160,21 +141,22 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.3.21"
version = "4.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
checksum = "72394f3339a76daf211e57d4bcb374410f3965dcc606dd0e03738c7888766980"
dependencies = [
"anstream",
"anstyle",
"bitflags",
"clap_lex",
"strsim",
]
[[package]]
name = "clap_derive"
version = "4.3.12"
version = "4.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
checksum = "59e9ef9a08ee1c0e1f2e162121665ac45ac3783b0f897db7244ae75ad9a8f65b"
dependencies = [
"heck",
"proc-macro2",
@@ -209,17 +191,11 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
dependencies = [
"errno-dragonfly",
"libc",
@@ -236,25 +212,11 @@ dependencies = [
"libc",
]
[[package]]
name = "figment"
version = "0.10.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4547e226f4c9ab860571e070a9034192b3175580ecea38da34fcdb53a018c9a5"
dependencies = [
"atomic",
"pear",
"serde",
"toml",
"uncased",
"version_check",
]
[[package]]
name = "filetime"
version = "0.2.22"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153"
dependencies = [
"cfg-if",
"libc",
@@ -272,12 +234,6 @@ dependencies = [
"miniz_oxide",
]
[[package]]
name = "hashbrown"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
[[package]]
name = "heck"
version = "0.4.1"
@@ -286,15 +242,15 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.3.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]]
name = "iana-time-zone"
version = "0.1.57"
version = "0.1.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@@ -314,70 +270,54 @@ dependencies = [
]
[[package]]
name = "indexmap"
version = "2.0.0"
name = "io-lifetimes"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
"equivalent",
"hashbrown",
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "inlinable_string"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb"
[[package]]
name = "is-terminal"
version = "0.4.9"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f"
dependencies = [
"hermit-abi",
"io-lifetimes",
"rustix",
"windows-sys",
]
[[package]]
name = "itoa"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "js-sys"
version = "0.3.64"
version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.147"
version = "0.2.144"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1"
[[package]]
name = "linux-raw-sys"
version = "0.4.5"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "log"
version = "0.4.20"
version = "0.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]]
name = "miniz_oxide"
@@ -390,146 +330,65 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.16"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.18.0"
version = "1.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "pear"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61a386cd715229d399604b50d1361683fe687066f42d56f54be995bc6868f71c"
dependencies = [
"inlinable_string",
"pear_codegen",
"yansi",
]
[[package]]
name = "pear_codegen"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da9f0f13dac8069c139e8300a6510e3f4143ecf5259c60b116a9b271b4ca0d54"
dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
"syn",
]
checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b"
[[package]]
name = "proc-macro2"
version = "1.0.66"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "proc-macro2-diagnostics"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
dependencies = [
"proc-macro2",
"quote",
"syn",
"version_check",
"yansi",
]
[[package]]
name = "quote"
version = "1.0.32"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965"
checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.3.5"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags 1.3.2",
"bitflags",
]
[[package]]
name = "rustix"
version = "0.38.8"
version = "0.37.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f"
checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d"
dependencies = [
"bitflags 2.4.0",
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "ryu"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "serde"
version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
dependencies = [
"serde",
]
[[package]]
name = "signal-hook"
version = "0.3.17"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801"
checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9"
dependencies = [
"libc",
"signal-hook-registry",
@@ -552,9 +411,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "2.0.28"
version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e"
dependencies = [
"proc-macro2",
"quote",
@@ -563,9 +422,9 @@ dependencies = [
[[package]]
name = "tar"
version = "0.4.40"
version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb"
checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6"
dependencies = [
"filetime",
"libc",
@@ -583,54 +442,11 @@ dependencies = [
"winapi",
]
[[package]]
name = "toml"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.19.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "uncased"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b9bc53168a4be7402ab86c3aad243a84dd7381d09be0eddc81280c1da95ca68"
dependencies = [
"version_check",
]
[[package]]
name = "unicode-ident"
version = "1.0.11"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
[[package]]
name = "utf8parse"
@@ -638,12 +454,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
@@ -652,9 +462,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "wasm-bindgen"
version = "0.2.87"
version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@@ -662,9 +472,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.87"
version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb"
dependencies = [
"bumpalo",
"log",
@@ -677,9 +487,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.87"
version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -687,9 +497,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.87"
version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8"
dependencies = [
"proc-macro2",
"quote",
@@ -700,9 +510,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.87"
version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93"
[[package]]
name = "winapi"
@@ -746,9 +556,9 @@ dependencies = [
[[package]]
name = "windows-targets"
version = "0.48.1"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
@@ -801,26 +611,11 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5504cc7644f4b593cbc05c4a55bf9bd4e94b867c3c0bd440934174d50482427d"
dependencies = [
"memchr",
]
[[package]]
name = "xattr"
version = "1.0.1"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985"
checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc"
dependencies = [
"libc",
]
[[package]]
name = "yansi"
version = "1.0.0-rc.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377"


@@ -1,6 +1,6 @@
[package]
name = "alex"
version = "0.4.1"
version = "0.1.0"
description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
authors = ["Jef Roosens"]
edition = "2021"
@@ -12,12 +12,10 @@ edition = "2021"
tar = "0.4.38"
# Used to compress said tarballs using gzip
flate2 = "1.0.26"
chrono = { version = "0.4.26", features = ["serde"] }
# Used for backup filenames
chrono = "0.4.26"
clap = { version = "4.3.1", features = ["derive", "env"] }
signal-hook = "0.3.15"
serde = { version = "1.0.164", features = ["derive"] }
serde_json = "1.0.96"
figment = { version = "0.10.10", features = ["env", "toml"] }
[profile.release]
lto = "fat"


@@ -1,67 +0,0 @@
FROM rust:1.70-alpine3.18 AS builder
ARG DI_VER=1.2.5
WORKDIR /app
COPY . ./
RUN apk add --no-cache build-base unzip curl && \
curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.tar.gz" | tar -xzf - && \
cd "dumb-init-${DI_VER}" && \
make SHELL=/bin/sh && \
mv dumb-init ..
RUN cargo build && \
[ "$(readelf -d target/debug/alex | grep NEEDED | wc -l)" = 0 ]
# We use ${:-} instead of a default value because the argument is always passed
# to the build, it'll just be blank most likely
FROM eclipse-temurin:18-jre-alpine
# Build arguments
ARG MC_VERSION=1.19.4
ARG PAPERMC_VERSION=525
RUN addgroup -Sg 1000 paper && \
adduser -SHG paper -u 1000 paper
# Create worlds and config directory
WORKDIR /app
RUN mkdir -p worlds config/cache backups
# Download server file
ADD "https://papermc.io/api/v2/projects/paper/versions/$MC_VERSION/builds/$PAPERMC_VERSION/downloads/paper-$MC_VERSION-$PAPERMC_VERSION.jar" server.jar
# Make sure the server user can access all necessary folders
RUN chown -R paper:paper /app
# Store the cache in an anonymous volume, which means it won't get stored in the other volumes
VOLUME /app/config/cache
VOLUME /app/backups
COPY --from=builder /app/dumb-init /bin/dumb-init
COPY --from=builder /app/target/debug/alex /bin/alex
RUN chmod +x /bin/alex
# Default value to keep users from eating up all ram accidentally
ENV ALEX_CONFIG=/app/config \
ALEX_WORLD=/app/worlds \
ALEX_BACKUP=/app/backups \
ALEX_SERVER=paper \
ALEX_XMS=1024 \
ALEX_XMX=2048 \
ALEX_JAR=/app/server.jar \
ALEX_SERVER_VERSION="${MC_VERSION}-${PAPERMC_VERSION}" \
ALEX_LAYERS="2min,2,4,4;3min,3,2,2"
# Document exposed ports
EXPOSE 25565
# Switch to non-root user
USER paper:paper
ENTRYPOINT ["/bin/dumb-init", "--"]
CMD ["/bin/alex", "run"]

README.md

@@ -1,135 +1,3 @@
# Alex
# mc-wrapper
Alex is a wrapper around a typical Minecraft server process. It acts as the
parent process and sits between the user's input and the server's stdin.
This allows Alex to support additional commands that execute Rust code, notably
creating periodic backups.
## Installation
Alex is distributed as statically compiled binaries for Linux amd64 and arm64.
These can be found
[here](https://git.rustybever.be/Chewing_Bever/alex/packages).
### Arch
Arch users can install prebuilt `x86_64` & `aarch64` packages from my `bur`
repository. Add the following at the bottom of your `pacman.conf`:
```toml
[bur]
Server = https://arch.r8r.be/$repo/$arch
SigLevel = Optional
```
If you prefer building the package yourself, the PKGBUILD can be found
[here](https://git.rustybever.be/bur/alex-mc).
### Dockerfiles
You can easily install alex in your Docker images by letting Docker download it
for you. Add the following to your Dockerfile (replace with your required
version & architecture):
```dockerfile
ADD "https://git.rustybever.be/api/packages/Chewing_Bever/generic/alex/0.2.2/alex-linux-amd64" /bin/alex
```
## Why
The primary use case for this is backups. A common problem I've had with
Minecraft backups is that they fail because the server is writing to one of
the region files as the backup is being created. Alex solves this by sending
`save-off` and `save-all` to the server before creating the tarball.
Afterwards, saving is enabled again with `save-on`.
## Features
* Create safe backups as gzip-compressed tarballs using the `backup` command
* Automatically create backups periodically
* Properly configures the process (working directory, optimisation flags)
* Configure everything as CLI arguments or environment variables
## Configuration
Most information can be retrieved easily by looking at the help command:
```
Wrapper around Minecraft server processes, designed to complement Docker image installations.
Usage: alex [OPTIONS] <COMMAND>
Commands:
run Run the server
backup Interact with the backup system without starting a server
help Print this message or the help of the given subcommand(s)
Options:
--config <CONFIG_DIR>
Directory where configs are stored, and where the server will run [env: ALEX_CONFIG_DIR=] [default: .]
--world <WORLD_DIR>
Directory where world files will be saved [env: ALEX_WORLD_DIR=] [default: ../worlds]
--backup <BACKUP_DIR>
Directory where backups will be stored [env: ALEX_BACKUP_DIR=] [default: ../backups]
--layers <LAYERS>
What backup layers to employ, provided as a list of tuples name,frequency,chains,chain_len delimited by semicolons (;) [env: ALEX_LAYERS=]
--server <SERVER>
Type of server [env: ALEX_SERVER=] [default: unknown] [possible values: unknown, paper, forge, vanilla]
--server-version <SERVER_VERSION>
Version string for the server, e.g. 1.19.4-545 [env: ALEX_SERVER_VERSION=] [default: ]
-h, --help
Print help
-V, --version
Print version
```
### Choosing layer parameters
One part of the configuration that does require some clarification is the layer
system. Alex can manage an arbitrary number of backup layers, each having its
own configuration. These layers can either use incremental or full backups,
depending on how they're configured.
These layers mostly correspond to the grandfather-father-son backup rotation
scheme. For example, one could have a layer that creates incremental backups
every 30 minutes, which are stored for 24 hours. This gives you 24 hours of
granular rollback in case your server suffers a crash. A second layer might
create a full backup every 24 hours, with backups being stored for 7 days. This
gives you 7 days' worth of backups with a granularity of 24 hours. This
approach allows for greater versatility while not having to store a large
amount of data. Thanks to incremental backups, frequent backups don't have to
take long at all.
A layer consists of 4 pieces of metadata:
* A name, which will be used in the file system and the in-game notifications
* The frequency, which describes in minutes how frequently a backup should be
created
* How many chains should be kept at all times
* How long each chain should be
These last two require some clarification. In Alex, a "chain" describes an
initial full backup and zero or more incremental backups that are created from
that initial full backup. This concept exists because an incremental backup has
no real meaning if its ancestors are not known. To restore one of these chains,
all backups in the chain need to be restored in-order. Note that a chain length
of 1 disables incremental backups entirely.
How many backups to keep is defined by how many chains should be stored.
Because an incremental backup needs to have its ancestors in order to be
restored, we can't simply "keep the last n backups", as this would break these
chains. Therefore, you configure how many backups to store using these chains.
For example, if you configure a layer to store 5 chains of length 4, you will
have 20 archive files on disk, namely 5 full backups and 15 incremental
backups. Note that Alex applies these rules to *full* chains. An in-progress
chain does not count towards this total. Therefore, you can have up to `n-1`
additional archive files, with `n` being the chain length, on disk.
To look at it from another perspective, say we wish to have a granularity of 30
minutes for a timespan of 24 hours. Then we could configure the layer to only
save a single chain, with a chain length of 48. If we prefer to have a few full
backups instead of a long chain of incremental backups, we could instead use a
chain length of 12 and store 4 chains. Either way, the total comes out to 48,
which spans 24 hours if we make a backup every 30 minutes.
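Expressed in the TOML configuration format of the example config further down this diff, that last single-chain setup might look as follows (a sketch assuming the same `[[layers]]` keys as the sample file):
```toml
# 24 hours of coverage at 30-minute granularity:
# a single chain of 1 full backup followed by 47 incremental backups
[[layers]]
name = "30min"
frequency = 30 # minutes between backups
chains = 1     # full chains to keep
chain_len = 48 # backups per chain
```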
A wrapper around a standard Minecraft server, written in Rust.


@@ -1,16 +0,0 @@
config = "data/config"
world = "data/worlds"
backup = "data/backups"
server = "Paper"
# [[layers]]
# name = "2min"
# frequency = 2
# chains = 4
# chain_len = 4
# [[layers]]
# name = "3min"
# frequency = 3
# chains = 2
# chain_len = 2


@@ -1,162 +0,0 @@
use std::{borrow::Borrow, fmt};
use serde::{Deserialize, Serialize};
use super::State;
/// Represents the changes relative to the previous backup
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Delta {
/// What files were added/modified in each part of the tarball.
pub added: State,
/// What files were removed in this backup, in comparison to the previous backup. For full
/// backups, this will always be empty, as they do not consider previous backups.
/// The map stores a separate list for each top-level directory, as the contents of these
/// directories can come from different source directories.
pub removed: State,
}
impl Delta {
pub fn new() -> Self {
Self {
added: State::new(),
removed: State::new(),
}
}
/// Returns whether the delta is empty by checking whether both its added and removed state
/// return true for their `is_empty`.
#[allow(dead_code)]
pub fn is_empty(&self) -> bool {
self.added.is_empty() && self.removed.is_empty()
}
/// Calculate the union of this delta with another delta.
///
/// The union of two deltas is a delta that produces the same state as if you were to apply
/// both deltas in-order. Note that this operation is not commutative.
pub fn union(&self, delta: &Self) -> Self {
let mut out = self.clone();
for (dir, added) in delta.added.iter() {
// Files that were removed in the current state, but added in the new state, are no
// longer removed
if let Some(orig_removed) = out.removed.get_mut(dir) {
orig_removed.retain(|k| !added.contains(k));
}
// Newly added files are added to the state as well
if let Some(orig_added) = out.added.get_mut(dir) {
orig_added.extend(added.iter().cloned());
} else {
out.added.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in delta.removed.iter() {
// Files that were originally added, but now deleted are removed from the added list
if let Some(orig_added) = out.added.get_mut(dir) {
orig_added.retain(|k| !removed.contains(k));
}
// Newly removed files are added to the state as well
if let Some(orig_removed) = out.removed.get_mut(dir) {
orig_removed.extend(removed.iter().cloned());
} else {
out.removed.insert(dir.clone(), removed.clone());
}
}
out
}
// Calculate the difference between this delta and the other delta.
//
// The difference simply means removing all adds and removes that are also performed in the
// other delta.
pub fn difference(&self, other: &Self) -> Self {
let mut out = self.clone();
for (dir, added) in out.added.iter_mut() {
// If files are added in the other delta, we don't add them in this delta
if let Some(other_added) = other.added.get(dir) {
added.retain(|k| !other_added.contains(k));
};
}
for (dir, removed) in out.removed.iter_mut() {
// If files are removed in the other delta, we don't remove them in this delta either
if let Some(other_removed) = other.removed.get(dir) {
removed.retain(|k| !other_removed.contains(k));
}
}
out
}
// Calculate the strict difference between this delta and the other delta.
//
// The strict difference is a difference where all operations that would be overwritten by the
// other delta are also removed (a.k.a. adding a file after removing it, or vice versa)
pub fn strict_difference(&self, other: &Self) -> Self {
let mut out = self.difference(other);
for (dir, added) in out.added.iter_mut() {
// Remove additions that are removed in the other delta
if let Some(other_removed) = other.removed.get(dir) {
added.retain(|k| !other_removed.contains(k));
}
}
for (dir, removed) in out.removed.iter_mut() {
// Remove removals that are re-added in the other delta
if let Some(other_added) = other.added.get(dir) {
removed.retain(|k| !other_added.contains(k));
}
}
out
}
/// Given a chain of deltas, ordered from last to first, calculate the "contribution" for each
/// state.
///
/// The contribution of a delta in a given chain is defined as the parts of the state produced
/// by this chain that are actually provided by this delta. This comes down to calculating the
/// strict difference of this delta and all of its successive deltas.
pub fn contributions<I>(deltas: I) -> Vec<State>
where
I: IntoIterator,
I::Item: Borrow<Delta>,
{
let mut contributions: Vec<State> = Vec::new();
let mut deltas = deltas.into_iter();
if let Some(first_delta) = deltas.next() {
// From last to first, we calculate the strict difference of the delta with the union of all its
// following deltas. The list of added files of this difference is the contribution for
// that delta.
contributions.push(first_delta.borrow().added.clone());
let mut union_future = first_delta.borrow().clone();
for delta in deltas {
contributions.push(delta.borrow().strict_difference(&union_future).added);
union_future = union_future.union(delta.borrow());
}
}
// contributions.reverse();
contributions
}
}
impl fmt::Display for Delta {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let added_count: usize = self.added.values().map(|s| s.len()).sum();
let removed_count: usize = self.removed.values().map(|s| s.len()).sum();
write!(f, "+{}-{}", added_count, removed_count)
}
}
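To make the union semantics above concrete, here is a small self-contained sketch (plain `std` collections instead of the crate's `State` type, and only a single directory) mirroring the retain/extend logic of `union`:
```rust
use std::collections::HashSet;

fn main() {
    // Delta 1 adds a.txt and b.txt; delta 2 then removes b.txt and adds c.txt.
    let d1_added: HashSet<&str> = ["a.txt", "b.txt"].into_iter().collect();
    let d2_added: HashSet<&str> = ["c.txt"].into_iter().collect();
    let d2_removed: HashSet<&str> = ["b.txt"].into_iter().collect();

    // Union applied in order (delta 1, then delta 2): additions that the
    // later delta removes drop out, and the later delta's additions merge in.
    let mut union_added = d1_added.clone();
    union_added.retain(|k| !d2_removed.contains(k));
    union_added.extend(d2_added.iter().copied());

    let expected: HashSet<&str> = ["a.txt", "c.txt"].into_iter().collect();
    assert_eq!(union_added, expected); // b.txt ends up in the removed set
}
```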


@@ -1,43 +0,0 @@
use std::io::{self, Write};
/// Wrapper around the Write trait that counts how many bytes have been written in total.
/// Heavily inspired by https://stackoverflow.com/a/42189386
pub struct CountingWrite<W> {
inner: W,
count: usize,
}
impl<W> CountingWrite<W>
where
W: Write,
{
pub fn new(writer: W) -> Self {
Self {
inner: writer,
count: 0,
}
}
pub fn bytes_written(&self) -> usize {
self.count
}
}
impl<W> Write for CountingWrite<W>
where
W: Write,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let res = self.inner.write(buf);
if let Ok(count) = res {
self.count += count;
}
res
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}


@@ -1,46 +0,0 @@
use std::{error::Error, fmt, str::FromStr};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ManagerConfig {
pub name: String,
pub frequency: u32,
pub chains: u64,
pub chain_len: u64,
}
#[derive(Debug)]
pub struct ParseManagerConfigErr;
impl Error for ParseManagerConfigErr {}
impl fmt::Display for ParseManagerConfigErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "parse manager config err")
}
}
impl FromStr for ManagerConfig {
type Err = ParseManagerConfigErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let splits: Vec<&str> = s.split(',').collect();
if let [name, frequency, chains, chain_len] = splits[..] {
let name: String = name.parse().map_err(|_| ParseManagerConfigErr)?;
let frequency: u32 = frequency.parse().map_err(|_| ParseManagerConfigErr)?;
let chains: u64 = chains.parse().map_err(|_| ParseManagerConfigErr)?;
let chain_len: u64 = chain_len.parse().map_err(|_| ParseManagerConfigErr)?;
Ok(ManagerConfig {
name,
chains,
chain_len,
frequency,
})
} else {
Err(ParseManagerConfigErr)
}
}
}
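A hypothetical usage sketch for this parser; the `2min,2,4,4` tuple is the same one used in the repository's cargo aliases and `ALEX_LAYERS` default:
```rust
// Assumes ManagerConfig and its FromStr impl from above are in scope.
fn main() {
    let cfg: ManagerConfig = "2min,2,4,4".parse().unwrap();
    assert_eq!(cfg.name, "2min");
    assert_eq!(cfg.frequency, 2); // minutes between backups
    assert_eq!(cfg.chains, 4);    // full chains to keep
    assert_eq!(cfg.chain_len, 4); // backups per chain
    // Tuples with the wrong number of fields fail to parse.
    assert!("not,enough".parse::<ManagerConfig>().is_err());
}
```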


@@ -1,149 +0,0 @@
use std::{
collections::HashMap,
io,
path::{Path, PathBuf},
};
use chrono::Utc;
use serde::{Deserialize, Serialize};
use super::{Manager, ManagerConfig};
/// Manages a collection of backup layers, allowing them to be utilized as a single object.
pub struct MetaManager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
backup_dir: PathBuf,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
managers: HashMap<String, Manager<T>>,
}
impl<T> MetaManager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
pub fn new<P: Into<PathBuf>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
) -> Self {
MetaManager {
backup_dir: backup_dir.into(),
dirs,
default_metadata,
managers: HashMap::new(),
}
}
/// Add a new manager to track, initializing it first.
pub fn add(&mut self, config: &ManagerConfig) -> io::Result<()> {
// Backup dir itself should exist, but we control its contents, so we can create
// separate directories for each layer
let path = self.backup_dir.join(&config.name);
// If the directory already exists, that's okay
match std::fs::create_dir(&path) {
Ok(()) => (),
Err(e) => match e.kind() {
io::ErrorKind::AlreadyExists => (),
_ => return Err(e),
},
};
let mut manager = Manager::new(
path,
self.dirs.clone(),
self.default_metadata.clone(),
config.chain_len,
config.chains,
chrono::Duration::minutes(config.frequency.into()),
);
manager.load()?;
self.managers.insert(config.name.clone(), manager);
Ok(())
}
/// Convenient wrapper for `add`.
pub fn add_all(&mut self, configs: &Vec<ManagerConfig>) -> io::Result<()> {
for config in configs {
self.add(config)?;
}
Ok(())
}
/// Return the name of the next scheduled layer, if one or more managers are present.
pub fn next_scheduled_layer(&self) -> Option<&str> {
self.managers
.iter()
.min_by_key(|(_, m)| m.next_scheduled_time())
.map(|(k, _)| k.as_str())
}
/// Return the earliest scheduled time for the underlying managers.
pub fn next_scheduled_time(&self) -> Option<chrono::DateTime<Utc>> {
self.managers
.values()
.map(|m| m.next_scheduled_time())
.min()
}
/// Perform a backup cycle for the earliest scheduled manager.
pub fn perform_backup_cycle(&mut self) -> io::Result<()> {
if let Some(manager) = self
.managers
.values_mut()
.min_by_key(|m| m.next_scheduled_time())
{
manager.create_backup()?;
manager.remove_old_backups()
} else {
Ok(())
}
}
/// Create a manual backup for a specific layer
pub fn create_backup(&mut self, layer: &str) -> Option<io::Result<()>> {
if let Some(manager) = self.managers.get_mut(layer) {
let mut res = manager.create_backup();
if res.is_ok() {
res = manager.remove_old_backups();
}
Some(res)
} else {
None
}
}
/// Restore a backup for a specific layer
pub fn restore_backup(
&self,
layer: &str,
start_time: chrono::DateTime<Utc>,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> Option<io::Result<()>> {
self.managers
.get(layer)
.map(|manager| manager.restore_backup(start_time, dirs))
}
pub fn export_backup<P: AsRef<Path>>(
&self,
layer: &str,
start_time: chrono::DateTime<Utc>,
output_path: P,
) -> Option<io::Result<()>> {
self.managers
.get(layer)
.map(|manager| manager.export_backup(start_time, output_path))
}
pub fn managers(&self) -> &HashMap<String, Manager<T>> {
&self.managers
}
}
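A hypothetical sketch of how a MetaManager is assembled (the paths and the `()` metadata are placeholders; the real binary presumably wires in its own metadata type):
```rust
// Assumes MetaManager and ManagerConfig from this module are in scope.
use std::path::PathBuf;

fn example() -> std::io::Result<()> {
    // (path_in_tar, src_dir) pairs, as used throughout this module
    let dirs = vec![
        (PathBuf::from("config"), PathBuf::from("data/config")),
        (PathBuf::from("worlds"), PathBuf::from("data/worlds")),
    ];
    let mut meta = MetaManager::new("data/backups", dirs, ());
    meta.add(&"2min,2,4,4".parse::<ManagerConfig>().unwrap())?;
    meta.perform_backup_cycle()?; // runs the earliest scheduled layer
    Ok(())
}
```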


@@ -1,253 +0,0 @@
mod config;
mod meta;
use std::{
fs::{File, OpenOptions},
io,
path::{Path, PathBuf},
};
use chrono::{SubsecRound, Utc};
use flate2::{write::GzEncoder, Compression};
use serde::{Deserialize, Serialize};
use super::{Backup, BackupType, Delta, State};
use crate::other;
pub use config::ManagerConfig;
pub use meta::MetaManager;
/// Manages a single backup layer consisting of one or more chains of backups.
pub struct Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
backup_dir: PathBuf,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
chain_len: u64,
chains_to_keep: u64,
frequency: chrono::Duration,
chains: Vec<Vec<Backup<T>>>,
}
impl<T> Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
const METADATA_FILE: &str = "alex.json";
pub fn new<P: Into<PathBuf>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
metadata: T,
chain_len: u64,
chains_to_keep: u64,
frequency: chrono::Duration,
) -> Self {
Self {
backup_dir: backup_dir.into(),
dirs,
default_metadata: metadata,
chain_len,
chains_to_keep,
frequency,
chains: Vec::new(),
}
}
/// Create a new backup, either full or incremental, depending on the state of the current
/// chain.
pub fn create_backup(&mut self) -> io::Result<()> {
// We start a new chain if the current chain is complete, or if there isn't a first chain
// yet
if let Some(current_chain) = self.chains.last() {
let current_chain_len: u64 = current_chain.len().try_into().unwrap();
if current_chain_len >= self.chain_len {
self.chains.push(Vec::new());
}
} else {
self.chains.push(Vec::new());
}
let current_chain = self.chains.last_mut().unwrap();
let mut backup = if !current_chain.is_empty() {
let previous_backup = current_chain.last().unwrap();
let previous_state = State::from(current_chain.iter().map(|b| &b.delta));
Backup::create_from(
previous_state,
previous_backup.start_time,
&self.backup_dir,
&self.dirs,
)?
} else {
Backup::create(&self.backup_dir, &self.dirs)?
};
backup.set_metadata(self.default_metadata.clone());
current_chain.push(backup);
self.save()?;
Ok(())
}
/// Delete all backups associated with outdated chains, and forget those chains.
pub fn remove_old_backups(&mut self) -> io::Result<()> {
let chains_to_store: usize = self.chains_to_keep.try_into().unwrap();
if chains_to_store < self.chains.len() {
let mut remove_count: usize = self.chains.len() - chains_to_store;
// We only count finished chains towards the list of stored chains
let chain_len: usize = self.chain_len.try_into().unwrap();
if self.chains.last().unwrap().len() < chain_len {
remove_count -= 1;
}
for chain in self.chains.drain(..remove_count) {
for backup in chain {
let path = Backup::path(&self.backup_dir, backup.start_time);
std::fs::remove_file(path)?;
}
}
self.save()?;
}
Ok(())
}
/// Write the in-memory state to disk.
pub fn save(&self) -> io::Result<()> {
let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
serde_json::to_writer(json_file, &self.chains)?;
Ok(())
}
/// Overwrite the in-memory state with the on-disk state.
pub fn load(&mut self) -> io::Result<()> {
let json_file = match File::open(self.backup_dir.join(Self::METADATA_FILE)) {
Ok(f) => f,
Err(e) => {
// Don't error out if the file isn't there, it will be created when necessary
if e.kind() == io::ErrorKind::NotFound {
self.chains = Vec::new();
return Ok(());
} else {
return Err(e);
}
}
};
self.chains = serde_json::from_reader(json_file)?;
Ok(())
}
/// Calculate the next time a backup should be created. If no backup has been created yet, it
/// will return now.
pub fn next_scheduled_time(&self) -> chrono::DateTime<Utc> {
self.chains
.last()
.and_then(|last_chain| last_chain.last())
.map(|last_backup| last_backup.start_time + self.frequency)
.unwrap_or_else(chrono::offset::Utc::now)
}
/// Search for a chain containing a backup with the specified start time.
///
/// # Returns
///
/// A tuple (chain, index) with index being the index of the found backup in the returned
/// chain.
fn find(&self, start_time: chrono::DateTime<Utc>) -> Option<(&Vec<Backup<T>>, usize)> {
for chain in &self.chains {
if let Some(index) = chain
.iter()
.position(|b| b.start_time.trunc_subsecs(0) == start_time)
{
return Some((chain, index));
}
}
None
}
/// Restore the backup with the given start time by restoring its chain up to and including the
/// backup, in order.
pub fn restore_backup(
&self,
start_time: chrono::DateTime<Utc>,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<()> {
self.find(start_time)
.ok_or_else(|| other("Unknown backup."))
.and_then(|(chain, index)| {
for backup in chain.iter().take(index + 1) {
backup.restore(&self.backup_dir, dirs)?;
}
Ok(())
})
}
/// Export the backup with the given start time as a new full archive.
pub fn export_backup<P: AsRef<Path>>(
&self,
start_time: chrono::DateTime<Utc>,
output_path: P,
) -> io::Result<()> {
self.find(start_time)
.ok_or_else(|| other("Unknown backup."))
.and_then(|(chain, index)| {
match chain[index].type_ {
// A full backup is simply copied to the output path
BackupType::Full => std::fs::copy(
Backup::path(&self.backup_dir, chain[index].start_time),
output_path,
)
.map(|_| ()),
// Incremental backups are exported one by one according to their contribution
BackupType::Incremental => {
let contributions = Delta::contributions(
chain.iter().take(index + 1).map(|b| &b.delta).rev(),
);
let tar_gz = OpenOptions::new()
.write(true)
.create(true)
.open(output_path.as_ref())?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
// We only need to consider backups that have a non-empty contribution.
// This allows us to skip reading backups that have been completely
// overwritten by their successors anyways.
for (contribution, backup) in contributions
.iter()
.rev()
.zip(chain.iter().take(index + 1))
.filter(|(contribution, _)| !contribution.is_empty())
{
println!("{}", &backup);
backup.append(&self.backup_dir, contribution, &mut ar)?;
}
let mut enc = ar.into_inner()?;
enc.try_finish()
}
}
})
}
/// Get a reference to the underlying chains
pub fn chains(&self) -> &Vec<Vec<Backup<T>>> {
&self.chains
}
}


@@ -1,315 +0,0 @@
mod delta;
mod io_ext;
pub mod manager;
mod path;
mod state;
use std::{
collections::HashSet,
fmt,
fs::File,
io,
path::{Path, PathBuf},
};
use chrono::Utc;
use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use serde::{Deserialize, Serialize};
use delta::Delta;
pub use manager::Manager;
pub use manager::ManagerConfig;
pub use manager::MetaManager;
use path::PathExt;
pub use state::State;
const BYTE_SUFFIXES: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum BackupType {
Full,
Incremental,
}
/// Represents a successful backup
#[derive(Serialize, Deserialize, Debug)]
pub struct Backup<T: Clone> {
/// When the backup was started (also corresponds to the name)
pub start_time: chrono::DateTime<Utc>,
/// When the backup finished
pub end_time: chrono::DateTime<Utc>,
pub size: usize,
/// Type of the backup
pub type_: BackupType,
pub delta: Delta,
/// Additional metadata that can be associated with a given backup
pub metadata: Option<T>,
}
impl Backup<()> {
pub const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
/// Return the path to a backup file by properly formatting the data.
pub fn path<P: AsRef<Path>>(backup_dir: P, start_time: chrono::DateTime<Utc>) -> PathBuf {
let backup_dir = backup_dir.as_ref();
let filename = format!("{}", start_time.format(Self::FILENAME_FORMAT));
backup_dir.join(filename)
}
/// Extract an archive.
///
/// # Arguments
///
/// * `archive_path` - Path to the archive to extract
/// * `dirs` - list of tuples `(path_in_tar, dst_dir)` with `dst_dir` the directory on-disk
/// where the files stored under `path_in_tar` inside the tarball should be extracted to.
pub fn extract_archive<P: AsRef<Path>>(
archive_path: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<()> {
let tar_gz = File::open(archive_path)?;
let enc = GzDecoder::new(tar_gz);
let mut ar = tar::Archive::new(enc);
// Unpack each file by matching it with one of the destination directories and extracting
// it to the right path
for entry in ar.entries()? {
let mut entry = entry?;
let entry_path_in_tar = entry.path()?.to_path_buf();
for (path_in_tar, dst_dir) in dirs {
if entry_path_in_tar.starts_with(path_in_tar) {
let dst_path =
dst_dir.join(entry_path_in_tar.strip_prefix(path_in_tar).unwrap());
// Ensure all parent directories are present
std::fs::create_dir_all(dst_path.parent().unwrap())?;
entry.unpack(dst_path)?;
break;
}
}
}
Ok(())
}
}
impl<T: Clone> Backup<T> {
/// Set the backup's metadata.
pub fn set_metadata(&mut self, metadata: T) {
self.metadata = Some(metadata);
}
/// Create a new Full backup, populated with the given directories.
///
/// # Arguments
///
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create<P: AsRef<Path>>(
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(src_dir).unwrap();
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar.to_path_buf(), added_files);
}
let mut enc = ar.into_inner()?;
// The docs recommend running try_finish before unwrapping using finish
enc.try_finish()?;
let tar_gz = enc.finish()?;
Ok(Backup {
type_: BackupType::Full,
start_time,
end_time: chrono::Utc::now(),
size: tar_gz.bytes_written(),
delta,
metadata: None,
})
}
/// Create a new Incremental backup from the given state, populated with the given directories.
///
/// # Arguments
///
/// * `previous_state` - State the file system was in during the previous backup in the chain
/// * `previous_start_time` - Start time of the previous backup; used to filter files
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create_from<P: AsRef<Path>>(
previous_state: State,
previous_start_time: chrono::DateTime<Utc>,
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut all_files: HashSet<PathBuf> = HashSet::new();
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(src_dir).unwrap();
if !path.not_modified_since(previous_start_time) {
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
all_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar.clone(), added_files);
if let Some(previous_files) = previous_state.get(dir_in_tar) {
delta.removed.insert(
dir_in_tar.to_path_buf(),
previous_files.difference(&all_files).cloned().collect(),
);
}
}
let mut enc = ar.into_inner()?;
// The docs recommend running try_finish before unwrapping using finish
enc.try_finish()?;
let tar_gz = enc.finish()?;
Ok(Backup {
type_: BackupType::Incremental,
start_time,
end_time: chrono::Utc::now(),
size: tar_gz.bytes_written(),
delta,
metadata: None,
})
}
/// Restore the backup by extracting its contents to the respective directories.
///
/// # Arguments
///
/// * `backup_dir` - Backup directory where the file is stored
/// * `dirs` - list of tuples `(path_in_tar, dst_dir)` with `dst_dir` the directory on-disk
/// where the files stored under `path_in_tar` inside the tarball should be extracted to.
pub fn restore<P: AsRef<Path>>(
&self,
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<()> {
let backup_path = Backup::path(backup_dir, self.start_time);
Backup::extract_archive(backup_path, dirs)?;
// Remove any files
for (path_in_tar, dst_dir) in dirs {
if let Some(removed) = self.delta.removed.get(path_in_tar) {
for path in removed {
let dst_path = dst_dir.join(path);
std::fs::remove_file(dst_path)?;
}
}
}
Ok(())
}
pub fn open<P: AsRef<Path>>(&self, backup_dir: P) -> io::Result<tar::Archive<GzDecoder<File>>> {
let path = Backup::path(backup_dir, self.start_time);
let tar_gz = File::open(path)?;
let enc = GzDecoder::new(tar_gz);
Ok(tar::Archive::new(enc))
}
/// Open this backup's archive and append all its files that are part of the provided state to
/// the archive file.
pub fn append<P: AsRef<Path>>(
&self,
backup_dir: P,
state: &State,
ar: &mut tar::Builder<GzEncoder<File>>,
) -> io::Result<()> {
let mut own_ar = self.open(backup_dir)?;
for entry in own_ar.entries()? {
let entry = entry?;
let entry_path_in_tar = entry.path()?.to_path_buf();
if state.contains(&entry_path_in_tar) {
let header = entry.header().clone();
ar.append(&header, entry)?;
}
}
Ok(())
}
}
impl<T: Clone> fmt::Display for Backup<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let letter = match self.type_ {
BackupType::Full => 'F',
BackupType::Incremental => 'I',
};
// Pretty-print size
// If your backup is a petabyte or larger, this will crash and you need to re-evaluate your
// life choices
let index = self.size.ilog(1024) as usize;
let size = self.size as f64 / (1024.0_f64.powi(index as i32));
let duration = self.end_time - self.start_time;
write!(
f,
"{} ({}, {}m{}s, {:.2}{}, {})",
self.start_time.format(Backup::FILENAME_FORMAT),
letter,
duration.num_seconds() / 60,
duration.num_seconds() % 60,
size,
BYTE_SUFFIXES[index],
self.delta
)
}
}
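A quick numeric check of the size formatting above, as a sketch (BYTE_SUFFIXES itself is defined outside this diff; a ["B", "KiB", "MiB", ...] progression is assumed):
fn size_format_example() {
let size: u64 = 3_407_872; // bytes written; note that ilog panics on 0
let index = size.ilog(1024) as usize; // 1024^2 <= size < 1024^3, so index == 2
let scaled = size as f64 / 1024.0_f64.powi(index as i32);
assert_eq!(index, 2);
assert_eq!(scaled, 3.25); // rendered as "3.25MiB"
}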

View File

@ -1,149 +0,0 @@
use std::{
collections::HashSet,
ffi::OsString,
fs::{self, DirEntry},
io,
path::{Path, PathBuf},
};
use chrono::{Local, Utc};
pub struct ReadDirRecursive {
ignored: HashSet<OsString>,
read_dir: fs::ReadDir,
dir_stack: Vec<PathBuf>,
files_only: bool,
}
impl ReadDirRecursive {
/// Start the iterator for a new directory
pub fn start<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let path = path.as_ref();
let read_dir = path.read_dir()?;
Ok(ReadDirRecursive {
ignored: HashSet::new(),
read_dir,
dir_stack: Vec::new(),
files_only: false,
})
}
pub fn ignored<S: Into<OsString>>(mut self, s: S) -> Self {
self.ignored.insert(s.into());
self
}
pub fn files(mut self) -> Self {
self.files_only = true;
self
}
/// Tries to populate the `read_dir` field with a new `ReadDir` instance to consume.
fn next_read_dir(&mut self) -> io::Result<bool> {
if let Some(path) = self.dir_stack.pop() {
self.read_dir = path.read_dir()?;
Ok(true)
} else {
Ok(false)
}
}
/// Convenience method to add a new directory to the stack.
fn push_entry(&mut self, entry: &io::Result<DirEntry>) {
if let Ok(entry) = entry {
if entry.path().is_dir() {
self.dir_stack.push(entry.path());
}
}
}
/// Determine whether an entry should be returned by the iterator.
fn should_return(&self, entry: &io::Result<DirEntry>) -> bool {
if let Ok(entry) = entry {
let mut res = !self.ignored.contains(&entry.file_name());
// Please just let me combine these already
if self.files_only {
if let Ok(file_type) = entry.file_type() {
res = res && file_type.is_file();
}
// We couldn't determine if it's a file, so we don't return it
else {
res = false;
}
}
res
} else {
true
}
}
}
impl Iterator for ReadDirRecursive {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<Self::Item> {
loop {
// First, we try to consume the current directory's items
while let Some(entry) = self.read_dir.next() {
self.push_entry(&entry);
if self.should_return(&entry) {
return Some(entry);
}
}
// If we get an error while setting up a new directory, we return this, otherwise we
// keep trying to consume the directories
match self.next_read_dir() {
Ok(true) => (),
// There are no more directories to traverse, so the iterator is done
Ok(false) => return None,
Err(e) => return Some(Err(e)),
}
}
}
}
pub trait PathExt {
/// Confirm whether the file has not been modified since the given timestamp.
///
/// This function will only return true if it can determine with certainty that the file hasn't
/// been modified.
///
/// # Args
///
/// * `timestamp` - Timestamp to compare modified time with
///
/// # Returns
///
/// True if the file has not been modified for sure, false otherwise.
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool;
/// An extension of the `read_dir` command that runs through the entire underlying directory
/// structure using breadth-first search
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive>;
}
impl PathExt for Path {
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool {
self.metadata()
.and_then(|m| m.modified())
.map(|last_modified| {
let t: chrono::DateTime<Utc> = last_modified.into();
let t = t.with_timezone(&Local);
t < timestamp
})
.unwrap_or(false)
}
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive> {
ReadDirRecursive::start(self)
}
}
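A short usage sketch of these extensions (directory name and cutoff are hypothetical): collect every file under a tree, skipping "cache" entries and anything provably unmodified.
use std::io;
use std::path::{Path, PathBuf};
// Requires the PathExt trait to be in scope.
fn changed_files(src: &Path, since: chrono::DateTime<chrono::Utc>) -> io::Result<Vec<PathBuf>> {
let mut out = Vec::new();
for entry in src.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
// Keep only files we cannot prove to be unmodified since the cutoff
if !path.not_modified_since(since) {
out.push(path);
}
}
Ok(out)
}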

View File

@ -1,98 +0,0 @@
use std::{
borrow::Borrow,
collections::{HashMap, HashSet},
ops::{Deref, DerefMut},
path::{Path, PathBuf},
};
use serde::{Deserialize, Serialize};
use crate::backup::Delta;
/// Struct that represents a current state for a backup. This struct acts as a smart pointer around
/// a HashMap.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct State(HashMap<PathBuf, HashSet<PathBuf>>);
impl State {
pub fn new() -> Self {
State(HashMap::new())
}
/// Apply the delta to the current state.
pub fn apply(&mut self, delta: &Delta) {
// First we add new files, then we remove the old ones
for (dir, added) in delta.added.iter() {
if let Some(current) = self.0.get_mut(dir) {
current.extend(added.iter().cloned());
} else {
self.0.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in delta.removed.iter() {
if let Some(current) = self.0.get_mut(dir) {
current.retain(|k| !removed.contains(k));
}
}
}
/// Returns whether the provided relative path is part of the given state.
pub fn contains<P: AsRef<Path>>(&self, path: P) -> bool {
let path = path.as_ref();
self.0.iter().any(|(dir, files)| {
path.starts_with(dir) && files.contains(path.strip_prefix(dir).unwrap())
})
}
/// Returns whether the state is empty.
///
/// Note that the underlying map may still contain entries; a state counts as empty as
/// long as every set it contains is empty.
pub fn is_empty(&self) -> bool {
self.0.values().all(|s| s.is_empty())
}
}
impl<T> From<T> for State
where
T: IntoIterator,
T::Item: Borrow<Delta>,
{
fn from(deltas: T) -> Self {
let mut state = State::new();
for delta in deltas {
state.apply(delta.borrow());
}
state
}
}
impl AsRef<HashMap<PathBuf, HashSet<PathBuf>>> for State {
fn as_ref(&self) -> &HashMap<PathBuf, HashSet<PathBuf>> {
&self.0
}
}
impl Deref for State {
type Target = HashMap<PathBuf, HashSet<PathBuf>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for State {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl Default for State {
fn default() -> Self {
Self::new()
}
}
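A small sketch of how deltas fold into a State (access to `Delta`'s `added`/`removed` maps is assumed, as used in the backup code above):
use std::collections::HashSet;
use std::path::PathBuf;
fn state_example() {
let mut first = Delta::new();
first
.added
.insert(PathBuf::from("worlds"), HashSet::from([PathBuf::from("level.dat")]));
let mut second = Delta::new();
second
.removed
.insert(PathBuf::from("worlds"), HashSet::from([PathBuf::from("level.dat")]));
// The file is added by the first delta and removed again by the second
let state = State::from([&first, &second]);
assert!(!state.contains("worlds/level.dat"));
assert!(state.is_empty());
}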

View File

@ -1,302 +0,0 @@
use std::io;
use std::path::{Path, PathBuf};
use chrono::{TimeZone, Utc};
use clap::{Args, Subcommand};
use crate::{backup::Backup, other};
#[derive(Subcommand)]
pub enum BackupCommands {
/// List all tracked backups
///
/// Note that this will only list backups for the layers currently configured, and will ignore
/// any other layers also present in the backup directory.
List(BackupListArgs),
/// Manually create a new backup
///
/// Note that backups created using this command will count towards the length of a chain, and
/// can therefore shorten how far back in time your backups will be stored.
Create(BackupCreateArgs),
/// Restore a backup
///
/// This command will restore the selected backup by extracting its entire chain up to and
/// including the requested backup in-order.
Restore(BackupRestoreArgs),
/// Export a backup into a full archive
///
/// Just like the restore command, this will extract each backup from the chain up to and
/// including the requested backup, but instead of writing the files to disk, they will be
/// recompressed into a single new tarball containing the equivalent full backup.
Export(BackupExportArgs),
/// Extract an archive file, which is assumed to be a full backup.
///
/// This command mostly exists as a convenience method for working with the output of `export`.
Extract(BackupExtractArgs),
}
#[derive(Args)]
pub struct BackupArgs {
#[command(subcommand)]
pub command: BackupCommands,
}
#[derive(Args)]
pub struct BackupCreateArgs {
/// What layer to create a backup in
layer: String,
}
#[derive(Args)]
pub struct BackupListArgs {
/// What layer to list
layer: Option<String>,
}
#[derive(Args)]
pub struct BackupRestoreArgs {
/// Path to the backup inside the backup directory to restore
path: PathBuf,
/// Directory to store config in
output_config: PathBuf,
/// Directory to store worlds in
output_worlds: PathBuf,
/// Whether to overwrite the contents of the output directories
///
/// If set, the output directories will be completely cleared before trying to restore the
/// backup.
#[arg(short, long, default_value_t = false)]
force: bool,
/// Create output directories if they don't exist
#[arg(short, long, default_value_t = false)]
make: bool,
}
#[derive(Args)]
pub struct BackupExportArgs {
/// Path to the backup inside the backup directory to export
path: PathBuf,
/// Path to store the exported archive
output: PathBuf,
/// Create output directories if they don't exist
#[arg(short, long, default_value_t = false)]
make: bool,
}
#[derive(Args)]
pub struct BackupExtractArgs {
/// Path to the backup to extract
path: PathBuf,
/// Directory to store config in
output_config: PathBuf,
/// Directory to store worlds in
output_worlds: PathBuf,
/// Whether to overwrite the contents of the output directories
///
/// If set, the output directories will be completely cleared before trying to restore the
/// backup.
#[arg(short, long, default_value_t = false)]
force: bool,
/// Create output directories if they don't exist
#[arg(short, long, default_value_t = false)]
make: bool,
}
impl BackupArgs {
pub fn run(&self, cli: &super::Config) -> io::Result<()> {
match &self.command {
BackupCommands::Create(args) => args.run(cli),
BackupCommands::List(args) => args.run(cli),
BackupCommands::Restore(args) => args.run(cli),
BackupCommands::Export(args) => args.run(cli),
BackupCommands::Extract(args) => args.run(cli),
}
}
}
impl BackupCreateArgs {
pub fn run(&self, cli: &super::Config) -> io::Result<()> {
let mut meta = cli.meta()?;
if let Some(res) = meta.create_backup(&self.layer) {
res
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown layer"))
}
}
}
impl BackupListArgs {
pub fn run(&self, cli: &super::Config) -> io::Result<()> {
let meta = cli.meta()?;
// A bit scuffed? Sure
for (name, manager) in meta
.managers()
.iter()
.filter(|(name, _)| self.layer.is_none() || &self.layer.as_ref().unwrap() == name)
{
println!("{}", name);
for chain in manager.chains().iter().filter(|c| !c.is_empty()) {
let mut iter = chain.iter();
println!(" {}", iter.next().unwrap());
for backup in iter {
println!(" {}", backup);
}
}
}
Ok(())
}
}
/// Tries to parse the given path as a backup file inside the backup directory, deriving
/// its layer and its timestamp from the formatted filename.
fn parse_backup_path(
backup_dir: &Path,
backup_path: &Path,
) -> io::Result<(String, chrono::DateTime<Utc>)> {
if !backup_path.starts_with(backup_dir) {
return Err(other("Provided file is not inside the backup directory."));
}
let layer = if let Some(parent) = backup_path.parent() {
// Backup files should be stored nested inside a layer's folder
if parent != backup_dir {
parent.file_name().unwrap().to_string_lossy()
} else {
return Err(other("Invalid path."));
}
} else {
return Err(other("Invalid path."));
};
let timestamp = if let Some(filename) = backup_path.file_name() {
Utc.datetime_from_str(&filename.to_string_lossy(), Backup::FILENAME_FORMAT)
.map_err(|_| other("Invalid filename."))?
} else {
return Err(other("Invalid filename."));
};
Ok((layer.to_string(), timestamp))
}
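// For example (assuming FILENAME_FORMAT is the "%Y-%m-%d_%H-%M-%S.tar.gz"-style pattern
// used elsewhere in this diff), a path such as
//   <backup_dir>/hourly/2023-06-06_16-57-59.tar.gz
// yields the layer "hourly" and the corresponding UTC timestamp.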
impl BackupRestoreArgs {
pub fn run(&self, cli: &super::Config) -> io::Result<()> {
let backup_dir = cli.backup.canonicalize()?;
// Create directories if needed
if self.make {
std::fs::create_dir_all(&self.output_config)?;
std::fs::create_dir_all(&self.output_worlds)?;
}
let output_config = self.output_config.canonicalize()?;
let output_worlds = self.output_worlds.canonicalize()?;
// Parse input path
let backup_path = self.path.canonicalize()?;
let (layer, timestamp) = parse_backup_path(&backup_dir, &backup_path)?;
let meta = cli.meta()?;
// Clear previous contents of directories
let mut entries = output_config
.read_dir()?
.chain(output_worlds.read_dir()?)
.peekable();
if entries.peek().is_some() && !self.force {
return Err(other("Output directories are not empty. If you wish to overwrite these contents, use the force flag."));
}
for entry in entries {
let path = entry?.path();
if path.is_dir() {
std::fs::remove_dir_all(path)?;
} else {
std::fs::remove_file(path)?;
}
}
let dirs = vec![
(PathBuf::from("config"), output_config),
(PathBuf::from("worlds"), output_worlds),
];
// Restore the backup
if let Some(res) = meta.restore_backup(&layer, timestamp, &dirs) {
res
} else {
Err(other("Unknown layer"))
}
}
}
impl BackupExportArgs {
pub fn run(&self, cli: &super::Config) -> io::Result<()> {
let backup_dir = cli.backup.canonicalize()?;
if self.make {
if let Some(parent) = &self.output.parent() {
std::fs::create_dir_all(parent)?;
}
}
// Parse input path
let backup_path = self.path.canonicalize()?;
let (layer, timestamp) = parse_backup_path(&backup_dir, &backup_path)?;
let meta = cli.meta()?;
if let Some(res) = meta.export_backup(&layer, timestamp, &self.output) {
res
} else {
Err(other("Unknown layer"))
}
}
}
impl BackupExtractArgs {
pub fn run(&self, _cli: &super::Config) -> io::Result<()> {
// Create directories if needed
if self.make {
std::fs::create_dir_all(&self.output_config)?;
std::fs::create_dir_all(&self.output_worlds)?;
}
let output_config = self.output_config.canonicalize()?;
let output_worlds = self.output_worlds.canonicalize()?;
let backup_path = self.path.canonicalize()?;
// Clear previous contents of directories
let mut entries = output_config
.read_dir()?
.chain(output_worlds.read_dir()?)
.peekable();
if entries.peek().is_some() && !self.force {
return Err(other("Output directories are not empty. If you wish to overwrite these contents, use the force flag."));
}
for entry in entries {
let path = entry?.path();
if path.is_dir() {
std::fs::remove_dir_all(path)?;
} else {
std::fs::remove_file(path)?;
}
}
let dirs = vec![
(PathBuf::from("config"), output_config),
(PathBuf::from("worlds"), output_worlds),
];
Backup::extract_archive(backup_path, &dirs)
}
}

View File

@ -1,49 +0,0 @@
use std::{io, path::PathBuf};
use serde::{Deserialize, Serialize};
use crate::{
backup::{ManagerConfig, MetaManager},
server::{Metadata, ServerType},
};
#[derive(Serialize, Deserialize, Debug)]
pub struct Config {
pub config: PathBuf,
pub world: PathBuf,
pub backup: PathBuf,
pub layers: Vec<ManagerConfig>,
pub server: ServerType,
pub server_version: String,
}
impl Default for Config {
fn default() -> Self {
Self {
config: PathBuf::from("."),
world: PathBuf::from("../worlds"),
backup: PathBuf::from("../backups"),
layers: Vec::new(),
server: ServerType::Unknown,
server_version: String::from(""),
}
}
}
impl Config {
/// Convenience method to initialize backup manager from the cli arguments
pub fn meta(&self) -> io::Result<MetaManager<Metadata>> {
let metadata = Metadata {
server_type: self.server,
server_version: self.server_version.clone(),
};
let dirs = vec![
(PathBuf::from("config"), self.config.canonicalize()?),
(PathBuf::from("worlds"), self.world.canonicalize()?),
];
let mut meta = MetaManager::new(self.backup.canonicalize()?, dirs, metadata);
meta.add_all(&self.layers)?;
Ok(meta)
}
}

View File

@ -1,119 +0,0 @@
mod backup;
mod config;
mod run;
use std::{path::PathBuf, str::FromStr};
use clap::{Args, Parser, Subcommand};
use figment::{
providers::{Env, Format, Serialized, Toml},
Figment,
};
use serde::{Deserialize, Serialize};
use crate::{backup::ManagerConfig, server::ServerType};
use backup::BackupArgs;
use config::Config;
use run::RunCli;
#[derive(Parser, Serialize)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
#[command(subcommand)]
#[serde(skip)]
pub command: Commands,
/// Path to a TOML configuration file
#[arg(long = "config-file", global = true)]
pub config_file: Option<PathBuf>,
#[command(flatten)]
pub args: CliArgs,
}
#[derive(Args, Serialize, Deserialize, Clone)]
pub struct CliArgs {
/// Directory where configs are stored, and where the server will run
#[arg(long, value_name = "CONFIG_DIR", global = true)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub config: Option<PathBuf>,
/// Directory where world files will be saved
#[arg(long, value_name = "WORLD_DIR", global = true)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub world: Option<PathBuf>,
/// Directory where backups will be stored
#[arg(long, value_name = "BACKUP_DIR", global = true)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub backup: Option<PathBuf>,
/// What backup layers to employ, provided as a list of tuples name,frequency,chains,chain_len
/// delimited by semicolons (;).
#[arg(long, global = true, value_delimiter = ';')]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub layers: Option<Vec<ManagerConfig>>,
/// Type of server
#[arg(long, global = true)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub server: Option<ServerType>,
/// Version string for the server, e.g. 1.19.4-545
#[arg(long, global = true)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub server_version: Option<String>,
}
#[derive(Subcommand)]
pub enum Commands {
/// Run the server
Run(RunCli),
/// Interact with the backup system without starting a server
Backup(BackupArgs),
}
impl Cli {
pub fn run(&self) -> crate::Result<()> {
let config = self.config(&self.args)?;
match &self.command {
Commands::Run(args) => args.run(self, &config),
Commands::Backup(args) => Ok(args.run(&config)?),
}
}
pub fn config<T, U>(&self, args: &U) -> crate::Result<T>
where
T: Default + Serialize + for<'de> Deserialize<'de>,
U: Serialize,
{
let toml_file = self
.config_file
.clone()
.unwrap_or(PathBuf::from(Env::var_or("ALEX_CONFIG_FILE", "")));
let mut figment = Figment::new()
.merge(Serialized::defaults(T::default()))
.merge(Toml::file(toml_file))
.merge(Env::prefixed("ALEX_").ignore(&["ALEX_LAYERS"]));
// Layers need to be parsed separately, as the env var format is different from the one
// serde expects
if let Some(layers_env) = Env::var("ALEX_LAYERS") {
let res = layers_env
.split(';')
.map(ManagerConfig::from_str)
.collect::<Vec<_>>();
if res.iter().any(|e| e.is_err()) {
return Err(crate::other("Invalid layer configuration").into());
}
let layers: Vec<_> = res.iter().flatten().collect();
figment = figment.merge(Serialized::default("layers", layers));
}
Ok(figment.merge(Serialized::defaults(args)).extract()?)
}
}
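The merge order above gives a fixed precedence, lowest to highest: struct defaults, the TOML file, ALEX_-prefixed env vars, then CLI arguments. A sketch of the separate layer parsing, with a hypothetical env value:
fn layers_example() -> crate::Result<Vec<ManagerConfig>> {
// Hypothetical value; each entry is name,frequency,chains,chain_len
let layers_env = "daily,1440,4,7;hourly,60,2,24";
layers_env
.split(';')
.map(ManagerConfig::from_str)
.collect::<Result<Vec<_>, _>>()
.map_err(|_| crate::other("Invalid layer configuration").into())
}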

View File

@ -1,22 +0,0 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct Config {
pub jar: PathBuf,
pub java: String,
pub xms: u64,
pub xmx: u64,
}
impl Default for Config {
fn default() -> Self {
Self {
jar: PathBuf::from("server.jar"),
java: String::from("java"),
xms: 1024,
xmx: 2048,
}
}
}

View File

@ -1,103 +0,0 @@
mod config;
use std::{path::PathBuf, sync::Arc};
use clap::Args;
use serde::{Deserialize, Serialize};
use crate::{server, signals, stdin};
use config::Config;
#[derive(Args)]
pub struct RunCli {
#[command(flatten)]
pub args: RunArgs,
/// Don't actually run the server, but simply output the server configuration that would
/// have been run
#[arg(short, long, default_value_t = false)]
pub dry: bool,
}
#[derive(Args, Serialize, Deserialize, Clone)]
pub struct RunArgs {
/// Server jar to execute
#[arg(long, value_name = "JAR_PATH")]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub jar: Option<PathBuf>,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD")]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub java: Option<String>,
/// XMS value in megabytes for the server instance
#[arg(long)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub xms: Option<u64>,
/// XMX value in megabytes for the server instance
#[arg(long)]
#[serde(skip_serializing_if = "::std::option::Option::is_none")]
pub xmx: Option<u64>,
}
fn backups_thread(server: Arc<server::ServerProcess>) {
loop {
let next_scheduled_time = {
server
.backups
.read()
.unwrap()
.next_scheduled_time()
.unwrap()
};
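// The explicit block above ensures the RwLock read guard is dropped before the sleep
// below, so the backup manager isn't held locked while this thread waits.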
let now = chrono::offset::Utc::now();
if next_scheduled_time > now {
std::thread::sleep((next_scheduled_time - now).to_std().unwrap());
}
// We explicitly ignore the error here, as we don't want the thread to fail
let _ = server.backup();
}
}
impl RunCli {
pub fn run(&self, cli: &super::Cli, global: &super::Config) -> crate::Result<()> {
let config: Config = cli.config(&self.args)?;
let (_, mut signals) = signals::install_signal_handlers()?;
let mut cmd = server::ServerCommand::new(global.server, &global.server_version)
.java(&config.java)
.jar(config.jar.clone())
.config(global.config.clone())
.world(global.world.clone())
.backup(global.backup.clone())
.managers(global.layers.clone())
.xms(config.xms)
.xmx(config.xmx);
cmd.canonicalize()?;
if self.dry {
print!("{}", cmd);
return Ok(());
}
let counter = Arc::new(cmd.spawn()?);
if !global.layers.is_empty() {
let clone = Arc::clone(&counter);
std::thread::spawn(move || backups_thread(clone));
}
// Spawn thread that handles the main stdin loop
let clone = Arc::clone(&counter);
std::thread::spawn(move || stdin::handle_stdin(clone));
// Signal handler loop exits the process when necessary
Ok(signals::handle_signals(&mut signals, counter)?)
}
}

View File

@ -1,32 +0,0 @@
use std::{fmt, io};
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
IO(io::Error),
Figment(figment::Error),
}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::IO(err) => write!(fmt, "{}", err),
Error::Figment(err) => write!(fmt, "{}", err),
}
}
}
impl std::error::Error for Error {}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<figment::Error> for Error {
fn from(err: figment::Error) -> Self {
Error::Figment(err)
}
}

View File

@ -1,43 +1,110 @@
mod backup;
mod cli;
mod error;
mod server;
mod signals;
mod stdin;
use std::io;
use clap::Parser;
use server::ServerType;
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use crate::cli::Cli;
pub use error::{Error, Result};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
/// Type of server
type_: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(env = "ALEX_SERVER_VERSION")]
server_version: String,
pub fn other(msg: &str) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
jar: PathBuf,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR"
)]
config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR"
)]
world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_WORLD_DIR"
)]
backup: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
xmx: u64,
/// How many backups to keep
#[arg(short = 'n', long, default_value_t = 7, env = "ALEX_MAX_BACKUPS")]
max_backups: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
frequency: u64,
}
// fn commands_backup(cli: &Cli, args: &BackupArgs) -> io::Result<()> {
// let metadata = server::Metadata {
// server_type: cli.server,
// server_version: cli.server_version.clone(),
// };
// let dirs = vec![
// (PathBuf::from("config"), cli.config.clone()),
// (PathBuf::from("worlds"), cli.world.clone()),
// ];
// let mut meta = MetaManager::new(cli.backup.clone(), dirs, metadata);
// meta.add_all(&cli.layers)?;
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
loop {
std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
// match &args.command {
// BackupCommands::List => ()
// }
{
let mut server = counter.lock().unwrap();
// // manager.create_backup()?;
// // manager.remove_old_backups()
// }
// We explicitly ignore the error here, as we don't want the thread to fail
let _ = server.backup();
}
}
}
fn main() -> crate::Result<()> {
fn main() -> io::Result<()> {
let (term, mut signals) = signals::install_signal_handlers()?;
let cli = Cli::parse();
cli.run()
let cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
.java(&cli.java)
.jar(cli.jar)
.config(cli.config)
.world(cli.world)
.backup(cli.backup)
.xms(cli.xms)
.xmx(cli.xmx)
.max_backups(cli.max_backups);
let server = Arc::new(cmd.spawn()?);
if cli.frequency > 0 {
let clone = Arc::clone(&server);
std::thread::spawn(move || backups_thread(clone, cli.frequency));
}
// Spawn thread that handles the main stdin loop
let clone = Arc::clone(&server);
std::thread::spawn(move || stdin::handle_stdin(clone));
// Signal handler loop exits the process when necessary
signals::handle_signals(&mut signals, term, server)
}

View File

@ -1,12 +1,30 @@
use crate::backup::ManagerConfig;
use crate::backup::MetaManager;
use crate::server::{Metadata, ServerProcess, ServerType};
use crate::server::ServerProcess;
use clap::ValueEnum;
use std::fmt;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum ServerType {
Paper,
Forge,
Vanilla,
}
impl fmt::Display for ServerType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ServerType::Paper => "PaperMC",
ServerType::Forge => "Forge",
ServerType::Vanilla => "Vanilla",
};
write!(f, "{}", s)
}
}
pub struct ServerCommand {
type_: ServerType,
version: String,
@ -17,7 +35,7 @@ pub struct ServerCommand {
backup_dir: PathBuf,
xms: u64,
xmx: u64,
managers: Vec<ManagerConfig>,
max_backups: u64,
}
impl ServerCommand {
@ -32,7 +50,7 @@ impl ServerCommand {
backup_dir: PathBuf::from("backups"),
xms: 1024,
xmx: 2048,
managers: Vec::new(),
max_backups: 7,
}
}
@ -73,9 +91,8 @@ impl ServerCommand {
self
}
pub fn managers(mut self, configs: Vec<ManagerConfig>) -> Self {
self.managers = configs;
pub fn max_backups(mut self, v: u64) -> Self {
self.max_backups = v;
self
}
@ -88,123 +105,33 @@ impl ServerCommand {
Ok(())
}
/// Canonicalize all paths to absolute paths. Without this call, all paths will be
/// interpreted relative to the config directory.
pub fn canonicalize(&mut self) -> std::io::Result<()> {
pub fn spawn(self) -> std::io::Result<ServerProcess> {
// To avoid any issues, we use absolute paths for everything when spawning the process
self.jar = self.jar.canonicalize()?;
self.config_dir = self.config_dir.canonicalize()?;
self.world_dir = self.world_dir.canonicalize()?;
self.backup_dir = self.backup_dir.canonicalize()?;
let jar = self.jar.canonicalize()?;
let config_dir = self.config_dir.canonicalize()?;
let world_dir = self.world_dir.canonicalize()?;
let backup_dir = self.backup_dir.canonicalize()?;
Ok(())
}
fn create_cmd(&self) -> std::process::Command {
let mut cmd = Command::new(&self.java);
// Apply JVM optimisation flags
// https://aikar.co/2018/07/02/tuning-the-jvm-g1gc-garbage-collector-flags-for-minecraft/
cmd.arg(format!("-Xms{}M", self.xms))
.arg(format!("-Xmx{}M", self.xmx))
.args([
"-XX:+UseG1GC",
"-XX:+ParallelRefProcEnabled",
"-XX:MaxGCPauseMillis=200",
"-XX:+UnlockExperimentalVMOptions",
"-XX:+DisableExplicitGC",
"-XX:+AlwaysPreTouch",
]);
if self.xms > 12 * 1024 {
cmd.args([
"-XX:G1NewSizePercent=40",
"-XX:G1MaxNewSizePercent=50",
"-XX:G1HeapRegionSize=16M",
"-XX:G1ReservePercent=15",
]);
} else {
cmd.args([
"-XX:G1NewSizePercent=30",
"-XX:G1MaxNewSizePercent=40",
"-XX:G1HeapRegionSize=8M",
"-XX:G1ReservePercent=20",
]);
}
cmd.args(["-XX:G1HeapWastePercent=5", "-XX:G1MixedGCCountTarget=4"]);
if self.xms > 12 * 1024 {
cmd.args(["-XX:InitiatingHeapOccupancyPercent=20"]);
} else {
cmd.args(["-XX:InitiatingHeapOccupancyPercent=15"]);
}
cmd.args([
"-XX:G1MixedGCLiveThresholdPercent=90",
"-XX:G1RSetUpdatingPauseTimePercent=5",
"-XX:SurvivorRatio=32",
"-XX:+PerfDisableSharedMem",
"-XX:MaxTenuringThreshold=1",
"-Dusing.aikars.flags=https://mcflags.emc.gs",
"-Daikars.new.flags=true",
]);
cmd.current_dir(&self.config_dir)
.arg("-jar")
.arg(&self.jar)
.arg("--universe")
.arg(&self.world_dir)
.arg("--nogui")
.stdin(Stdio::piped());
cmd
}
pub fn spawn(&mut self) -> std::io::Result<ServerProcess> {
let metadata = Metadata {
server_type: self.type_,
server_version: self.version.clone(),
};
let dirs = vec![
(PathBuf::from("config"), self.config_dir.clone()),
(PathBuf::from("worlds"), self.world_dir.clone()),
];
let mut meta = MetaManager::new(self.backup_dir.clone(), dirs, metadata);
meta.add_all(&self.managers)?;
let mut cmd = self.create_cmd();
self.accept_eula()?;
let child = cmd.spawn()?;
Ok(ServerProcess::new(meta, child))
}
}
impl fmt::Display for ServerCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let cmd = self.create_cmd();
writeln!(f, "Command: {}", self.java)?;
writeln!(f, "Working dir: {}", self.config_dir.as_path().display())?;
// Print command env vars
writeln!(f, "Environment:")?;
for (key, val) in cmd.get_envs().filter(|(_, v)| v.is_some()) {
let val = val.unwrap();
writeln!(f, " {}={}", key.to_string_lossy(), val.to_string_lossy())?;
}
// Print command arguments
writeln!(f, "Arguments:")?;
for arg in cmd.get_args() {
writeln!(f, " {}", arg.to_string_lossy())?;
}
Ok(())
let child = Command::new(&self.java)
.current_dir(&config_dir)
.arg("-jar")
.arg(&jar)
.arg("--universe")
.arg(&world_dir)
.arg("--nogui")
.stdin(Stdio::piped())
.spawn()?;
Ok(ServerProcess::new(
self.type_,
self.version,
config_dir,
world_dir,
backup_dir,
self.max_backups,
child,
))
}
}

View File

@ -1,37 +1,5 @@
mod command;
mod process;
pub use command::ServerCommand;
pub use command::{ServerCommand, ServerType};
pub use process::ServerProcess;
use clap::ValueEnum;
use serde::{Deserialize, Serialize};
use std::fmt;
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Serialize, Deserialize, Debug)]
#[serde(rename_all = "lowercase")]
pub enum ServerType {
Unknown,
Paper,
Forge,
Vanilla,
}
impl fmt::Display for ServerType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ServerType::Unknown => "Unknown",
ServerType::Paper => "PaperMC",
ServerType::Forge => "Forge",
ServerType::Vanilla => "Vanilla",
};
write!(f, "{}", s)
}
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Metadata {
pub server_type: ServerType,
pub server_version: String,
}

View File

@ -1,21 +1,48 @@
use std::{io::Write, process::Child, sync::RwLock};
use crate::server::ServerType;
use flate2::write::GzEncoder;
use flate2::Compression;
use std::io::Write;
use std::path::PathBuf;
use std::process::Child;
use crate::{backup::MetaManager, server::Metadata};
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
fn getegid() -> u32;
}
pub struct ServerProcess {
child: RwLock<Child>,
pub backups: RwLock<MetaManager<Metadata>>,
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
}
impl ServerProcess {
pub fn new(manager: MetaManager<Metadata>, child: Child) -> ServerProcess {
pub fn new(
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
) -> ServerProcess {
ServerProcess {
child: RwLock::new(child),
backups: RwLock::new(manager),
type_,
version,
config_dir,
world_dir,
backup_dir,
max_backups,
child,
}
}
pub fn send_command(&self, cmd: &str) -> std::io::Result<()> {
pub fn send_command(&mut self, cmd: &str) -> std::io::Result<()> {
match cmd.trim() {
"stop" | "exit" => self.stop()?,
"backup" => self.backup()?,
@ -25,35 +52,29 @@ impl ServerProcess {
Ok(())
}
fn custom(&self, cmd: &str) -> std::io::Result<()> {
let child = self.child.write().unwrap();
let mut stdin = child.stdin.as_ref().unwrap();
fn custom(&mut self, cmd: &str) -> std::io::Result<()> {
let mut stdin = self.child.stdin.as_ref().unwrap();
stdin.write_all(format!("{}\n", cmd.trim()).as_bytes())?;
stdin.flush()?;
Ok(())
}
pub fn stop(&self) -> std::io::Result<()> {
pub fn stop(&mut self) -> std::io::Result<()> {
self.custom("stop")?;
self.child.write().unwrap().wait()?;
self.child.wait()?;
Ok(())
}
pub fn kill(&self) -> std::io::Result<()> {
self.child.write().unwrap().kill()
pub fn kill(&mut self) -> std::io::Result<()> {
self.child.kill()
}
/// Perform a backup by disabling the server's save feature and flushing its data, before
/// creating an archive file.
pub fn backup(&self) -> std::io::Result<()> {
// We explicitly lock this entire function to prevent parallel backups
let mut backups = self.backups.write().unwrap();
let layer_name = String::from(backups.next_scheduled_layer().unwrap());
self.custom(&format!("say starting backup for layer '{}'", layer_name))?;
pub fn backup(&mut self) -> std::io::Result<()> {
self.custom("say backing up server")?;
// Make sure the server isn't modifying the files during the backup
self.custom("save-off")?;
@ -63,32 +84,80 @@ impl ServerProcess {
// We wait some time to (hopefully) ensure the save-all call has completed
std::thread::sleep(std::time::Duration::from_secs(10));
let start_time = chrono::offset::Utc::now();
let res = backups.perform_backup_cycle();
let res = self.create_backup_archive();
if res.is_ok() {
self.remove_old_backups()?;
}
// The server's save feature needs to be enabled again even if the archive failed to create
self.custom("save-on")?;
self.custom("save-all")?;
let duration = chrono::offset::Utc::now() - start_time;
let duration_str = format!(
"{}m{}s",
duration.num_seconds() / 60,
duration.num_seconds() % 60
);
if res.is_ok() {
self.custom(&format!(
"say backup created for layer '{}' in {}",
layer_name, duration_str
))?;
} else {
self.custom(&format!(
"an error occured after {} while creating backup for layer '{}'",
duration_str, layer_name
))?;
}
self.custom("say server backed up successfully")?;
res
}
/// Create a new compressed backup archive of the server's data.
fn create_backup_archive(&mut self) -> std::io::Result<()> {
// Create a gzip-compressed tarball of the worlds folder
let filename = format!(
"{}",
chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
);
let path = self.backup_dir.join(filename);
let tar_gz = std::fs::File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.append_dir_all("worlds", &self.world_dir)?;
// We don't store all files in the config, as this would include caches
tar.append_path_with_name(
self.config_dir.join("server.properties"),
"config/server.properties",
)?;
// We add a file to the backup describing for what version it was made
let info = format!("{} {}", self.type_, self.version);
let info_bytes = info.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(info_bytes.len().try_into().unwrap());
header.set_mode(0o100644);
unsafe {
header.set_gid(getegid().into());
header.set_uid(geteuid().into());
}
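// geteuid/getegid are unsafe here only because they're foreign libc functions; they
// stamp the info.txt entry with the current process's effective uid/gid.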
tar.append_data(&mut header, "info.txt", info_bytes)?;
// tar.append_dir_all("config", &self.config_dir)?;
// Backup file gets finalized in the drop
Ok(())
}
/// Remove the oldest backups
fn remove_old_backups(&mut self) -> std::io::Result<()> {
// The naming format used allows us to sort the backups by name and still have them
// ordered by creation time
let mut backups = std::fs::read_dir(&self.backup_dir)?
.filter_map(|res| res.map(|e| e.path()).ok())
.collect::<Vec<PathBuf>>();
backups.sort();
let max_backups: usize = self.max_backups.try_into().unwrap();
if backups.len() > max_backups {
let excess_backups = backups.len() - max_backups;
for backup in &backups[0..excess_backups] {
std::fs::remove_file(backup)?;
}
}
Ok(())
}
}

View File

@ -1,13 +1,10 @@
use std::{
io,
sync::{atomic::AtomicBool, Arc},
};
use std::io;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use signal_hook::{
consts::TERM_SIGNALS,
flag,
iterator::{Signals, SignalsInfo},
};
use signal_hook::consts::TERM_SIGNALS;
use signal_hook::flag;
use signal_hook::iterator::{Signals, SignalsInfo};
use crate::server;
@ -19,15 +16,12 @@ pub fn install_signal_handlers() -> io::Result<(Arc<AtomicBool>, SignalsInfo)> {
// atomic bool. With this, the process will get killed immediately once it receives a second
// termination signal (e.g. a double ctrl-c).
// https://docs.rs/signal-hook/0.3.15/signal_hook/#a-complex-signal-handling-with-a-background-thread
for sig in TERM_SIGNALS {
// When terminated by a second term signal, exit with exit code 1.
// This will do nothing the first time (because term_now is false).
flag::register_conditional_shutdown(*sig, 1, Arc::clone(&term))?;
// But this will "arm" the above for the second time, by setting it to true.
// The order of registering these is important, if you put this one first, it will
// first arm and then terminate all in the first round.
flag::register(*sig, Arc::clone(&term))?;
}
// for sig in TERM_SIGNALS {
// // But this will "arm" the above for the second time, by setting it to true.
// // The order of registering these is important, if you put this one first, it will
// // first arm and then terminate all in the first round.
// flag::register(*sig, Arc::clone(&term))?;
// }
let signals = TERM_SIGNALS;
@ -35,10 +29,7 @@ pub fn install_signal_handlers() -> io::Result<(Arc<AtomicBool>, SignalsInfo)> {
}
/// Loop that handles terminating signals as they come in.
pub fn handle_signals(
signals: &mut SignalsInfo,
server: Arc<server::ServerProcess>,
) -> io::Result<()> {
pub fn handle_signals(signals: &mut SignalsInfo, term: Arc<AtomicBool>, server: Arc<server::ServerProcess>) -> io::Result<()> {
let mut force = false;
// We only register terminating signals, so we don't need to differentiate between what kind of
@ -46,16 +37,13 @@ pub fn handle_signals(
for _ in signals {
// If term is already true, this is the second signal, meaning we kill the process
// immediately.
// This will currently not work, as the initial stop command will block the kill from
// happening.
if force {
return server.kill();
return server.kill()
}
// The stop command runs in a separate thread to avoid blocking the signal handling loop.
// After stopping the server, the thread terminates the process.
else {
let clone = Arc::clone(&server);
std::thread::spawn(move || {
let _ = clone.stop();
std::process::exit(0);

View File

@ -1,8 +1,9 @@
use std::{io, sync::Arc};
use std::sync::{Arc, Mutex};
use std::io;
use crate::server;
pub fn handle_stdin(server: Arc<server::ServerProcess>) {
pub fn handle_stdin(counter: Arc<Mutex<server::ServerProcess>>) {
let stdin = io::stdin();
let input = &mut String::new();
@ -13,9 +14,13 @@ pub fn handle_stdin(server: Arc<server::ServerProcess>) {
continue;
};
{
let mut server = counter.lock().unwrap();
if let Err(e) = server.send_command(input) {
println!("{}", e);
};
}
if input.trim() == "stop" {
std::process::exit(0);