Compare commits

...

39 Commits

Author SHA1 Message Date
Jef Roosens 75e9d7a9d2
chore: bump version to 0.3.0 2023-07-04 15:55:52 +02:00
Jef Roosens e6fa8a0eeb
docs: add a few docstrings 2023-07-04 15:49:16 +02:00
Jef Roosens 55c5f24937
feat: specify output dirs for restore instead of using config & world 2023-07-04 15:36:12 +02:00
Jef Roosens 36c441b8c2
feat: respect backup list filter option 2023-07-04 14:15:00 +02:00
Jef Roosens f71db90922
feat: store end time as metadata 2023-07-03 12:59:50 +02:00
Jef Roosens 2c256cf904
refactor: quick maths 2023-07-03 12:40:40 +02:00
Jef Roosens bfd278abbe
feat: show backup sizes in list command 2023-07-03 12:11:41 +02:00
Jef Roosens c5193f0f3c
feat: store backup sizes in metadata file 2023-07-03 11:54:11 +02:00
Jef Roosens a4a03ca4c5
feat: improve list view 2023-06-24 13:51:37 +02:00
Jef Roosens 5159bfdddd
feat: basic list command 2023-06-24 13:35:59 +02:00
Jef Roosens 0eda768c03
chore: update readme 2023-06-24 12:16:53 +02:00
Jef Roosens a4e2a1276f
feat: restore backup chains using cli commands 2023-06-23 22:47:38 +02:00
Jef Roosens e373fc85f1
feat: create backups from cli for specific layer 2023-06-23 18:02:38 +02:00
Jef Roosens 1cfe13674d
refactor: structure code to allow expanding cli functionality 2023-06-23 15:51:17 +02:00
Jef Roosens d5cea49c8b
feat: further generalize backup code 2023-06-23 15:00:39 +02:00
Jef Roosens 03e21fda87
feat: show message describing what layer is backing up 2023-06-23 13:01:19 +02:00
Jef Roosens 29636ffcdb
feat: implement backup layers using meta manager 2023-06-23 12:30:10 +02:00
Jef Roosens a236c36a4f
feat: take backup layers as arguments 2023-06-23 10:53:17 +02:00
Jef Roosens 0a459ee30b
refactor: let backup manager calculate next backup time 2023-06-22 21:15:40 +02:00
Jef Roosens 4e8d0a8d25
refactor: we go rusty 2023-06-22 20:23:13 +02:00
Jef Roosens 188fb30343
fix: better serde bounds 2023-06-22 20:10:37 +02:00
Jef Roosens 53dc3783ca
feat: store server info in metadata file; change cli flags 2023-06-20 19:31:50 +02:00
Jef Roosens ef631fab1d
refactor: separate backup logic into own module 2023-06-19 14:04:38 +02:00
Jef Roosens 74a0b91fd1
refactor: remove open function 2023-06-18 23:33:56 +02:00
Jef Roosens b48c531d80
feat: configurable parameters for incremental backups 2023-06-18 22:48:11 +02:00
Jef Roosens b51d951688
feat: re-implement remove old backups 2023-06-18 21:56:43 +02:00
Jef Roosens bb7b57899b
refactor: store backups in nested vecs instead; introduce concept of chains 2023-06-18 21:15:05 +02:00
Jef Roosens f7235fb342
refactor: move iterating over files to Path extension trait 2023-06-17 12:08:46 +02:00
Jef Roosens 5275356353
feat: added backup cli command 2023-06-16 17:23:36 +02:00
Jef Roosens 27d7e681c3
feat: temporarily disable "remove old backups" 2023-06-15 22:54:17 +02:00
Jef Roosens 8add96b39b
feat: persistently store backup state 2023-06-15 20:38:52 +02:00
Jef Roosens d204c68400
fix: actually working incremental backup 2023-06-15 09:56:40 +02:00
Jef Roosens a9e7b215d1
feat: move running server to subcommand 2023-06-14 22:17:53 +02:00
Jef Roosens fcc111b4ef
feat: possible incremental backup implementation using new abstraction 2023-06-14 21:47:59 +02:00
Jef Roosens b7a678e32f
feat: lots of backup stuff 2023-06-13 17:43:47 +02:00
Jef Roosens 703a25e8be
refactor: use utc time 2023-06-13 15:12:30 +02:00
Jef Roosens 29d6713486
feat: implement own listing of files 2023-06-13 15:12:30 +02:00
Jef Roosens 4958257f6e
refactor: move backup logic to separate module 2023-06-13 15:12:30 +02:00
Jef Roosens 90aa929b73
feat: show backup time in message 2023-06-13 15:12:26 +02:00
21 changed files with 1576 additions and 273 deletions

.cargo/config.toml

@@ -1,3 +1,3 @@
[alias]
runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
runs = "run -- --config data/config --backup data/backups --world data/worlds --layers 2min,2,4,4;3min,3,2,2"
runrs = "run --release -- --config data/config --backup data/backups --world data/worlds --layers 2min,2,4,4;3min,3,2,2"

.gitignore

@@ -19,4 +19,4 @@ target/
# testing files
*.jar
data/
data*/

CHANGELOG.md

@@ -7,6 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/alex/src/branch/dev)
## [0.3.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.3.0)
### Added
* Incremental backups
* Chain length describes how many incremental backups to create from the
same full backup
* "backups to keep" has been replaced by "chains to keep"
* Server type & version and backup size are now stored as metadata in the
metadata file
* Backup layers
* Store multiple chains of backups in parallel, configuring each with
different parameters (son-father-grandfather principle)
* CLI commands for creating, restoring & listing backups
### Changed
* Running the server now uses the `run` CLI subcommand
* `server_type` and `server_version` arguments are now optional flags
### Removed
* `max_backups` setting
## [0.2.2](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.2)
### Fixed

Cargo.lock

@@ -10,11 +10,13 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "alex"
version = "0.2.2"
version = "0.3.0"
dependencies = [
"chrono",
"clap",
"flate2",
"serde",
"serde_json",
"signal-hook",
"tar",
]
@@ -123,6 +125,7 @@ dependencies = [
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"time",
"wasm-bindgen",
"winapi",
@@ -292,6 +295,12 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "itoa"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
[[package]]
name = "js-sys"
version = "0.3.63"
@@ -384,6 +393,43 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "ryu"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
[[package]]
name = "serde"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "signal-hook"
version = "0.3.15"

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "alex"
version = "0.2.2"
version = "0.3.0"
description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
authors = ["Jef Roosens"]
edition = "2021"
@@ -12,10 +12,11 @@ edition = "2021"
tar = "0.4.38"
# Used to compress said tarballs using gzip
flate2 = "1.0.26"
# Used for backup filenames
chrono = "0.4.26"
chrono = { version = "0.4.26", features = ["serde"] }
clap = { version = "4.3.1", features = ["derive", "env"] }
signal-hook = "0.3.15"
serde = { version = "1.0.164", features = ["derive", "rc"] }
serde_json = "1.0.96"
[profile.release]
lto = "fat"

Dockerfile

@@ -47,13 +47,15 @@ COPY --from=builder /app/target/debug/alex /bin/alex
RUN chmod +x /bin/alex
# Default value to keep users from eating up all ram accidentally
ENV ALEX_XMS=1024 \
ENV ALEX_CONFIG_DIR=/app/config \
ALEX_WORLD_DIR=/app/worlds \
ALEX_BACKUP_DIR=/app/backups \
ALEX_SERVER=paper \
ALEX_XMS=1024 \
ALEX_XMX=2048 \
ALEX_JAR=/app/server.jar \
ALEX_CONFIG_DIR=/app/config \
ALEX_WORLD_DIR=/app/worlds \
ALEX_BACKUPS_DIR=/app/backups \
ALEX_SERVER_VERSION="${MC_VERSION}-${PAPERMC_VERSION}"
ALEX_SERVER_VERSION="${MC_VERSION}-${PAPERMC_VERSION}" \
ALEX_LAYERS="2min,2,4,4;3min,3,2,2"
# Document exposed ports
EXPOSE 25565
@@ -62,4 +64,4 @@ EXPOSE 25565
USER paper:paper
ENTRYPOINT ["/bin/dumb-init", "--"]
CMD ["/bin/alex", "paper"]
CMD ["/bin/alex", "run"]

README.md

@@ -2,7 +2,24 @@
Alex is a wrapper around a typical Minecraft server process. It acts as the
parent process, and sits in between the user's input and the server's stdin.
This allows Alex to support additional commands that execute Rust code.
This allows Alex to support additional commands that execute Rust code, notably
creating periodic backups.
## Installation
Alex is distributed as statically compiled binaries for Linux amd64 and arm64.
These can be found
[here](https://git.rustybever.be/Chewing_Bever/alex/packages).
### Dockerfiles
You can easily install alex in your Docker images by letting Docker download it
for you. Add the following to your Dockerfile (replace with your required
version & architecture):
```dockerfile
ADD "https://git.rustybever.be/api/packages/Chewing_Bever/generic/alex/0.2.2/alex-linux-amd64" /bin/alex
```
## Why
@@ -19,8 +36,86 @@ Afterwards, saving is enabled again with `save-on`.
* Properly configures the process (working directory, optimisation flags)
* Configure everything as CLI arguments or environment variables
## Installation
## Configuration
Alex is distributed as statically compiled binaries for Linux amd64 and arm64.
These can be found
[here](https://git.rustybever.be/Chewing_Bever/alex/packages).
Most information can be retrieved easily by looking at the help command:
```
Wrapper around Minecraft server processes, designed to complement Docker image installations.
Usage: alex [OPTIONS] <COMMAND>
Commands:
run Run the server
backup Interact with the backup system without starting a server
help Print this message or the help of the given subcommand(s)
Options:
--config <CONFIG_DIR>
Directory where configs are stored, and where the server will run [env: ALEX_CONFIG_DIR=] [default: .]
--world <WORLD_DIR>
Directory where world files will be saved [env: ALEX_WORLD_DIR=] [default: ../worlds]
--backup <BACKUP_DIR>
Directory where backups will be stored [env: ALEX_BACKUP_DIR=] [default: ../backups]
--layers <LAYERS>
What backup layers to employ, provided as a list of tuples name,frequency,chains,chain_len delimited by semicolons (;) [env: ALEX_LAYERS=]
--server <SERVER>
Type of server [env: ALEX_SERVER=] [default: unknown] [possible values: unknown, paper, forge, vanilla]
--server-version <SERVER_VERSION>
Version string for the server, e.g. 1.19.4-545 [env: ALEX_SERVER_VERSION=] [default: ]
-h, --help
Print help
-V, --version
Print version
```
### Choosing layer parameters
One part of the configuration that does require some clarification is the layer
system. Alex can manage an arbitrary number of backup layers, each having its
own configuration. These layers can either use incremental or full backups,
depending on how they're configured.
These layers mostly correspond to the grandfather-father-son backup rotation
scheme. For example, one could have a layer that creates incremental backups
every 30 minutes, which are stored for 24 hours. This gives you 24 hours of
granular rollback in case your server suffers a crash. A second layer might
create a full backup every 24 hours, with backups being stored for 7 days. This
gives you 7 days' worth of backups with a granularity of 24 hours. This
approach allows for greater versatility, while not having to store a large
amount of data. Thanks to incremental backups, frequent backups don't have to
take long at all.
A layer consists of 4 pieces of metadata:
* A name, which will be used in the file system and the in-game notifications
* The frequency, which describes in minutes how frequently a backup should be
created
* How many chains should be kept at all times
* How long each chain should be
These last two require some clarification. In Alex, a "chain" describes an
initial full backup and zero or more incremental backups that are created from
that initial full backup. This concept exists because an incremental backup has
no real meaning if its ancestors are not known. To restore one of these chains,
all backups in the chain need to be restored in-order. Note that a chain length
of 1 disables incremental backups entirely.
How many backups to keep is defined by how many chains should be stored.
Because an incremental backup needs to have its ancestors in order to be
restored, we can't simply "keep the last n backups", as this would break these
chains. Therefore, you configure how many backups to store using these chains.
For example, if you configure a layer to store 5 chains of length 4, you will
have 20 archive files on disk, namely 5 full backups and 15 incremental
backups. Note that Alex applies these rules to *full* chains. An in-progress
chain does not count towards this total. Therefore, you can have up to `n-1`
additional archive files, with `n` being the chain length, on disk.
To look at it from another perspective, say we wish to have a granularity of 30
minutes for a timespan of 24 hours. Then we could configure the layer to only
save a single chain, with a chain length of 48. If we prefer to have a few full
backups instead of a long chain of incremental backups, we could instead use a
chain length of 12 and store 4 chains. Either way, the total comes out to 48,
which spans 24 hours if we make a backup every 30 minutes.
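
For example, the grandfather-father-son setup sketched above could be expressed as follows (a sketch; the layer names and values are invented, the `name,frequency,chains,chain_len` format and `;` delimiter come from the help text above):
```
# "halfhour": every 30 min, 1 chain of 48 incremental backups (24h of rollback)
# "daily": every 1440 min (24h), 7 chains of length 1 (7 standalone full backups)
alex --layers "halfhour,30,1,48;daily,1440,7,1" run
```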

src/backup/delta.rs

@@ -0,0 +1,86 @@
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::path::PathBuf;
/// Represents the changes relative to the previous backup
#[derive(Debug, Serialize, Deserialize)]
pub struct Delta {
/// What files were added/modified in each part of the tarball.
pub added: HashMap<PathBuf, HashSet<PathBuf>>,
/// What files were removed in this backup, in comparison to the previous backup. For full
/// backups, this will always be empty, as they do not consider previous backups.
/// The map stores a separate list for each top-level directory, as the contents of these
/// directories can come from different source directories.
pub removed: HashMap<PathBuf, HashSet<PathBuf>>,
}
impl Delta {
pub fn new() -> Self {
Self {
added: HashMap::new(),
removed: HashMap::new(),
}
}
/// Update the current state so that its result becomes the merge of itself and the other
/// state.
#[allow(dead_code)]
pub fn merge(&mut self, delta: &Self) {
for (dir, added) in delta.added.iter() {
// Files that were removed in the current state, but added in the new state, are no
// longer removed
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.retain(|k| !added.contains(k));
}
// Newly added files are added to the state as well
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.extend(added.iter().cloned());
} else {
self.added.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in delta.removed.iter() {
// Files that were originally added, but now deleted are removed from the added list
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.retain(|k| !removed.contains(k));
}
// Newly removed files are added to the state as well
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.extend(removed.iter().cloned());
} else {
self.removed.insert(dir.clone(), removed.clone());
}
}
}
/// Modify the given state by applying this delta's changes to it
pub fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
// First we add new files, then we remove the old ones
for (dir, added) in self.added.iter() {
if let Some(current) = state.get_mut(dir) {
current.extend(added.iter().cloned());
} else {
state.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in self.removed.iter() {
if let Some(current) = state.get_mut(dir) {
current.retain(|k| !removed.contains(k));
}
}
}
}
impl fmt::Display for Delta {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let added_count: usize = self.added.values().map(|s| s.len()).sum();
let removed_count: usize = self.removed.values().map(|s| s.len()).sum();
write!(f, "+{}-{}", added_count, removed_count)
}
}
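
To make the merge/apply semantics concrete, here is a minimal sketch (not part of the commit; the `worlds` directory and file name are made up) showing that a file added by one delta and removed by a later one cancels out:
```rust
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;

// Assumes the `Delta` type from this commit is in scope.
fn main() {
    // One delta adds a file under the `worlds` top-level directory...
    let mut first = Delta::new();
    first.added.insert(
        PathBuf::from("worlds"),
        HashSet::from([PathBuf::from("region/r.0.0.mca")]),
    );

    // ...and a later delta removes that same file again.
    let mut second = Delta::new();
    second.removed.insert(
        PathBuf::from("worlds"),
        HashSet::from([PathBuf::from("region/r.0.0.mca")]),
    );

    // Merging drops the file from `added` and records it in `removed`,
    // so applying the result to an empty state leaves no files behind.
    first.merge(&second);
    let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
    first.apply(&mut state);
    assert!(state.values().all(|files| files.is_empty()));
}
```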

src/backup/io_ext.rs

@@ -0,0 +1,43 @@
use std::io::{self, Write};
/// Wrapper around the Write trait that counts how many bytes have been written in total.
/// Heavily inspired by https://stackoverflow.com/a/42189386
pub struct CountingWrite<W> {
inner: W,
count: usize,
}
impl<W> CountingWrite<W>
where
W: Write,
{
pub fn new(writer: W) -> Self {
Self {
inner: writer,
count: 0,
}
}
pub fn bytes_written(&self) -> usize {
self.count
}
}
impl<W> Write for CountingWrite<W>
where
W: Write,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let res = self.inner.write(buf);
if let Ok(count) = res {
self.count += count;
}
res
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
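
A small usage sketch (illustrative, not from the commit): any `Write` implementor can be wrapped, here an in-memory `Vec<u8>`:
```rust
use std::io::Write;

// Assumes the `CountingWrite` type from this commit is in scope.
fn main() -> std::io::Result<()> {
    // Every write is forwarded to the inner buffer and counted.
    let mut writer = CountingWrite::new(Vec::new());
    writer.write_all(b"hello world")?;
    assert_eq!(writer.bytes_written(), 11);
    Ok(())
}
```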

src/backup/manager/config.rs

@@ -0,0 +1,46 @@
use std::error::Error;
use std::fmt;
use std::str::FromStr;
#[derive(Clone, Debug)]
pub struct ManagerConfig {
pub name: String,
pub frequency: u32,
pub chains: u64,
pub chain_len: u64,
}
#[derive(Debug)]
pub struct ParseManagerConfigErr;
impl Error for ParseManagerConfigErr {}
impl fmt::Display for ParseManagerConfigErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "parse manager config err")
}
}
impl FromStr for ManagerConfig {
type Err = ParseManagerConfigErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let splits: Vec<&str> = s.split(',').collect();
if let [name, frequency, chains, chain_len] = splits[..] {
let name: String = name.parse().map_err(|_| ParseManagerConfigErr)?;
let frequency: u32 = frequency.parse().map_err(|_| ParseManagerConfigErr)?;
let chains: u64 = chains.parse().map_err(|_| ParseManagerConfigErr)?;
let chain_len: u64 = chain_len.parse().map_err(|_| ParseManagerConfigErr)?;
Ok(ManagerConfig {
name,
chains,
chain_len,
frequency,
})
} else {
Err(ParseManagerConfigErr)
}
}
}
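
A quick sketch of the parser in action (illustrative only), fed the same `2min,2,4,4` tuple that appears in the repository's cargo aliases:
```rust
use std::str::FromStr;

// Assumes the `ManagerConfig` type from this commit is in scope.
fn main() {
    // Format: name,frequency,chains,chain_len
    let config = ManagerConfig::from_str("2min,2,4,4").unwrap();
    assert_eq!(config.name, "2min");
    assert_eq!(config.frequency, 2); // minutes between backups
    assert_eq!(config.chains, 4); // full chains to keep
    assert_eq!(config.chain_len, 4); // backups per chain

    // A tuple with a missing field is rejected.
    assert!(ManagerConfig::from_str("2min,2,4").is_err());
}
```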

src/backup/manager/meta.rs

@@ -0,0 +1,135 @@
use super::{Manager, ManagerConfig};
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use std::io;
use std::path::PathBuf;
/// Manages a collection of backup layers, allowing them to be utilized as a single object.
pub struct MetaManager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
backup_dir: PathBuf,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
managers: HashMap<String, Manager<T>>,
}
impl<T> MetaManager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
pub fn new<P: Into<PathBuf>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
) -> Self {
MetaManager {
backup_dir: backup_dir.into(),
dirs,
default_metadata,
managers: HashMap::new(),
}
}
/// Add a new manager to track, initializing it first.
pub fn add(&mut self, config: &ManagerConfig) -> io::Result<()> {
// Backup dir itself should exist, but we control its contents, so we can create
// separate directories for each layer
let path = self.backup_dir.join(&config.name);
// If the directory already exists, that's okay
match std::fs::create_dir(&path) {
Ok(()) => (),
Err(e) => match e.kind() {
io::ErrorKind::AlreadyExists => (),
_ => return Err(e),
},
};
let mut manager = Manager::new(
path,
self.dirs.clone(),
self.default_metadata.clone(),
config.chain_len,
config.chains,
chrono::Duration::minutes(config.frequency.into()),
);
manager.load()?;
self.managers.insert(config.name.clone(), manager);
Ok(())
}
/// Convenient wrapper for `add`.
pub fn add_all(&mut self, configs: &Vec<ManagerConfig>) -> io::Result<()> {
for config in configs {
self.add(config)?;
}
Ok(())
}
/// Return the name of the next scheduled layer, if one or more managers are present.
pub fn next_scheduled_layer(&self) -> Option<&str> {
self.managers
.iter()
.min_by_key(|(_, m)| m.next_scheduled_time())
.map(|(k, _)| k.as_str())
}
/// Return the earliest scheduled time for the underlying managers.
pub fn next_scheduled_time(&self) -> Option<chrono::DateTime<Utc>> {
self.managers
.values()
.map(|m| m.next_scheduled_time())
.min()
}
/// Perform a backup cycle for the earliest scheduled manager.
pub fn perform_backup_cycle(&mut self) -> io::Result<()> {
if let Some(manager) = self
.managers
.values_mut()
.min_by_key(|m| m.next_scheduled_time())
{
manager.create_backup()?;
manager.remove_old_backups()
} else {
Ok(())
}
}
/// Create a manual backup for a specific layer
pub fn create_backup(&mut self, layer: &str) -> Option<io::Result<()>> {
if let Some(manager) = self.managers.get_mut(layer) {
let mut res = manager.create_backup();
if res.is_ok() {
res = manager.remove_old_backups();
}
Some(res)
} else {
None
}
}
/// Restore a backup for a specific layer
pub fn restore_backup(
&self,
layer: &str,
start_time: chrono::DateTime<Utc>,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> Option<io::Result<()>> {
self.managers
.get(layer)
.map(|manager| manager.restore_backup(start_time, dirs))
}
pub fn managers(&self) -> &HashMap<String, Manager<T>> {
&self.managers
}
}
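
A hedged sketch of how this type is driven (the directories are invented and `()` stands in for the real `Metadata` type; `src/cli/run.rs` below does the real wiring):
```rust
use std::path::PathBuf;

// Assumes `MetaManager` and `ManagerConfig` from this commit are in scope.
fn example() -> std::io::Result<()> {
    let dirs = vec![
        (PathBuf::from("config"), PathBuf::from("data/config")),
        (PathBuf::from("worlds"), PathBuf::from("data/worlds")),
    ];
    let mut meta = MetaManager::new("data/backups", dirs, ());

    // One layer per config tuple; each gets its own subdirectory.
    meta.add(&"2min,2,4,4".parse().unwrap())?;
    meta.add(&"3min,3,2,2".parse().unwrap())?;

    // Back up whichever layer is due first, then prune its old chains.
    println!("next up: {:?}", meta.next_scheduled_layer());
    meta.perform_backup_cycle()
}
```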

src/backup/manager/mod.rs

@@ -0,0 +1,194 @@
mod config;
mod meta;
pub use config::ManagerConfig;
pub use meta::MetaManager;
use super::Backup;
use crate::other;
use chrono::SubsecRound;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use std::fs::File;
use std::io;
use std::path::PathBuf;
/// Manages a single backup layer consisting of one or more chains of backups.
pub struct Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
backup_dir: PathBuf,
dirs: Vec<(PathBuf, PathBuf)>,
default_metadata: T,
chain_len: u64,
chains_to_keep: u64,
frequency: chrono::Duration,
chains: Vec<Vec<Backup<T>>>,
}
impl<T> Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de> + std::fmt::Debug,
{
const METADATA_FILE: &str = "alex.json";
pub fn new<P: Into<PathBuf>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
metadata: T,
chain_len: u64,
chains_to_keep: u64,
frequency: chrono::Duration,
) -> Self {
Self {
backup_dir: backup_dir.into(),
dirs,
default_metadata: metadata,
chain_len,
chains_to_keep,
frequency,
chains: Vec::new(),
}
}
/// Create a new backup, either full or incremental, depending on the state of the current
/// chain.
pub fn create_backup(&mut self) -> io::Result<()> {
// We start a new chain if the current chain is complete, or if there isn't a first chain
// yet
if let Some(current_chain) = self.chains.last() {
let current_chain_len: u64 = current_chain.len().try_into().unwrap();
if current_chain_len >= self.chain_len {
self.chains.push(Vec::new());
}
} else {
self.chains.push(Vec::new());
}
let current_chain = self.chains.last_mut().unwrap();
let mut backup = if !current_chain.is_empty() {
let previous_backup = current_chain.last().unwrap();
let state = Backup::state(current_chain);
Backup::create_from(
state,
previous_backup.start_time,
&self.backup_dir,
&self.dirs,
)?
} else {
Backup::create(&self.backup_dir, &self.dirs)?
};
backup.set_metadata(self.default_metadata.clone());
current_chain.push(backup);
self.save()?;
Ok(())
}
/// Delete all backups associated with outdated chains, and forget those chains.
pub fn remove_old_backups(&mut self) -> io::Result<()> {
let chains_to_store: usize = self.chains_to_keep.try_into().unwrap();
if chains_to_store < self.chains.len() {
let mut remove_count: usize = self.chains.len() - chains_to_store;
// We only count finished chains towards the list of stored chains
let chain_len: usize = self.chain_len.try_into().unwrap();
if self.chains.last().unwrap().len() < chain_len {
remove_count -= 1;
}
for chain in self.chains.drain(..remove_count) {
for backup in chain {
let path = Backup::path(&self.backup_dir, backup.start_time);
std::fs::remove_file(path)?;
}
}
self.save()?;
}
Ok(())
}
/// Write the in-memory state to disk.
pub fn save(&self) -> io::Result<()> {
let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
serde_json::to_writer(json_file, &self.chains)?;
Ok(())
}
/// Overwrite the in-memory state with the on-disk state.
pub fn load(&mut self) -> io::Result<()> {
let json_file = match File::open(self.backup_dir.join(Self::METADATA_FILE)) {
Ok(f) => f,
Err(e) => {
// Don't error out if the file isn't there, it will be created when necessary
if e.kind() == io::ErrorKind::NotFound {
self.chains = Vec::new();
return Ok(());
} else {
return Err(e);
}
}
};
self.chains = serde_json::from_reader(json_file)?;
Ok(())
}
/// Calculate the next time a backup should be created. If no backup has been created yet, it
/// will return now.
pub fn next_scheduled_time(&self) -> chrono::DateTime<Utc> {
if let Some(last_chain) = self.chains.last() {
if let Some(last_backup) = last_chain.last() {
return last_backup.start_time + self.frequency;
}
}
chrono::offset::Utc::now()
}
/// Restore the backup with the given start time by restoring its chain up to and including the
/// backup, in order.
pub fn restore_backup(
&self,
start_time: chrono::DateTime<Utc>,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<()> {
// Iterate over each chain, skipping elements until the element with the given start time
// is possibly found.
for chain in &self.chains {
// If we find the element in the chain, restore the entire chain up to and including
// the element
if let Some(index) = chain
.iter()
.position(|b| b.start_time.trunc_subsecs(0) == start_time)
{
for backup in chain.iter().take(index + 1) {
backup.restore(&self.backup_dir, dirs)?;
}
return Ok(());
}
}
Err(other("Unknown backup."))
}
/// Get a reference to the underlying chains
pub fn chains(&self) -> &Vec<Vec<Backup<T>>> {
&self.chains
}
}
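
A construction sketch for a single standalone layer (illustrative; paths invented, `()` as metadata): 2 chains of 4 backups, one backup every 30 minutes:
```rust
use std::path::PathBuf;

// Assumes the `Manager` type from this commit is in scope.
fn example() -> std::io::Result<()> {
    let dirs = vec![(PathBuf::from("worlds"), PathBuf::from("data/worlds"))];
    let mut manager: Manager<()> = Manager::new(
        "data/backups/30min",
        dirs,
        (),
        4,                             // chain_len: 1 full + 3 incremental
        2,                             // chains_to_keep
        chrono::Duration::minutes(30), // frequency
    );

    manager.load()?; // pick up state from a previous run, if any
    manager.create_backup()?; // full or incremental, depending on the chain
    manager.remove_old_backups()
}
```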

src/backup/mod.rs

@@ -0,0 +1,278 @@
mod delta;
mod io_ext;
pub mod manager;
mod path;
use delta::Delta;
pub use manager::Manager;
pub use manager::ManagerConfig;
pub use manager::MetaManager;
use chrono::Utc;
use flate2::read::GzDecoder;
use flate2::write::GzEncoder;
use flate2::Compression;
use path::PathExt;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
const BYTE_SUFFIXES: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum BackupType {
Full,
Incremental,
}
/// Represents a successful backup
#[derive(Serialize, Deserialize, Debug)]
pub struct Backup<T: Clone> {
/// When the backup was started (also corresponds to the name)
pub start_time: chrono::DateTime<Utc>,
/// When the backup finished
pub end_time: chrono::DateTime<Utc>,
pub size: usize,
/// Type of the backup
pub type_: BackupType,
pub delta: Delta,
/// Additional metadata that can be associated with a given backup
pub metadata: Option<T>,
}
impl Backup<()> {
pub const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
/// Return the path to a backup file by properly formatting the data.
pub fn path<P: AsRef<Path>>(backup_dir: P, start_time: chrono::DateTime<Utc>) -> PathBuf {
let backup_dir = backup_dir.as_ref();
let filename = format!("{}", start_time.format(Self::FILENAME_FORMAT));
backup_dir.join(filename)
}
}
impl<T: Clone> Backup<T> {
/// Set the backup's metadata.
pub fn set_metadata(&mut self, metadata: T) {
self.metadata = Some(metadata);
}
/// Resolve the state of the list of backups by applying their deltas in-order to an initially
/// empty state.
pub fn state(backups: &Vec<Self>) -> HashMap<PathBuf, HashSet<PathBuf>> {
let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
for backup in backups {
backup.delta.apply(&mut state);
}
state
}
/// Create a new Full backup, populated with the given directories.
///
/// # Arguments
///
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create<P: AsRef<Path>>(
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(src_dir).unwrap();
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar.to_path_buf(), added_files);
}
let mut enc = ar.into_inner()?;
// The docs recommend running try_finish before unwrapping using finish
enc.try_finish()?;
let tar_gz = enc.finish()?;
Ok(Backup {
type_: BackupType::Full,
start_time,
end_time: chrono::Utc::now(),
size: tar_gz.bytes_written(),
delta,
metadata: None,
})
}
/// Create a new Incremental backup from the given state, populated with the given directories.
///
/// # Arguments
///
/// * `previous_state` - State the file system was in during the previous backup in the chain
/// * `previous_start_time` - Start time of the previous backup; used to filter files
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create_from<P: AsRef<Path>>(
previous_state: HashMap<PathBuf, HashSet<PathBuf>>,
previous_start_time: chrono::DateTime<Utc>,
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut all_files: HashSet<PathBuf> = HashSet::new();
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(src_dir).unwrap();
if !path.not_modified_since(previous_start_time) {
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
all_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar.clone(), added_files);
if let Some(previous_files) = previous_state.get(dir_in_tar) {
delta.removed.insert(
dir_in_tar.to_path_buf(),
previous_files.difference(&all_files).cloned().collect(),
);
}
}
let mut enc = ar.into_inner()?;
// The docs recommend running try_finish before unwrapping using finish
enc.try_finish()?;
let tar_gz = enc.finish()?;
Ok(Backup {
type_: BackupType::Incremental,
start_time,
end_time: chrono::Utc::now(),
size: tar_gz.bytes_written(),
delta,
metadata: None,
})
}
/// Restore the backup by extracting its contents to the respective directories.
///
/// # Arguments
///
/// * `backup_dir` - Backup directory where the file is stored
/// * `dirs` - list of tuples `(path_in_tar, dst_dir)` with `dst_dir` the directory on-disk
/// where the files stored under `path_in_tar` inside the tarball should be extracted to.
pub fn restore<P: AsRef<Path>>(
&self,
backup_dir: P,
dirs: &Vec<(PathBuf, PathBuf)>,
) -> io::Result<()> {
let path = Backup::path(backup_dir, self.start_time);
let tar_gz = File::open(path)?;
let enc = GzDecoder::new(tar_gz);
let mut ar = tar::Archive::new(enc);
// Unpack each file by matching it with one of the destination directories and extracting
// it to the right path
for entry in ar.entries()? {
let mut entry = entry?;
let entry_path_in_tar = entry.path()?.to_path_buf();
for (path_in_tar, dst_dir) in dirs {
if entry_path_in_tar.starts_with(path_in_tar) {
let dst_path =
dst_dir.join(entry_path_in_tar.strip_prefix(path_in_tar).unwrap());
// Ensure all parent directories are present
std::fs::create_dir_all(dst_path.parent().unwrap())?;
entry.unpack(dst_path)?;
break;
}
}
}
// Remove any files
for (path_in_tar, dst_dir) in dirs {
if let Some(removed) = self.delta.removed.get(path_in_tar) {
for path in removed {
let dst_path = dst_dir.join(path);
std::fs::remove_file(dst_path)?;
}
}
}
Ok(())
}
}
impl<T: Clone> fmt::Display for Backup<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let letter = match self.type_ {
BackupType::Full => 'F',
BackupType::Incremental => 'I',
};
// Pretty-print size
// If your backup is a petabyte or larger, this will crash and you need to re-evaluate your
// life choices
let index = self.size.ilog(1024) as usize;
let size = self.size as f64 / (1024.0_f64.powi(index as i32));
let duration = self.end_time - self.start_time;
write!(
f,
"{} ({}, {}m{}s, {:.2}{}, {})",
self.start_time.format(Backup::FILENAME_FORMAT),
letter,
duration.num_seconds() / 60,
duration.num_seconds() % 60,
size,
BYTE_SUFFIXES[index],
self.delta
)
}
}
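
As a small sketch of the naming scheme (not part of the commit; the timestamp is invented and Unix path separators are assumed), `Backup::path` derives the archive location from the start time:
```rust
use chrono::TimeZone;

// Assumes the `Backup` type from this commit is in scope.
fn main() {
    let start = chrono::Utc.with_ymd_and_hms(2023, 7, 4, 15, 55, 52).unwrap();
    let path = Backup::path("data/backups/2min", start);
    assert_eq!(
        path.to_string_lossy(),
        "data/backups/2min/2023-07-04_15-55-52.tar.gz"
    );
}
```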

src/backup/path.rs

@@ -0,0 +1,147 @@
use chrono::{Local, Utc};
use std::collections::HashSet;
use std::ffi::OsString;
use std::fs::DirEntry;
use std::path::{Path, PathBuf};
use std::{fs, io};
pub struct ReadDirRecursive {
ignored: HashSet<OsString>,
read_dir: fs::ReadDir,
dir_stack: Vec<PathBuf>,
files_only: bool,
}
impl ReadDirRecursive {
/// Start the iterator for a new directory
pub fn start<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let path = path.as_ref();
let read_dir = path.read_dir()?;
Ok(ReadDirRecursive {
ignored: HashSet::new(),
read_dir,
dir_stack: Vec::new(),
files_only: false,
})
}
pub fn ignored<S: Into<OsString>>(mut self, s: S) -> Self {
self.ignored.insert(s.into());
self
}
pub fn files(mut self) -> Self {
self.files_only = true;
self
}
/// Tries to populate the `read_dir` field with a new `ReadDir` instance to consume.
fn next_read_dir(&mut self) -> io::Result<bool> {
if let Some(path) = self.dir_stack.pop() {
self.read_dir = path.read_dir()?;
Ok(true)
} else {
Ok(false)
}
}
/// Convenience method to add a new directory to the stack.
fn push_entry(&mut self, entry: &io::Result<DirEntry>) {
if let Ok(entry) = entry {
if entry.path().is_dir() {
self.dir_stack.push(entry.path());
}
}
}
/// Determine whether an entry should be returned by the iterator.
fn should_return(&self, entry: &io::Result<DirEntry>) -> bool {
if let Ok(entry) = entry {
let mut res = !self.ignored.contains(&entry.file_name());
// Please just let me combine these already
if self.files_only {
if let Ok(file_type) = entry.file_type() {
res = res && file_type.is_file();
}
// We couldn't determine if it's a file, so we don't return it
else {
res = false;
}
}
res
} else {
true
}
}
}
impl Iterator for ReadDirRecursive {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<Self::Item> {
loop {
// First, we try to consume the current directory's items
while let Some(entry) = self.read_dir.next() {
self.push_entry(&entry);
if self.should_return(&entry) {
return Some(entry);
}
}
// If we get an error while setting up a new directory, we return this, otherwise we
// keep trying to consume the directories
match self.next_read_dir() {
Ok(true) => (),
// There's no more directories to traverse, so the iterator is done
Ok(false) => return None,
Err(e) => return Some(Err(e)),
}
}
}
}
pub trait PathExt {
/// Confirm whether the file has not been modified since the given timestamp.
///
/// This function will only return true if it can determine with certainty that the file hasn't
/// been modified.
///
/// # Args
///
/// * `timestamp` - Timestamp to compare modified time with
///
/// # Returns
///
/// True if the file has not been modified for sure, false otherwise.
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool;
/// An extension of the `read_dir` command that runs through the entire underlying directory
/// structure using breadth-first search
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive>;
}
impl PathExt for Path {
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool {
if let Ok(metadata) = self.metadata() {
if let Ok(last_modified) = metadata.modified() {
let t: chrono::DateTime<Utc> = last_modified.into();
let t = t.with_timezone(&Local);
return t < timestamp;
}
}
false
}
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive> {
ReadDirRecursive::start(self)
}
}
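
A usage sketch (the directory is invented) mirroring how the backup code walks its source directories; the `PathExt` trait needs to be in scope:
```rust
use std::path::Path;

fn example() -> std::io::Result<()> {
    // Recursively walk the tree, skip anything named `cache`,
    // and only yield regular files.
    let walker = Path::new("data/config")
        .read_dir_recursive()?
        .ignored("cache")
        .files();
    for entry in walker {
        println!("{}", entry?.path().display());
    }
    Ok(())
}
```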

src/cli/backup.rs

@@ -0,0 +1,173 @@
use crate::backup::Backup;
use crate::cli::Cli;
use crate::other;
use chrono::{TimeZone, Utc};
use clap::{Args, Subcommand};
use std::io;
use std::path::PathBuf;
#[derive(Subcommand)]
pub enum BackupCommands {
/// List all tracked backups
List(BackupListArgs),
/// Manually create a new backup
Create(BackupCreateArgs),
/// Restore a backup
Restore(BackupRestoreArgs),
}
#[derive(Args)]
pub struct BackupArgs {
#[command(subcommand)]
pub command: BackupCommands,
}
#[derive(Args)]
pub struct BackupCreateArgs {
/// What layer to create a backup in
layer: String,
}
#[derive(Args)]
pub struct BackupListArgs {
/// What layer to list
layer: Option<String>,
}
#[derive(Args)]
pub struct BackupRestoreArgs {
/// Path to the backup inside the backup directory
path: PathBuf,
/// Directory to store config in
output_config: PathBuf,
/// Directory to store worlds in
output_worlds: PathBuf,
/// Whether to overwrite the contents of the existing directories
#[arg(short, long, default_value_t = false)]
force: bool,
/// Create output directories if they don't exist
#[arg(short, long, default_value_t = false)]
make: bool,
}
impl BackupArgs {
pub fn run(&self, cli: &Cli) -> io::Result<()> {
match &self.command {
BackupCommands::Create(args) => args.run(cli),
BackupCommands::List(args) => args.run(cli),
BackupCommands::Restore(args) => args.run(cli),
}
}
}
impl BackupCreateArgs {
pub fn run(&self, cli: &Cli) -> io::Result<()> {
let mut meta = cli.meta()?;
if let Some(res) = meta.create_backup(&self.layer) {
res
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown layer"))
}
}
}
impl BackupRestoreArgs {
pub fn run(&self, cli: &Cli) -> io::Result<()> {
let backup_dir = cli.backup.canonicalize()?;
// Create directories if needed
if self.make {
std::fs::create_dir_all(&self.output_config)?;
std::fs::create_dir_all(&self.output_worlds)?;
}
let output_config = self.output_config.canonicalize()?;
let output_worlds = self.output_worlds.canonicalize()?;
// Parse input path
let path = self.path.canonicalize()?;
if !path.starts_with(&backup_dir) {
return Err(other("Provided file is not inside the backup directory."));
}
let layer = if let Some(parent) = path.parent() {
// Backup files should be stored nested inside a layer's folder
if parent != backup_dir {
parent.file_name().unwrap().to_string_lossy()
} else {
return Err(other("Invalid path."));
}
} else {
return Err(other("Invalid path."));
};
let timestamp = if let Some(filename) = path.file_name() {
Utc.datetime_from_str(&filename.to_string_lossy(), Backup::FILENAME_FORMAT)
.map_err(|_| other("Invalid filename."))?
} else {
return Err(other("Invalid filename."));
};
let meta = cli.meta()?;
// Clear previous contents of directories
let mut entries = output_config
.read_dir()?
.chain(output_worlds.read_dir()?)
.peekable();
if entries.peek().is_some() && !self.force {
return Err(other("Output directories are not empty. If you wish to overwrite these contents, use the force flag."));
}
for entry in entries {
let path = entry?.path();
if path.is_dir() {
std::fs::remove_dir_all(path)?;
} else {
std::fs::remove_file(path)?;
}
}
let dirs = vec![
(PathBuf::from("config"), output_config),
(PathBuf::from("worlds"), output_worlds),
];
// Restore the backup
if let Some(res) = meta.restore_backup(&layer, timestamp, &dirs) {
res
} else {
Err(other("Unknown layer"))
}
}
}
impl BackupListArgs {
pub fn run(&self, cli: &Cli) -> io::Result<()> {
let meta = cli.meta()?;
// A bit scuffed? Sure
for (name, manager) in meta
.managers()
.iter()
.filter(|(name, _)| self.layer.is_none() || &self.layer.as_ref().unwrap() == name)
{
println!("{}", name);
for chain in manager.chains().iter().filter(|c| !c.is_empty()) {
let mut iter = chain.iter();
println!(" {}", iter.next().unwrap());
for backup in iter {
println!(" {}", backup);
}
}
}
Ok(())
}
}
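
Taken together, the subcommands above could be invoked roughly like this (a sketch; the layer name, timestamp, and directories are invented):
```
# List tracked backups, for all layers or a single one
alex backup list
alex backup list 2min

# Manually create a backup in the 2min layer
alex backup create 2min

# Restore the chain up to the given archive, creating (-m) and
# overwriting (-f) the output directories as needed
alex backup restore -m -f \
    ../backups/2min/2023-07-04_15-55-52.tar.gz data/config data/worlds
```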

src/cli/mod.rs

@@ -0,0 +1,92 @@
mod backup;
mod run;
pub use crate::backup::MetaManager;
pub use crate::server::Metadata;
pub use backup::{BackupArgs, BackupCommands};
pub use run::RunArgs;
use crate::backup::ManagerConfig;
use crate::server::ServerType;
use clap::{Parser, Subcommand};
use std::io;
use std::path::PathBuf;
#[derive(Subcommand)]
pub enum Commands {
/// Run the server
Run(RunArgs),
/// Interact with the backup system without starting a server
Backup(BackupArgs),
}
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
#[command(subcommand)]
pub command: Commands,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR",
global = true
)]
pub config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR",
global = true
)]
pub world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_BACKUP_DIR",
global = true
)]
pub backup: PathBuf,
/// What backup layers to employ, provided as a list of tuples name,frequency,chains,chain_len
/// delimited by semicolons (;).
#[arg(long, env = "ALEX_LAYERS", global = true, value_delimiter = ';')]
pub layers: Vec<ManagerConfig>,
/// Type of server
#[arg(long, default_value = "unknown", env = "ALEX_SERVER", global = true)]
pub server: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(long, default_value = "", env = "ALEX_SERVER_VERSION", global = true)]
pub server_version: String,
}
impl Cli {
pub fn run(&self) -> io::Result<()> {
match &self.command {
Commands::Run(args) => args.run(self),
Commands::Backup(args) => args.run(self),
}
}
/// Convenience method to initialize backup manager from the cli arguments
pub fn meta(&self) -> io::Result<MetaManager<Metadata>> {
let metadata = Metadata {
server_type: self.server,
server_version: self.server_version.clone(),
};
let dirs = vec![
(PathBuf::from("config"), self.config.canonicalize()?),
(PathBuf::from("worlds"), self.world.canonicalize()?),
];
let mut meta = MetaManager::new(self.backup.canonicalize()?, dirs, metadata);
meta.add_all(&self.layers)?;
Ok(meta)
}
}

src/cli/run.rs

@@ -0,0 +1,94 @@
use crate::cli::Cli;
use crate::server;
use crate::signals;
use crate::stdin;
use clap::Args;
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
#[derive(Args)]
pub struct RunArgs {
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
pub jar: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
pub java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
pub xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
pub xmx: u64,
/// Don't actually run the server, but simply output the server configuration that would have
/// been run
#[arg(short, long, default_value_t = false)]
pub dry: bool,
}
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>) {
loop {
let next_scheduled_time = {
let server = counter.lock().unwrap();
server.backups.next_scheduled_time().unwrap()
};
let now = chrono::offset::Utc::now();
if next_scheduled_time > now {
std::thread::sleep((next_scheduled_time - now).to_std().unwrap());
}
{
let mut server = counter.lock().unwrap();
// We explicitly ignore the error here, as we don't want the thread to fail
let _ = server.backup();
}
}
}
impl RunArgs {
pub fn run(&self, cli: &Cli) -> io::Result<()> {
let (_, mut signals) = signals::install_signal_handlers()?;
let mut cmd = server::ServerCommand::new(cli.server, &cli.server_version)
.java(&self.java)
.jar(self.jar.clone())
.config(cli.config.clone())
.world(cli.world.clone())
.backup(cli.backup.clone())
.managers(cli.layers.clone())
.xms(self.xms)
.xmx(self.xmx);
cmd.canonicalize()?;
if self.dry {
print!("{}", cmd);
return Ok(());
}
let counter = Arc::new(Mutex::new(cmd.spawn()?));
if !cli.layers.is_empty() {
let clone = Arc::clone(&counter);
std::thread::spawn(move || backups_thread(clone));
}
// Spawn thread that handles the main stdin loop
let clone = Arc::clone(&counter);
std::thread::spawn(move || stdin::handle_stdin(clone));
// Signal handler loop exits the process when necessary
signals::handle_signals(&mut signals, counter)
}
}

src/main.rs

@@ -1,123 +1,38 @@
mod backup;
mod cli;
mod server;
mod signals;
mod stdin;
use crate::cli::Cli;
use clap::Parser;
use server::ServerType;
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
/// Type of server
type_: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(env = "ALEX_SERVER_VERSION")]
server_version: String,
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
jar: PathBuf,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR"
)]
config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR"
)]
world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_BACKUP_DIR"
)]
backup: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
xmx: u64,
/// How many backups to keep
#[arg(short = 'n', long, default_value_t = 7, env = "ALEX_MAX_BACKUPS")]
max_backups: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
frequency: u64,
/// Don't actually run the server, but simply output the server configuration that would have
/// been run
#[arg(short, long, default_value_t = false)]
dry: bool,
pub fn other(msg: &str) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
loop {
std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
// fn commands_backup(cli: &Cli, args: &BackupArgs) -> io::Result<()> {
// let metadata = server::Metadata {
// server_type: cli.server,
// server_version: cli.server_version.clone(),
// };
// let dirs = vec![
// (PathBuf::from("config"), cli.config.clone()),
// (PathBuf::from("worlds"), cli.world.clone()),
// ];
// let mut meta = MetaManager::new(cli.backup.clone(), dirs, metadata);
// meta.add_all(&cli.layers)?;
{
let mut server = counter.lock().unwrap();
// match &args.command {
// BackupCommands::List => ()
// }
// We explicitly ignore the error here, as we don't want the thread to fail
let _ = server.backup();
}
}
}
// // manager.create_backup()?;
// // manager.remove_old_backups()
// }
fn main() -> io::Result<()> {
let (_, mut signals) = signals::install_signal_handlers()?;
let cli = Cli::parse();
let mut cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
.java(&cli.java)
.jar(cli.jar)
.config(cli.config)
.world(cli.world)
.backup(cli.backup)
.xms(cli.xms)
.xmx(cli.xmx)
.max_backups(cli.max_backups);
cmd.canonicalize()?;
if cli.dry {
print!("{}", cmd);
return Ok(());
}
let counter = Arc::new(Mutex::new(cmd.spawn()?));
if cli.frequency > 0 {
let clone = Arc::clone(&counter);
std::thread::spawn(move || backups_thread(clone, cli.frequency));
}
// Spawn thread that handles the main stdin loop
let clone = Arc::clone(&counter);
std::thread::spawn(move || stdin::handle_stdin(clone));
// Signal handler loop exits the process when necessary
signals::handle_signals(&mut signals, counter)
cli.run()
}

src/server/command.rs

@@ -1,30 +1,12 @@
use crate::server::ServerProcess;
use clap::ValueEnum;
use crate::backup::ManagerConfig;
use crate::backup::MetaManager;
use crate::server::{Metadata, ServerProcess, ServerType};
use std::fmt;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum ServerType {
Paper,
Forge,
Vanilla,
}
impl fmt::Display for ServerType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ServerType::Paper => "PaperMC",
ServerType::Forge => "Forge",
ServerType::Vanilla => "Vanilla",
};
write!(f, "{}", s)
}
}
pub struct ServerCommand {
type_: ServerType,
version: String,
@@ -35,7 +17,7 @@ pub struct ServerCommand {
backup_dir: PathBuf,
xms: u64,
xmx: u64,
max_backups: u64,
managers: Vec<ManagerConfig>,
}
impl ServerCommand {
@@ -50,7 +32,7 @@ impl ServerCommand {
backup_dir: PathBuf::from("backups"),
xms: 1024,
xmx: 2048,
max_backups: 7,
managers: Vec::new(),
}
}
@@ -91,8 +73,9 @@ impl ServerCommand {
self
}
pub fn max_backups(mut self, v: u64) -> Self {
self.max_backups = v;
pub fn managers(mut self, configs: Vec<ManagerConfig>) -> Self {
self.managers = configs;
self
}
@@ -179,19 +162,24 @@ }
}
pub fn spawn(&mut self) -> std::io::Result<ServerProcess> {
let metadata = Metadata {
server_type: self.type_,
server_version: self.version.clone(),
};
let dirs = vec![
(PathBuf::from("config"), self.config_dir.clone()),
(PathBuf::from("worlds"), self.world_dir.clone()),
];
let mut meta = MetaManager::new(self.backup_dir.clone(), dirs, metadata);
meta.add_all(&self.managers)?;
let mut cmd = self.create_cmd();
self.accept_eula()?;
let child = cmd.spawn()?;
Ok(ServerProcess::new(
self.type_,
self.version.clone(),
self.config_dir.clone(),
self.world_dir.clone(),
self.backup_dir.clone(),
self.max_backups,
child,
))
Ok(ServerProcess::new(meta, child))
}
}

src/server/mod.rs

@ -1,5 +1,36 @@
mod command;
mod process;
pub use command::{ServerCommand, ServerType};
pub use command::ServerCommand;
pub use process::ServerProcess;
use clap::ValueEnum;
use serde::{Deserialize, Serialize};
use std::fmt;
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Serialize, Deserialize, Debug)]
pub enum ServerType {
Unknown,
Paper,
Forge,
Vanilla,
}
impl fmt::Display for ServerType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ServerType::Unknown => "Unknown",
ServerType::Paper => "PaperMC",
ServerType::Forge => "Forge",
ServerType::Vanilla => "Vanilla",
};
write!(f, "{}", s)
}
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Metadata {
pub server_type: ServerType,
pub server_version: String,
}
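
For reference, a sketch (not from the commit) of what this metadata looks like once serialized into a layer's `alex.json`:
```rust
// Assumes `Metadata` and `ServerType` from this commit are in scope,
// plus the serde_json dependency added in Cargo.toml above.
fn main() {
    let meta = Metadata {
        server_type: ServerType::Paper,
        server_version: String::from("1.19.4-545"),
    };
    // Prints: {"server_type":"Paper","server_version":"1.19.4-545"}
    println!("{}", serde_json::to_string(&meta).unwrap());
}
```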

src/server/process.rs

@@ -1,44 +1,18 @@
use crate::server::ServerType;
use flate2::write::GzEncoder;
use flate2::Compression;
use crate::backup::MetaManager;
use crate::server::Metadata;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::Child;
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
fn getegid() -> u32;
}
pub struct ServerProcess {
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
pub backups: MetaManager<Metadata>,
}
impl ServerProcess {
pub fn new(
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
) -> ServerProcess {
pub fn new(manager: MetaManager<Metadata>, child: Child) -> ServerProcess {
ServerProcess {
type_,
version,
config_dir,
world_dir,
backup_dir,
max_backups,
child,
backups: manager,
}
}
@@ -74,7 +48,8 @@ impl ServerProcess {
/// Perform a backup by disabling the server's save feature and flushing its data, before
/// creating an archive file.
pub fn backup(&mut self) -> std::io::Result<()> {
self.custom("say backing up server")?;
let layer_name = String::from(self.backups.next_scheduled_layer().unwrap());
self.custom(&format!("say starting backup for layer '{}'", layer_name))?;
// Make sure the server isn't modifying the files during the backup
self.custom("save-off")?;
@@ -84,94 +59,32 @@
// We wait some time to (hopefully) ensure the save-all call has completed
std::thread::sleep(std::time::Duration::from_secs(10));
let res = self.create_backup_archive();
if res.is_ok() {
self.remove_old_backups()?;
}
let start_time = chrono::offset::Utc::now();
let res = self.backups.perform_backup_cycle();
// The server's save feature needs to be enabled again even if the archive failed to create
self.custom("save-on")?;
self.custom("save-all")?;
let duration = chrono::offset::Utc::now() - start_time;
let duration_str = format!(
"{}m{}s",
duration.num_seconds() / 60,
duration.num_seconds() % 60
);
if res.is_ok() {
self.custom("say server backed up successfully")?;
self.custom(&format!(
"say backup created for layer '{}' in {}",
layer_name, duration_str
))?;
} else {
self.custom("an error occured while backing up the server")?;
self.custom(&format!(
"an error occured after {} while creating backup for layer '{}'",
duration_str, layer_name
))?;
}
res
}
/// Create a new compressed backup archive of the server's data.
fn create_backup_archive(&mut self) -> std::io::Result<()> {
// Create a gzip-compressed tarball of the worlds folder
let filename = format!(
"{}",
chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
);
let path = self.backup_dir.join(filename);
let tar_gz = std::fs::File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.append_dir_all("worlds", &self.world_dir)?;
// Add all files from the config directory that aren't the cache
for entry in self
.config_dir
.read_dir()?
.filter_map(|e| e.ok())
.filter(|e| e.file_name() != "cache")
{
let tar_path = Path::new("config").join(entry.file_name());
if entry.file_type()?.is_dir() {
tar.append_dir_all(tar_path, entry.path())?;
} else {
tar.append_path_with_name(entry.path(), tar_path)?;
}
}
// We add a file to the backup describing for what version it was made
let info = format!("{} {}", self.type_, self.version);
let info_bytes = info.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(info_bytes.len().try_into().unwrap());
header.set_mode(0o100644);
unsafe {
header.set_gid(getegid().into());
header.set_uid(geteuid().into());
}
tar.append_data(&mut header, "info.txt", info_bytes)?;
// Backup file gets finalized in the drop
Ok(())
}
/// Remove the oldest backups
fn remove_old_backups(&mut self) -> std::io::Result<()> {
// The naming format used allows us to sort the backups by name and still get a sorting by
// creation time
let mut backups = self
.backup_dir
.read_dir()?
.filter_map(|res| res.map(|e| e.path()).ok())
.collect::<Vec<PathBuf>>();
backups.sort();
let max_backups: usize = self.max_backups.try_into().unwrap();
if backups.len() > max_backups {
let excess_backups = backups.len() - max_backups;
for backup in &backups[0..excess_backups] {
std::fs::remove_file(backup)?;
}
}
Ok(())
}
}