Compare commits

...

19 Commits

Author SHA1 Message Date
Jef Roosens 188fb30343
fix: better serde bounds
CI: lint passed, clippy passed, build passed
2023-06-22 20:10:37 +02:00
Jef Roosens 53dc3783ca
feat: store server info in metadata file; change cli flags
CI: lint passed, clippy passed, build passed
2023-06-20 19:31:50 +02:00
Jef Roosens ef631fab1d
refactor: separate backup logic into own module
CI: lint passed, clippy failed, build passed
2023-06-19 14:04:38 +02:00
Jef Roosens 74a0b91fd1
refactor: remove open function
CI: lint passed, clippy failed, build passed
2023-06-18 23:33:56 +02:00
Jef Roosens b48c531d80
feat: configurable parameters for incremental backups
CI: lint passed, clippy failed, build passed
2023-06-18 22:48:11 +02:00
Jef Roosens b51d951688
feat: re-implement remove old backups
CI: lint passed, clippy failed, build passed
2023-06-18 21:56:43 +02:00
Jef Roosens bb7b57899b
refactor: store backups in nested vecs instead; introduce concept of chains
CI: lint passed, clippy failed, build passed
2023-06-18 21:15:05 +02:00
Jef Roosens f7235fb342
refactor: move iterating over files to Path extension trait
CI: lint passed, clippy failed, build passed
2023-06-17 12:08:46 +02:00
Jef Roosens 5275356353
feat: added backup cli command
CI: lint passed, clippy failed, build passed
2023-06-16 17:23:36 +02:00
Jef Roosens 27d7e681c3
feat: temporarily disable "remove old backups"
CI: lint passed, clippy failed, build passed
2023-06-15 22:54:17 +02:00
Jef Roosens 8add96b39b
feat: persistently store backup state
CI: lint passed, clippy failed, build passed
2023-06-15 20:38:52 +02:00
Jef Roosens d204c68400
fix: actually working incremental backup
CI: lint passed, clippy failed, build passed
2023-06-15 09:56:40 +02:00
Jef Roosens a9e7b215d1
feat: move running server to subcommand 2023-06-14 22:17:53 +02:00
Jef Roosens fcc111b4ef
feat: possible incremental backup implementation using new abstraction 2023-06-14 21:47:59 +02:00
Jef Roosens b7a678e32f
feat: lots of backup stuff 2023-06-13 17:43:47 +02:00
Jef Roosens 703a25e8be
refactor: use utc time 2023-06-13 15:12:30 +02:00
Jef Roosens 29d6713486
feat: implement own listing of files 2023-06-13 15:12:30 +02:00
Jef Roosens 4958257f6e
refactor: move backup logic to separate module 2023-06-13 15:12:30 +02:00
Jef Roosens 90aa929b73
feat: show backup time in message 2023-06-13 15:12:26 +02:00
14 changed files with 808 additions and 212 deletions

.cargo/config.toml

@@ -1,3 +1,3 @@
[alias]
-runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
-runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
+runs = "run -- run paper 1.19.4-550 --config data/config --backup data/backups --world data/worlds --jar paper-1.19.4-550.jar"
+runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"

.gitignore (vendored, 2 lines changed)

@@ -19,4 +19,4 @@ target/
# testing files
*.jar
-data/
+data*/

CHANGELOG.md

@@ -7,6 +7,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/alex/src/branch/dev)
### Added
* `backup` CLI command
* Incremental backups
* Chain length describes how many incremental backups to create from the
same full backup
* "backups to keep" has been replaced by "chains to keep"
* Server type & version are now stored in the metadata file
### Changed
* Running the server now uses the `run` CLI subcommand
* `server_type` and `server_version` arguments are now optional flags
### Removed
* `max_backups` setting
## [0.2.2](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.2)
### Fixed

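Concretely (illustrative numbers, not taken from the diff): with a chain length of 3, every chain consists of one full backup followed by two incremental ones, and a new chain starts as soon as the current one is full. A minimal sketch of how backups map onto chains:

fn main() {
    let chain_len = 3;
    for i in 0..7 {
        let (chain, pos) = (i / chain_len, i % chain_len);
        // The first backup of a chain is full; the rest are incremental
        // relative to their predecessor
        let kind = if pos == 0 { "full" } else { "incremental" };
        println!("backup {i}: chain {chain}, {kind}");
    }
}

Since restoring the newest state requires replaying the whole current chain, retention is expressed in complete chains ("chains to keep") rather than in individual archives.
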
Cargo.lock (generated, 46 lines changed)

@@ -15,6 +15,8 @@ dependencies = [
"chrono",
"clap",
"flate2",
"serde",
"serde_json",
"signal-hook",
"tar",
]
@@ -123,6 +125,7 @@ dependencies = [
"iana-time-zone",
"js-sys",
"num-traits",
"serde",
"time",
"wasm-bindgen",
"winapi",
@@ -292,6 +295,12 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "itoa"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
[[package]]
name = "js-sys"
version = "0.3.63"
@@ -384,6 +393,43 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "ryu"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
[[package]]
name = "serde"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.164"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "signal-hook"
version = "0.3.15"

Cargo.toml

@@ -12,10 +12,11 @@ edition = "2021"
tar = "0.4.38"
# Used to compress said tarballs using gzip
flate2 = "1.0.26"
# Used for backup filenames
-chrono = "0.4.26"
+chrono = { version = "0.4.26", features = ["serde"] }
clap = { version = "4.3.1", features = ["derive", "env"] }
signal-hook = "0.3.15"
+serde = { version = "1.0.164", features = ["derive", "rc"] }
+serde_json = "1.0.96"
[profile.release]
lto = "fat"

src/backup/delta.rs (new file)

@@ -0,0 +1,76 @@
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
/// Represents the changes relative to the previous backup
#[derive(Debug, Serialize, Deserialize)]
pub struct Delta {
/// What files were added/modified in each part of the tarball.
pub added: HashMap<PathBuf, HashSet<PathBuf>>,
/// What files were removed in this backup, in comparison to the previous backup. For full
/// backups, this will always be empty, as they do not consider previous backups.
/// The map stores a separate list for each top-level directory, as the contents of these
/// directories can come from different source directories.
pub removed: HashMap<PathBuf, HashSet<PathBuf>>,
}
impl Delta {
pub fn new() -> Self {
Self {
added: HashMap::new(),
removed: HashMap::new(),
}
}
/// Update the current state so that its result becomes the merge of itself and the other
/// state.
#[allow(dead_code)]
pub fn merge(&mut self, delta: &Self) {
for (dir, added) in delta.added.iter() {
// Files that were removed in the current state, but added in the new state, are no
// longer removed
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.retain(|k| !added.contains(k));
}
// Newly added files are added to the state as well
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.extend(added.iter().cloned());
} else {
self.added.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in delta.removed.iter() {
// Files that were originally added, but now deleted are removed from the added list
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.retain(|k| !removed.contains(k));
}
// Newly removed files are added to the state as well
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.extend(removed.iter().cloned());
} else {
self.removed.insert(dir.clone(), removed.clone());
}
}
}
/// Modify the given state by applying this delta's changes to it
pub fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
// First we add new files, then we remove the old ones
for (dir, added) in self.added.iter() {
if let Some(current) = state.get_mut(dir) {
current.extend(added.iter().cloned());
} else {
state.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in self.removed.iter() {
if let Some(current) = state.get_mut(dir) {
current.retain(|k| !removed.contains(k));
}
}
}
}

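A minimal test-style sketch (not part of the diff) of `apply`'s semantics: the delta's additions are merged into the state first, then its removals are dropped:

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{HashMap, HashSet};
    use std::path::PathBuf;

    #[test]
    fn apply_merges_additions_then_removals() {
        let mut delta = Delta::new();
        let dir = PathBuf::from("worlds");
        delta.added
            .insert(dir.clone(), HashSet::from([PathBuf::from("level.dat")]));
        delta.removed
            .insert(dir.clone(), HashSet::from([PathBuf::from("old_level.dat")]));

        // State as left behind by the previous backups in the chain
        let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::from([(
            dir.clone(),
            HashSet::from([PathBuf::from("old_level.dat")]),
        )]);

        delta.apply(&mut state);

        assert!(state[&dir].contains(&PathBuf::from("level.dat")));
        assert!(!state[&dir].contains(&PathBuf::from("old_level.dat")));
    }
}
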
src/backup/manager.rs (new file)

@@ -0,0 +1,140 @@
use super::Backup;
use serde::Deserialize;
use serde::Serialize;
use std::fs::File;
use std::io;
use std::path::PathBuf;
pub struct Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de>,
{
backup_dir: PathBuf,
config_dir: PathBuf,
world_dir: PathBuf,
default_metadata: T,
chain_len: u64,
chains_to_keep: u64,
chains: Vec<Vec<Backup<T>>>,
}
impl<T> Manager<T>
where
T: Clone + Serialize + for<'de> Deserialize<'de>,
{
const METADATA_FILE: &str = "alex.json";
/// Initialize a new instance of a `BackupManager`.
pub fn new(
backup_dir: PathBuf,
config_dir: PathBuf,
world_dir: PathBuf,
metadata: T,
chain_len: u64,
chains_to_keep: u64,
) -> Self {
Self {
backup_dir,
config_dir,
world_dir,
default_metadata: metadata,
chain_len,
chains_to_keep,
chains: Vec::new(),
}
}
/// Create a new backup with the expected type.
pub fn create_backup(&mut self) -> io::Result<()> {
let dirs = vec![
(PathBuf::from("config"), self.config_dir.clone()),
(PathBuf::from("worlds"), self.world_dir.clone()),
];
// We start a new chain if the current chain is complete, or if there isn't a first chain
// yet
if let Some(current_chain) = self.chains.last() {
let current_chain_len: u64 = current_chain.len().try_into().unwrap();
if current_chain_len >= self.chain_len {
self.chains.push(Vec::new());
}
} else {
self.chains.push(Vec::new());
}
let current_chain = self.chains.last_mut().unwrap();
let mut backup = if !current_chain.is_empty() {
let previous_backup = current_chain.last().unwrap();
let state = Backup::state(current_chain);
Backup::create_from(state, previous_backup.start_time, &self.backup_dir, dirs)?
} else {
Backup::create(&self.backup_dir, dirs)?
};
backup.set_metadata(self.default_metadata.clone());
current_chain.push(backup);
self.save()?;
Ok(())
}
/// Delete all backups associated with outdated chains, and forget those chains.
pub fn remove_old_backups(&mut self) -> io::Result<()> {
let chains_to_store: usize = self.chains_to_keep.try_into().unwrap();
if chains_to_store < self.chains.len() {
let mut remove_count: usize = self.chains.len() - chains_to_store;
// We only count finished chains towards the list of stored chains
let chain_len: usize = self.chain_len.try_into().unwrap();
if self.chains.last().unwrap().len() < chain_len {
remove_count -= 1;
}
for chain in self.chains.drain(..remove_count) {
for backup in chain {
let path = Backup::path(&self.backup_dir, backup.start_time);
std::fs::remove_file(path)?;
}
}
self.save()?;
}
Ok(())
}
/// Write the in-memory state to disk.
pub fn save(&self) -> io::Result<()> {
let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
serde_json::to_writer(json_file, &self.chains)?;
Ok(())
}
/// Overwrite the in-memory state with the on-disk state.
pub fn load(&mut self) -> io::Result<()> {
let json_file = match File::open(self.backup_dir.join(Self::METADATA_FILE)) {
Ok(f) => f,
Err(e) => {
// Don't error out if the file isn't there, it will be created when necessary
if e.kind() == io::ErrorKind::NotFound {
self.chains = Vec::new();
return Ok(());
} else {
return Err(e);
}
}
};
self.chains = serde_json::from_reader(json_file)?;
Ok(())
}
}

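One subtlety in `remove_old_backups`: a trailing chain that has not yet reached `chain_len` does not count toward the chains to keep. A standalone sketch of that arithmetic (hypothetical helper, not part of the diff):

fn chains_to_remove(total: usize, last_chain_len: usize, chain_len: usize, keep: usize) -> usize {
    if total <= keep {
        return 0;
    }
    let mut count = total - keep;
    // The trailing chain is still being filled, so it is not yet a stored chain
    if last_chain_len < chain_len {
        count -= 1;
    }
    count
}

#[test]
fn unfinished_chain_does_not_count() {
    // On disk: [full, incr], [full, incr], [full], with chain_len = 2 and
    // chains_to_keep = 1: only the oldest complete chain is deleted
    assert_eq!(chains_to_remove(3, 1, 2, 1), 1);
}
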
src/backup/mod.rs (new file, mode 100644, 160 lines)

@@ -0,0 +1,160 @@
mod delta;
mod manager;
mod path;
use delta::Delta;
pub use manager::Manager;
use chrono::Utc;
use flate2::write::GzEncoder;
use flate2::Compression;
use path::PathExt;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum BackupType {
Full,
Incremental,
}
/// Represents a successful backup
#[derive(Serialize, Deserialize)]
pub struct Backup<T: Clone> {
/// When the backup was started (also corresponds to the name)
pub start_time: chrono::DateTime<Utc>,
/// Type of the backup
pub type_: BackupType,
pub delta: Delta,
/// Additional metadata that can be associated with a given backup
pub metadata: Option<T>,
}
impl Backup<()> {
/// Return the path to a backup file by properly formatting the data.
pub fn path<P: AsRef<Path>>(backup_dir: P, start_time: chrono::DateTime<Utc>) -> PathBuf {
let backup_dir = backup_dir.as_ref();
let filename = format!("{}", start_time.format(Self::FILENAME_FORMAT));
backup_dir.join(filename)
}
}
impl<T: Clone> Backup<T> {
const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
pub fn set_metadata(&mut self, metadata: T) {
self.metadata = Some(metadata);
}
/// Resolve the state of the list of backups by applying their deltas in-order to an initially
/// empty state.
pub fn state(backups: &Vec<Self>) -> HashMap<PathBuf, HashSet<PathBuf>> {
let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
for backup in backups {
backup.delta.apply(&mut state);
}
state
}
/// Create a new Full backup, populated with the given directories.
///
/// # Arguments
///
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create<P: AsRef<Path>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(&src_dir).unwrap();
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar, added_files);
}
Ok(Backup {
type_: BackupType::Full,
start_time,
delta,
metadata: None,
})
}
/// Create a new incremental backup from a given previous backup
pub fn create_from<P: AsRef<Path>>(
previous_state: HashMap<PathBuf, HashSet<PathBuf>>,
previous_start_time: chrono::DateTime<Utc>,
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let start_time = chrono::offset::Utc::now();
let path = Backup::path(backup_dir, start_time);
let tar_gz = File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut delta = Delta::new();
for (dir_in_tar, src_dir) in dirs {
let mut all_files: HashSet<PathBuf> = HashSet::new();
let mut added_files: HashSet<PathBuf> = HashSet::new();
for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
let path = entry?.path();
let stripped = path.strip_prefix(&src_dir).unwrap();
if !path.not_modified_since(previous_start_time) {
ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
added_files.insert(stripped.to_path_buf());
}
all_files.insert(stripped.to_path_buf());
}
delta.added.insert(dir_in_tar.clone(), added_files);
if let Some(previous_files) = previous_state.get(&dir_in_tar) {
delta.removed.insert(
dir_in_tar,
previous_files.difference(&all_files).cloned().collect(),
);
}
}
Ok(Backup {
type_: BackupType::Incremental,
start_time,
delta,
metadata: None,
})
}
}

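Because `FILENAME_FORMAT` encodes the UTC start time, an archive's on-disk path is fully determined by its `start_time`, which is what lets the manager delete old archives through `Backup::path` alone. A quick sketch (not in the diff) of the resulting names:

use chrono::{TimeZone, Utc};

fn main() {
    // Mirrors Backup::FILENAME_FORMAT from the diff above
    let start_time = Utc.with_ymd_and_hms(2023, 6, 18, 21, 15, 5).unwrap();
    let filename = start_time.format("%Y-%m-%d_%H-%M-%S.tar.gz").to_string();
    assert_eq!(filename, "2023-06-18_21-15-05.tar.gz");
}
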
src/backup/path.rs (new file, mode 100644, 147 lines)

@@ -0,0 +1,147 @@
use chrono::{Local, Utc};
use std::collections::HashSet;
use std::ffi::OsString;
use std::fs::DirEntry;
use std::path::{Path, PathBuf};
use std::{fs, io};
pub struct ReadDirRecursive {
ignored: HashSet<OsString>,
read_dir: fs::ReadDir,
dir_stack: Vec<PathBuf>,
files_only: bool,
}
impl ReadDirRecursive {
/// Start the iterator for a new directory
pub fn start<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let path = path.as_ref();
let read_dir = path.read_dir()?;
Ok(ReadDirRecursive {
ignored: HashSet::new(),
read_dir,
dir_stack: Vec::new(),
files_only: false,
})
}
pub fn ignored<S: Into<OsString>>(mut self, s: S) -> Self {
self.ignored.insert(s.into());
self
}
pub fn files(mut self) -> Self {
self.files_only = true;
self
}
/// Tries to populate the `read_dir` field with a new `ReadDir` instance to consume.
fn next_read_dir(&mut self) -> io::Result<bool> {
if let Some(path) = self.dir_stack.pop() {
self.read_dir = path.read_dir()?;
Ok(true)
} else {
Ok(false)
}
}
/// Convenience method to add a new directory to the stack.
fn push_entry(&mut self, entry: &io::Result<DirEntry>) {
if let Ok(entry) = entry {
if entry.path().is_dir() {
self.dir_stack.push(entry.path());
}
}
}
/// Determine whether an entry should be returned by the iterator.
fn should_return(&self, entry: &io::Result<DirEntry>) -> bool {
if let Ok(entry) = entry {
let mut res = !self.ignored.contains(&entry.file_name());
// Please just let me combine these already
if self.files_only {
if let Ok(file_type) = entry.file_type() {
res = res && file_type.is_file();
}
// We couldn't determine if it's a file, so we don't return it
else {
res = false;
}
}
res
} else {
true
}
}
}
impl Iterator for ReadDirRecursive {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<Self::Item> {
loop {
// First, we try to consume the current directory's items
while let Some(entry) = self.read_dir.next() {
self.push_entry(&entry);
if self.should_return(&entry) {
return Some(entry);
}
}
// If we get an error while setting up a new directory, we return this, otherwise we
// keep trying to consume the directories
match self.next_read_dir() {
Ok(true) => (),
// There's no more directories to traverse, so the iterator is done
Ok(false) => return None,
Err(e) => return Some(Err(e)),
}
}
}
}
pub trait PathExt {
/// Confirm whether the file has not been modified since the given timestamp.
///
/// This function will only return true if it can determine with certainty that the file hasn't
/// been modified.
///
/// # Args
///
/// * `timestamp` - Timestamp to compare modified time with
///
/// # Returns
///
/// True if the file has not been modified for sure, false otherwise.
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool;
/// An extension of the `read_dir` command that runs through the entire underlying directory
/// structure using breadth-first search
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive>;
}
impl PathExt for Path {
fn not_modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool {
if let Ok(metadata) = self.metadata() {
if let Ok(last_modified) = metadata.modified() {
let t: chrono::DateTime<Utc> = last_modified.into();
let t = t.with_timezone(&Local);
return t < timestamp;
}
}
false
}
fn read_dir_recursive(&self) -> io::Result<ReadDirRecursive> {
ReadDirRecursive::start(self)
}
}

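The extension trait composes like any iterator adapter. A sketch (not part of the diff, and assuming it lives in a submodule of src/backup/, since the `path` module is private) of collecting every non-`cache` file the way `create_backup` does:

use super::path::PathExt;
use std::io;
use std::path::{Path, PathBuf};

fn collect_files(src_dir: &Path) -> io::Result<Vec<PathBuf>> {
    let mut files = Vec::new();
    // Breadth-first traversal, skipping entries named "cache", files only
    for entry in src_dir.read_dir_recursive()?.ignored("cache").files() {
        files.push(entry?.path());
    }
    Ok(files)
}
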
src/cli.rs (new file, mode 100644, 106 lines)

@@ -0,0 +1,106 @@
use crate::server::ServerType;
use clap::{Args, Parser, Subcommand};
use std::path::PathBuf;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
#[command(subcommand)]
pub command: Commands,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR",
global = true
)]
pub config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR",
global = true
)]
pub world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_BACKUP_DIR",
global = true
)]
pub backup: PathBuf,
/// Length of a backup chain
#[arg(
short = 'l',
long,
default_value_t = 4,
env = "ALEX_CHAIN_LEN",
global = true
)]
pub chain_len: u64,
/// How many backup chains to keep
#[arg(
short = 'n',
long,
default_value_t = 7,
env = "ALEX_CHAINS",
global = true
)]
pub chains: u64,
/// Type of server
#[arg(long, default_value = "unknown", env = "ALEX_SERVER")]
pub server: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(long, default_value = "", env = "ALEX_SERVER_VERSION")]
pub server_version: String,
}
#[derive(Subcommand)]
pub enum Commands {
/// Run the server
Run(RunArgs),
/// Create a new backup of the server. This command should only be used when the server is not
/// running.
Backup(BackupArgs),
}
#[derive(Args)]
pub struct RunArgs {
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
pub jar: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
pub java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
pub xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
pub xmx: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
pub frequency: u64,
/// Don't actually run the server, but simply output the server configuration that would have
/// been run
#[arg(short, long, default_value_t = false)]
pub dry: bool,
}
#[derive(Args)]
pub struct BackupArgs {}

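Because the shared flags are declared with `global = true`, they may appear before or after the subcommand. A test-style sketch (not part of the diff, assumed to sit next to `Cli` in src/cli.rs) of how the new surface parses:

#[cfg(test)]
mod tests {
    use super::{Cli, Commands};
    use clap::Parser;

    #[test]
    fn parses_run_subcommand() {
        let cli = Cli::parse_from([
            "alex",
            "--server", "paper",
            "--server-version", "1.19.4-550",
            "run",
            "--frequency", "30",
        ]);

        match cli.command {
            Commands::Run(args) => assert_eq!(args.frequency, 30),
            _ => panic!("expected the run subcommand"),
        }
    }
}
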
src/main.rs

@@ -1,78 +1,14 @@
mod backup;
mod cli;
mod server;
mod signals;
mod stdin;
use clap::Parser;
use server::ServerType;
use cli::{BackupArgs, Cli, Commands, RunArgs};
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
/// Type of server
type_: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(env = "ALEX_SERVER_VERSION")]
server_version: String,
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
jar: PathBuf,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR"
)]
config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR"
)]
world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_BACKUP_DIR"
)]
backup: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
xmx: u64,
/// How many backups to keep
#[arg(short = 'n', long, default_value_t = 7, env = "ALEX_MAX_BACKUPS")]
max_backups: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
frequency: u64,
/// Don't actually run the server, but simply output the server configuration that would have
/// been ran
#[arg(short, long, default_value_t = false)]
dry: bool,
}
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
loop {
std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
@@ -86,22 +22,22 @@ fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
}
}
fn main() -> io::Result<()> {
fn command_run(cli: &Cli, args: &RunArgs) -> io::Result<()> {
let (_, mut signals) = signals::install_signal_handlers()?;
let cli = Cli::parse();
let mut cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
.java(&cli.java)
.jar(cli.jar)
.config(cli.config)
.world(cli.world)
.backup(cli.backup)
.xms(cli.xms)
.xmx(cli.xmx)
.max_backups(cli.max_backups);
let mut cmd = server::ServerCommand::new(cli.server, &cli.server_version)
.java(&args.java)
.jar(args.jar.clone())
.config(cli.config.clone())
.world(cli.world.clone())
.backup(cli.backup.clone())
.xms(args.xms)
.xmx(args.xmx)
.chain_len(cli.chain_len)
.chains_to_keep(cli.chains);
cmd.canonicalize()?;
if cli.dry {
if args.dry {
print!("{}", cmd);
return Ok(());
@@ -109,9 +45,10 @@ fn main() -> io::Result<()> {
let counter = Arc::new(Mutex::new(cmd.spawn()?));
if cli.frequency > 0 {
if args.frequency > 0 {
let clone = Arc::clone(&counter);
std::thread::spawn(move || backups_thread(clone, cli.frequency));
let frequency = args.frequency;
std::thread::spawn(move || backups_thread(clone, frequency));
}
// Spawn thread that handles the main stdin loop
@@ -121,3 +58,32 @@ fn main() -> io::Result<()> {
// Signal handler loop exits the process when necessary
signals::handle_signals(&mut signals, counter)
}
fn commands_backup(cli: &Cli, _args: &BackupArgs) -> io::Result<()> {
let metadata = server::Metadata {
server_type: cli.server,
server_version: cli.server_version.clone(),
};
let mut manager = backup::Manager::new(
cli.backup.clone(),
cli.config.clone(),
cli.world.clone(),
metadata,
cli.chain_len,
cli.chains,
);
manager.load()?;
manager.create_backup()?;
manager.remove_old_backups()
}
fn main() -> io::Result<()> {
let cli = Cli::parse();
match &cli.command {
Commands::Run(args) => command_run(&cli, args),
Commands::Backup(args) => commands_backup(&cli, args),
}
}

src/server/command.rs

@@ -1,13 +1,16 @@
use crate::backup::Manager as BackupManager;
use crate::server::ServerProcess;
use clap::ValueEnum;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Serialize, Deserialize)]
pub enum ServerType {
Unknown,
Paper,
Forge,
Vanilla,
@@ -16,6 +19,7 @@ pub enum ServerType {
impl fmt::Display for ServerType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ServerType::Unknown => "Unknown",
ServerType::Paper => "PaperMC",
ServerType::Forge => "Forge",
ServerType::Vanilla => "Vanilla",
@@ -25,6 +29,12 @@ impl fmt::Display for ServerType {
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct Metadata {
pub server_type: ServerType,
pub server_version: String,
}
pub struct ServerCommand {
type_: ServerType,
version: String,
@@ -35,7 +45,8 @@ pub struct ServerCommand {
backup_dir: PathBuf,
xms: u64,
xmx: u64,
max_backups: u64,
chain_len: u64,
chains_to_keep: u64,
}
impl ServerCommand {
@@ -50,7 +61,8 @@ impl ServerCommand {
backup_dir: PathBuf::from("backups"),
xms: 1024,
xmx: 2048,
max_backups: 7,
chain_len: 4,
chains_to_keep: 7,
}
}
@@ -91,8 +103,13 @@ impl ServerCommand {
self
}
pub fn max_backups(mut self, v: u64) -> Self {
self.max_backups = v;
pub fn chain_len(mut self, v: u64) -> Self {
self.chain_len = v;
self
}
pub fn chains_to_keep(mut self, v: u64) -> Self {
self.chains_to_keep = v;
self
}
@@ -179,19 +196,25 @@ impl ServerCommand {
}
pub fn spawn(&mut self) -> std::io::Result<ServerProcess> {
let metadata = Metadata {
server_type: self.type_,
server_version: self.version.clone(),
};
let mut manager = BackupManager::new(
self.backup_dir.clone(),
self.config_dir.clone(),
self.world_dir.clone(),
metadata,
self.chain_len,
self.chains_to_keep,
);
manager.load()?;
let mut cmd = self.create_cmd();
self.accept_eula()?;
let child = cmd.spawn()?;
Ok(ServerProcess::new(
self.type_,
self.version.clone(),
self.config_dir.clone(),
self.world_dir.clone(),
self.backup_dir.clone(),
self.max_backups,
child,
))
Ok(ServerProcess::new(manager, child))
}
}

src/server/mod.rs

@@ -1,5 +1,5 @@
mod command;
mod process;
pub use command::{ServerCommand, ServerType};
pub use command::{Metadata, ServerCommand, ServerType};
pub use process::ServerProcess;

src/server/process.rs

@@ -1,44 +1,18 @@
use crate::server::ServerType;
use flate2::write::GzEncoder;
use flate2::Compression;
use crate::backup::Manager as BackupManager;
use crate::server::Metadata;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::Child;
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
fn getegid() -> u32;
}
pub struct ServerProcess {
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
backups: BackupManager<Metadata>,
}
impl ServerProcess {
pub fn new(
type_: ServerType,
version: String,
config_dir: PathBuf,
world_dir: PathBuf,
backup_dir: PathBuf,
max_backups: u64,
child: Child,
) -> ServerProcess {
pub fn new(manager: BackupManager<Metadata>, child: Child) -> ServerProcess {
ServerProcess {
type_,
version,
config_dir,
world_dir,
backup_dir,
max_backups,
child,
backups: manager,
}
}
@@ -84,94 +58,33 @@ impl ServerProcess {
// We wait some time to (hopefully) ensure the save-all call has completed
std::thread::sleep(std::time::Duration::from_secs(10));
let res = self.create_backup_archive();
if res.is_ok() {
self.remove_old_backups()?;
}
let start_time = chrono::offset::Utc::now();
let res = self.backups.create_backup();
// The server's save feature needs to be enabled again even if the archive failed to create
self.custom("save-on")?;
self.custom("save-all")?;
if res.is_ok() {
self.custom("say server backed up successfully")?;
self.backups.remove_old_backups()?;
}
let duration = chrono::offset::Utc::now() - start_time;
let duration_str = format!(
"{}m{}s",
duration.num_seconds() / 60,
duration.num_seconds() % 60
);
if res.is_ok() {
self.custom(&format!("say server backed up in {}", duration_str))?;
} else {
self.custom("an error occured while backing up the server")?;
self.custom(&format!(
"an error occured after {} while backing up the server",
duration_str
))?;
}
res
}
/// Create a new compressed backup archive of the server's data.
fn create_backup_archive(&mut self) -> std::io::Result<()> {
// Create a gzip-compressed tarball of the worlds folder
let filename = format!(
"{}",
chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
);
let path = self.backup_dir.join(filename);
let tar_gz = std::fs::File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.append_dir_all("worlds", &self.world_dir)?;
// Add all files from the config directory that aren't the cache
for entry in self
.config_dir
.read_dir()?
.filter_map(|e| e.ok())
.filter(|e| e.file_name() != "cache")
{
let tar_path = Path::new("config").join(entry.file_name());
if entry.file_type()?.is_dir() {
tar.append_dir_all(tar_path, entry.path())?;
} else {
tar.append_path_with_name(entry.path(), tar_path)?;
}
}
// We add a file to the backup describing for what version it was made
let info = format!("{} {}", self.type_, self.version);
let info_bytes = info.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(info_bytes.len().try_into().unwrap());
header.set_mode(0o100644);
unsafe {
header.set_gid(getegid().into());
header.set_uid(geteuid().into());
}
tar.append_data(&mut header, "info.txt", info_bytes)?;
// Backup file gets finalized in the drop
Ok(())
}
/// Remove the oldest backups
fn remove_old_backups(&mut self) -> std::io::Result<()> {
// The naming format used allows us to sort the backups by name and still get a sorting by
// creation time
let mut backups = self
.backup_dir
.read_dir()?
.filter_map(|res| res.map(|e| e.path()).ok())
.collect::<Vec<PathBuf>>();
backups.sort();
let max_backups: usize = self.max_backups.try_into().unwrap();
if backups.len() > max_backups {
let excess_backups = backups.len() - max_backups;
for backup in &backups[0..excess_backups] {
std::fs::remove_file(backup)?;
}
}
Ok(())
}
}