Compare commits
5 Commits
d204c68400 ... 42c7a7cc5b

(The base d204c68400 carries version 0.2.2 while the target 42c7a7cc5b carries 0.2.0, so the newer code appears on the removal side of every diff below.)

| Author | SHA1 | Date |
|---|---|---|
| | 42c7a7cc5b | |
| | b1c0bbb3af | |
| | 3e0324703d | |
| | 90033aa91e | |
| | 19d255b98c | |
.cargo/config.toml
@@ -1,3 +1,3 @@
 [alias]
-runs = "run -- run paper 1.19.4-550 --config data/config --backup data/backups --world data/worlds --jar paper-1.19.4-550.jar"
+runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"
 runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"
CHANGELOG.md (22 changed lines)
@@ -7,28 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased](https://git.rustybever.be/Chewing_Bever/alex/src/branch/dev)
 
-### Changed
-
-* Running the server now uses the `run` CLI subcommand
-
-## [0.2.2](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.2)
-
-### Fixed
-
-* Use correct env var for backup directory
-
-## [0.2.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.1)
-
-### Added
-
-* `--dry` flag to inspect command that will be run
-
-### Changed
-
-* JVM flags now narrowely follow Aikar's specifications
-
-## [0.2.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.0)
-
 ### Added
 
 * Rudimentary signal handling for gently stopping server
Cargo.lock
@@ -10,7 +10,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
 name = "alex"
-version = "0.2.2"
+version = "0.2.0"
 dependencies = [
  "chrono",
  "clap",
Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "alex"
-version = "0.2.2"
+version = "0.2.0"
 description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
 authors = ["Jef Roosens"]
 edition = "2021"
Dockerfile
@@ -18,11 +18,11 @@ RUN cargo build && \
 
 # We use ${:-} instead of a default value because the argument is always passed
 # to the build, it'll just be blank most likely
-FROM eclipse-temurin:18-jre-alpine
+FROM eclipse-temurin:17-jre-alpine
 
 # Build arguments
 ARG MC_VERSION=1.19.4
-ARG PAPERMC_VERSION=525
+ARG PAPERMC_VERSION=545
 
 RUN addgroup -Sg 1000 paper && \
     adduser -SHG paper -u 1000 paper
@@ -61,5 +61,4 @@ EXPOSE 25565
 # Switch to non-root user
 USER paper:paper
 
-ENTRYPOINT ["/bin/dumb-init", "--"]
-CMD ["/bin/alex", "paper"]
+ENTRYPOINT ["/bin/alex", "paper"]
README.md (27 changed lines)
@@ -1,26 +1,3 @@
-# Alex
+# mc-wrapper
 
-Alex is a wrapper around a typical Minecraft server process. It acts as the
-parent process, and sits in between the user's input and the server's stdin.
-This allows Alex to support additional commands that execute Rust code.
-
-## Why
-
-The primary usecase for this is backups. A common problem I've had with
-Minecraft backups is that they fail, because the server is writing to one of
-the region files as the backup is being created. Alex solves this be sending
-`save-off` and `save-all` to the server, before creating the tarball.
-Afterwards, saving is enabled again with `save-on`.
-
-## Features
-
-* Create safe backups as gzip-compressed tarballs using the `backup` command
-* Automatically create backups periodically
-* Properly configures the process (working directory, optimisation flags)
-* Configure everything as CLI arguments or environment variables
-
-## Installation
-
-Alex is distributed as statically compiled binaries for Linux amd64 and arm64.
-These can be found
-[here](https://git.rustybever.be/Chewing_Bever/alex/packages).
+A wrapper around a standard Minecraft server, written in Rust.
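The removed "Why" section is the clearest statement of the design: pause saving, flush, archive, resume. A minimal sketch of that sequence (hypothetical code, not from this repository; it assumes the server child was spawned with a piped stdin, as the diffs further down do):

```rust
use std::io::Write;
use std::process::Child;

/// Send one console command to the Minecraft server over its piped stdin.
fn send(server: &mut Child, command: &str) -> std::io::Result<()> {
    let stdin = server.stdin.as_mut().expect("server stdin must be piped");
    writeln!(stdin, "{}", command)
}

/// The backup sequence the README describes: disable auto-saving, force a
/// flush of all region files, archive, then re-enable auto-saving.
fn safe_backup(server: &mut Child) -> std::io::Result<()> {
    send(server, "save-off")?;
    send(server, "save-all")?;

    // ... create the gzip-compressed tarball of config/ and worlds/ here ...

    send(server, "save-on")
}
```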
src/cli.rs (91 changed lines)
@@ -1,91 +0,0 @@
-use crate::server::ServerType;
-use clap::{Args, Parser, Subcommand};
-use std::path::PathBuf;
-
-#[derive(Parser)]
-#[command(author, version, about, long_about = None)]
-pub struct Cli {
-    #[command(subcommand)]
-    pub command: Commands,
-    /// Directory where configs are stored, and where the server will run
-    #[arg(
-        long,
-        value_name = "CONFIG_DIR",
-        default_value = ".",
-        env = "ALEX_CONFIG_DIR",
-        global = true
-    )]
-    pub config: PathBuf,
-    /// Directory where world files will be saved
-    #[arg(
-        long,
-        value_name = "WORLD_DIR",
-        default_value = "../worlds",
-        env = "ALEX_WORLD_DIR",
-        global = true
-    )]
-    pub world: PathBuf,
-    /// Directory where backups will be stored
-    #[arg(
-        long,
-        value_name = "BACKUP_DIR",
-        default_value = "../backups",
-        env = "ALEX_BACKUP_DIR",
-        global = true
-    )]
-    pub backup: PathBuf,
-
-    /// How many backups to keep
-    #[arg(
-        short = 'n',
-        long,
-        default_value_t = 7,
-        env = "ALEX_MAX_BACKUPS",
-        global = true
-    )]
-    pub max_backups: u64,
-}
-
-#[derive(Subcommand)]
-pub enum Commands {
-    /// Run the server
-    Run(RunArgs),
-}
-
-#[derive(Args)]
-pub struct RunArgs {
-    /// Type of server
-    pub type_: ServerType,
-    /// Version string for the server, e.g. 1.19.4-545
-    #[arg(env = "ALEX_SERVER_VERSION")]
-    pub server_version: String,
-
-    /// Server jar to execute
-    #[arg(
-        long,
-        value_name = "JAR_PATH",
-        default_value = "server.jar",
-        env = "ALEX_JAR"
-    )]
-    pub jar: PathBuf,
-
-    /// Java command to run the server jar with
-    #[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
-    pub java: String,
-
-    /// XMS value in megabytes for the server instance
-    #[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
-    pub xms: u64,
-    /// XMX value in megabytes for the server instance
-    #[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
-    pub xmx: u64,
-
-    /// How frequently to perform a backup, in minutes; 0 to disable.
-    #[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
-    pub frequency: u64,
-
-    /// Don't actually run the server, but simply output the server configuration that would have
-    /// been ran
-    #[arg(short, long, default_value_t = false)]
-    pub dry: bool,
-}
src/main.rs (104 changed lines)
@@ -1,13 +1,73 @@
-mod cli;
 mod server;
 mod signals;
 mod stdin;
 
 use clap::Parser;
-use cli::{Cli, Commands, RunArgs};
+use server::ServerType;
 use std::io;
+use std::path::PathBuf;
 use std::sync::{Arc, Mutex};
 
+#[derive(Parser)]
+#[command(author, version, about, long_about = None)]
+struct Cli {
+    /// Type of server
+    type_: ServerType,
+    /// Version string for the server, e.g. 1.19.4-545
+    #[arg(env = "ALEX_SERVER_VERSION")]
+    server_version: String,
+
+    /// Server jar to execute
+    #[arg(
+        long,
+        value_name = "JAR_PATH",
+        default_value = "server.jar",
+        env = "ALEX_JAR"
+    )]
+    jar: PathBuf,
+    /// Directory where configs are stored, and where the server will run
+    #[arg(
+        long,
+        value_name = "CONFIG_DIR",
+        default_value = ".",
+        env = "ALEX_CONFIG_DIR"
+    )]
+    config: PathBuf,
+    /// Directory where world files will be saved
+    #[arg(
+        long,
+        value_name = "WORLD_DIR",
+        default_value = "../worlds",
+        env = "ALEX_WORLD_DIR"
+    )]
+    world: PathBuf,
+    /// Directory where backups will be stored
+    #[arg(
+        long,
+        value_name = "BACKUP_DIR",
+        default_value = "../backups",
+        env = "ALEX_WORLD_DIR"
+    )]
+    backup: PathBuf,
+    /// Java command to run the server jar with
+    #[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
+    java: String,
+
+    /// XMS value in megabytes for the server instance
+    #[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
+    xms: u64,
+    /// XMX value in megabytes for the server instance
+    #[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
+    xmx: u64,
+
+    /// How many backups to keep
+    #[arg(short = 'n', long, default_value_t = 7, env = "ALEX_MAX_BACKUPS")]
+    max_backups: u64,
+    /// How frequently to perform a backup, in minutes; 0 to disable.
+    #[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
+    frequency: u64,
+}
+
 fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
     loop {
         std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
@@ -21,32 +81,24 @@ fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
     }
 }
 
-fn command_run(cli: &Cli, args: &RunArgs) -> io::Result<()> {
+fn main() -> io::Result<()> {
     let (_, mut signals) = signals::install_signal_handlers()?;
+    let cli = Cli::parse();
 
-    let mut cmd = server::ServerCommand::new(args.type_, &args.server_version)
-        .java(&args.java)
-        .jar(args.jar.clone())
-        .config(cli.config.clone())
-        .world(cli.world.clone())
-        .backup(cli.backup.clone())
-        .xms(args.xms)
-        .xmx(args.xmx)
+    let cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
+        .java(&cli.java)
+        .jar(cli.jar)
+        .config(cli.config)
+        .world(cli.world)
+        .backup(cli.backup)
+        .xms(cli.xms)
+        .xmx(cli.xmx)
        .max_backups(cli.max_backups);
-    cmd.canonicalize()?;
-
-    if args.dry {
-        print!("{}", cmd);
-
-        return Ok(());
-    }
 
     let counter = Arc::new(Mutex::new(cmd.spawn()?));
 
-    if args.frequency > 0 {
+    if cli.frequency > 0 {
         let clone = Arc::clone(&counter);
-        let frequency = args.frequency;
-        std::thread::spawn(move || backups_thread(clone, frequency));
+        std::thread::spawn(move || backups_thread(clone, cli.frequency));
     }
 
     // Spawn thread that handles the main stdin loop
@@ -56,11 +108,3 @@ fn command_run(cli: &Cli, args: &RunArgs) -> io::Result<()> {
     // Signal handler loop exits the process when necessary
     signals::handle_signals(&mut signals, counter)
 }
-
-fn main() -> io::Result<()> {
-    let cli = Cli::parse();
-
-    match &cli.command {
-        Commands::Run(args) => command_run(&cli, args),
-    }
-}
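This is the "`run` CLI subcommand" entry from the CHANGELOG: the removal side keeps a dedicated src/cli.rs with global directory flags plus a Run subcommand, while the addition side is the older flat Cli struct inside main.rs. A trimmed-down, self-contained sketch of the clap pattern involved (field set reduced for illustration; only the names shown in the diff are taken from it):

```rust
use clap::{Args, Parser, Subcommand};
use std::path::PathBuf;

#[derive(Parser)]
struct Cli {
    #[command(subcommand)]
    command: Commands,

    /// Usable before or after the subcommand name thanks to `global = true`
    #[arg(long, default_value = ".", global = true)]
    config: PathBuf,
}

#[derive(Subcommand)]
enum Commands {
    /// Run the server
    Run(RunArgs),
}

#[derive(Args)]
struct RunArgs {
    /// Type of server, e.g. "paper"
    type_: String,
}

fn main() {
    // parse_from exercises the parser without real process arguments
    let cli = Cli::parse_from(["alex", "run", "paper", "--config", "data/config"]);
    match cli.command {
        Commands::Run(args) => println!("run {} (config: {:?})", args.type_, cli.config),
    }
}
```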
src/server/backups.rs
@@ -1,11 +1,10 @@
-use chrono::{Local, Utc};
 use flate2::write::GzEncoder;
 use flate2::Compression;
-use std::collections::{HashMap, HashSet};
 use std::fs::File;
 use std::io;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use chrono::{Utc, Local};
+use std::collections::HashSet;
 
 #[link(name = "c")]
 extern "C" {
@@ -13,272 +12,17 @@ extern "C" {
     fn getegid() -> u32;
 }
 
-const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
+static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
 
-fn files(src_dir: PathBuf) -> io::Result<HashSet<PathBuf>> {
-    let mut dirs = vec![src_dir.clone()];
-    let mut files: HashSet<PathBuf> = HashSet::new();
-
-    while let Some(dir) = dirs.pop() {
-        for res in dir.read_dir()? {
-            let entry = res?;
-
-            if entry.file_name() == "cache" {
-                continue;
-            }
-
-            if entry.file_type()?.is_dir() {
-                dirs.push(entry.path());
-            } else {
-                files.insert(entry.path().strip_prefix(&src_dir).unwrap().to_path_buf());
-            }
-        }
-    }
-
-    Ok(files)
-}
-
-/// Check whether a file has been modified since the given timestamp.
-///
-/// Note that this function will *only* return true if it can determine with certainty that the
-/// file has not been modified. If any errors occur while obtaining the required metadata (e.g. if
-/// the file system does not support this metadata), this function will return false.
-fn not_modified_since<T: AsRef<Path>>(time: chrono::DateTime<Utc>, path: T) -> bool {
-    let path = path.as_ref();
-
-    if let Ok(metadata) = path.metadata() {
-        let last_modified = metadata.modified();
-
-        if let Ok(last_modified) = last_modified {
-            let t: chrono::DateTime<Utc> = last_modified.into();
-            let t = t.with_timezone(&Local);
-
-            return t < time;
-        }
-    }
-
-    false
-}
-
-#[derive(Debug, PartialEq)]
-pub enum BackupType {
-    Full,
-    Incremental,
-}
-
-#[derive(Debug)]
-pub enum BackupError {
-    NoFullAncestor,
-}
-
-type BackupResult<T> = Result<T, BackupError>;
-
-/// Represents the changes relative to the previous backup
-#[derive(Debug)]
-pub struct BackupDelta {
-    /// What files were added/modified in each part of the tarball.
-    pub added: HashMap<PathBuf, HashSet<PathBuf>>,
-    /// What files were removed in this backup, in comparison to the previous backup. For full
-    /// backups, this will always be empty, as they do not consider previous backups.
-    /// The map stores a separate list for each top-level directory, as the contents of these
-    /// directories can come for different source directories.
-    pub removed: HashMap<PathBuf, HashSet<PathBuf>>,
-}
-
-impl BackupDelta {
-    pub fn new() -> Self {
-        BackupDelta {
-            added: HashMap::new(),
-            removed: HashMap::new(),
-        }
-    }
-
-    /// Update the current state so that its result becomes the merge of itself and the other
-    /// state.
-    pub fn merge(&mut self, delta: &BackupDelta) {
-        for (dir, added) in delta.added.iter() {
-            // Files that were removed in the current state, but added in the new state, are no
-            // longer removed
-            if let Some(orig_removed) = self.removed.get_mut(dir) {
-                orig_removed.retain(|k| !added.contains(k));
-            }
-
-            // Newly added files are added to the state as well
-            if let Some(orig_added) = self.added.get_mut(dir) {
-                orig_added.extend(added.iter().cloned());
-            } else {
-                self.added.insert(dir.clone(), added.clone());
-            }
-        }
-
-        for (dir, removed) in delta.removed.iter() {
-            // Files that were originally added, but now deleted are removed from the added list
-            if let Some(orig_added) = self.added.get_mut(dir) {
-                orig_added.retain(|k| !removed.contains(k));
-            }
-
-            // Newly removed files are added to the state as well
-            if let Some(orig_removed) = self.removed.get_mut(dir) {
-                orig_removed.extend(removed.iter().cloned());
-            } else {
-                self.removed.insert(dir.clone(), removed.clone());
-            }
-        }
-    }
-
-    /// Modify the given state by applying this delta's changes to it
-    pub fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
-        // First we add new files, then we remove the old ones
-        for (dir, added) in self.added.iter() {
-            if let Some(current) = state.get_mut(dir) {
-                current.extend(added.iter().cloned());
-            } else {
-                state.insert(dir.clone(), added.clone());
-            }
-        }
-
-        for (dir, removed) in self.removed.iter() {
-            if let Some(current) = state.get_mut(dir) {
-                current.retain(|k| !removed.contains(k));
-            }
-        }
-    }
-}
-
-/// Represents a successful backup
-#[derive(Debug)]
-pub struct Backup {
-    previous: Option<Arc<Backup>>,
-    /// When the backup was started (also corresponds to the name)
-    start_time: chrono::DateTime<Utc>,
-    /// Type of the backup
-    type_: BackupType,
-    delta: BackupDelta,
-}
-
-impl Backup {
-    /// Calculate the full state of the backup by applying all its ancestors delta's in order,
-    /// starting from the last full ancestor.
-    pub fn state(&self) -> BackupResult<HashMap<PathBuf, HashSet<PathBuf>>> {
-        if self.type_ == BackupType::Full {
-            let mut state = HashMap::new();
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else if let Some(previous) = &self.previous {
-            let mut state = previous.state()?;
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else {
-            return Err(BackupError::NoFullAncestor);
-        }
-    }
-    /// Create a new Full backup, populated with the given directories.
-    ///
-    /// # Arguments
-    ///
-    /// * `backup_dir` - Directory to store archive in
-    /// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
-    ///   under which `src_dir`'s contents should be stored in the archive
-    ///
-    /// # Returns
-    ///
-    /// The `Backup` instance describing this new backup.
-    pub fn create<P: AsRef<Path>>(
-        backup_dir: P,
-        dirs: Vec<(PathBuf, PathBuf)>,
-    ) -> io::Result<Self> {
-        let backup_dir = backup_dir.as_ref();
-        let start_time = chrono::offset::Utc::now();
-
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = backup_dir.join(filename);
-        let tar_gz = File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut ar = tar::Builder::new(enc);
-
-        let mut added: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
-
-        for (dir_in_tar, src_dir) in dirs {
-            let files = files(src_dir.clone())?;
-
-            for path in &files {
-                ar.append_path_with_name(src_dir.join(path), dir_in_tar.join(path))?;
-            }
-
-            added.insert(dir_in_tar, files);
-        }
-
-        Ok(Backup {
-            previous: None,
-            type_: BackupType::Full,
-            start_time,
-            delta: BackupDelta {
-                added,
-                removed: HashMap::new(),
-            },
-        })
-    }
-
-    /// Create a new incremental backup from a given previous backup
-    pub fn create_from<P: AsRef<Path>>(
-        previous: Arc<Backup>,
-        backup_dir: P,
-        dirs: Vec<(PathBuf, PathBuf)>,
-    ) -> io::Result<Self> {
-        let backup_dir = backup_dir.as_ref();
-        let start_time = chrono::offset::Utc::now();
-
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = backup_dir.join(filename);
-        let tar_gz = File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut ar = tar::Builder::new(enc);
-
-        // TODO remove unwrap
-        let previous_state = previous.state().unwrap();
-        let mut delta = BackupDelta::new();
-
-        for (dir_in_tar, src_dir) in dirs {
-            let files = files(src_dir.clone())?;
-            let added_files = files
-                .iter()
-                // This explicit negation is because we wish to also include files for which we
-                // couldn't determine the last modified time
-                .filter(|p| !not_modified_since(previous.start_time, src_dir.join(p)))
-                .cloned()
-                .collect::<HashSet<PathBuf>>();
-
-            for path in added_files.iter() {
-                ar.append_path_with_name(src_dir.join(path), dir_in_tar.join(path))?;
-            }
-
-            delta.added.insert(dir_in_tar.clone(), added_files);
-
-            if let Some(previous_files) = previous_state.get(&dir_in_tar) {
-                delta.removed.insert(
-                    dir_in_tar,
-                    previous_files.difference(&files).cloned().collect(),
-                );
-            }
-        }
-
-        Ok(Backup {
-            previous: Some(previous),
-            type_: BackupType::Incremental,
-            start_time,
-            delta,
-        })
-    }
-}
-
 pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
     max_backups: u64,
-    last_backup: Option<Arc<Backup>>,
+    /// Start time of the last successful backup
+    last_start_time: Option<chrono::DateTime<chrono::Utc>>,
+    /// Files contained in the last successful backup
+    last_files: HashSet<(PathBuf, PathBuf)>
 }
 
 impl BackupManager {
@@ -293,23 +37,96 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
-            last_backup: None,
+            last_start_time: None,
+            last_files: HashSet::new()
         }
     }
 
-    pub fn create_backup(&mut self) -> io::Result<()> {
-        let dirs = vec![
+    fn files_to_backup(&mut self) -> io::Result<HashSet<(PathBuf, PathBuf)>> {
+        let mut dirs = vec![
-            (PathBuf::from("config"), self.config_dir.clone()),
             (PathBuf::from("worlds"), self.world_dir.clone()),
+            (PathBuf::from("config"), self.config_dir.clone()),
         ];
 
-        let backup = if let Some(last_backup) = &self.last_backup {
-            Backup::create_from(Arc::clone(last_backup), &self.backup_dir, dirs)?
-        } else {
-            Backup::create(&self.backup_dir, dirs)?
-        };
-
-        self.last_backup = Some(Arc::new(backup));
+        let mut files: HashSet<(PathBuf, PathBuf)> = HashSet::new();
+
+        while let Some((path_in_tar, path)) = dirs.pop() {
+            for res in path.read_dir()? {
+                let entry = res?;
+
+                if entry.file_name() == "cache" {
+                    continue;
+                }
+
+                let new_path_in_tar = path_in_tar.join(entry.file_name());
+
+                // All dirs get expanded recursively, while all files get returned as output
+                // NOTE: does this remove empty directories from backups? Is this a problem?
+                if entry.file_type()?.is_dir() {
+                    dirs.push((new_path_in_tar, entry.path()));
+                } else {
+                    // Only add files that have been updated since the last backup (incremental backup)
+                    if let Some(last_start_time) = self.last_start_time {
+                        let last_modified = entry.path().metadata()?.modified();
+
+                        if let Ok(last_modified) = last_modified {
+                            let t: chrono::DateTime<Utc> = last_modified.into();
+                            let t = t.with_timezone(&Local);
+
+                            if t < last_start_time {
+                                continue
+                            }
+                        }
+                    }
+
+                    files.insert((new_path_in_tar, entry.path()));
+                }
+            }
+        }
+
+        Ok(files)
+    }
+
+    pub fn create_archive(&mut self) -> io::Result<()> {
+        let start_time = chrono::offset::Utc::now();
+
+        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
+        let path = self.backup_dir.join(filename);
+        let tar_gz = File::create(path)?;
+        let enc = GzEncoder::new(tar_gz, Compression::default());
+        let mut ar = tar::Builder::new(enc);
+
+        let files = self.files_to_backup()?;
+
+        for (path_in_tar, path) in &files {
+            ar.append_path_with_name(path, path_in_tar)?;
+        }
+
+        let deleted_files = self.last_files.difference(&files);
+
+        println!("{} {}", files.len(), self.last_files.len());
+
+        for (path_in_tar, path) in deleted_files {
+            println!("{path_in_tar:?}: {path:?}");
+        }
+
+        // TODO re-add this info file in some way
+        // We add a file to the backup describing for what version it was made
+        // let info = format!("{} {}", self.type_, self.version);
+        // let info_bytes = info.as_bytes();
+
+        // let mut header = tar::Header::new_gnu();
+        // header.set_size(info_bytes.len().try_into().unwrap());
+        // header.set_mode(0o100644);
+        // unsafe {
+        //     header.set_gid(getegid().into());
+        //     header.set_uid(geteuid().into());
+        // }
+
+        // tar.append_data(&mut header, "info.txt", info_bytes)?;
+
+        // After a successful backup, we store the original metadata
+        self.last_start_time = Some(start_time);
+        self.last_files = files;
 
         Ok(())
     }
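The heart of both backup implementations is the same mtime test: a file is skipped only when it can be proven unchanged since the previous run, and any metadata failure makes it eligible again. Distilled into a standalone predicate (a hypothetical helper, not code from this repository, using the same chrono conversion as the diff):

```rust
use chrono::{DateTime, Utc};
use std::path::Path;

/// Decide whether `path` belongs in the next (incremental) archive.
fn needs_backup(path: &Path, last_start_time: Option<DateTime<Utc>>) -> bool {
    // No previous backup recorded: include everything (full backup).
    let Some(last_start_time) = last_start_time else {
        return true;
    };

    match path.metadata().and_then(|m| m.modified()) {
        // Skip only files provably untouched since the last backup started.
        Ok(mtime) => DateTime::<Utc>::from(mtime) >= last_start_time,
        // Missing or unsupported mtime metadata: err on the side of inclusion.
        Err(_) => true,
    }
}
```

Erring toward inclusion matches the comment removed from `not_modified_since` above: a file is only excluded when its metadata proves it stale.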
src/server/command.rs
@@ -105,19 +105,14 @@ impl ServerCommand {
         Ok(())
     }
 
-    /// Canonicalize all paths to absolute paths. Without this command, all paths will be
-    /// interpreted relatively from the config directory.
-    pub fn canonicalize(&mut self) -> std::io::Result<()> {
+    pub fn spawn(self) -> std::io::Result<ServerProcess> {
         // To avoid any issues, we use absolute paths for everything when spawning the process
-        self.jar = self.jar.canonicalize()?;
-        self.config_dir = self.config_dir.canonicalize()?;
-        self.world_dir = self.world_dir.canonicalize()?;
-        self.backup_dir = self.backup_dir.canonicalize()?;
+        let jar = self.jar.canonicalize()?;
+        let config_dir = self.config_dir.canonicalize()?;
+        let world_dir = self.world_dir.canonicalize()?;
+        let backup_dir = self.backup_dir.canonicalize()?;
 
-        Ok(())
-    }
-
-    fn create_cmd(&self) -> std::process::Command {
+        self.accept_eula()?;
         let mut cmd = Command::new(&self.java);
 
         // Apply JVM optimisation flags
@@ -131,6 +126,15 @@ impl ServerCommand {
             "-XX:+UnlockExperimentalVMOptions",
             "-XX:+DisableExplicitGC",
             "-XX:+AlwaysPreTouch",
+            "-XX:G1HeapWastePercent=5",
+            "-XX:G1MixedGCCountTarget=4",
+            "-XX:G1MixedGCLiveThresholdPercent=90",
+            "-XX:G1RSetUpdatingPauseTimePercent=5",
+            "-XX:SurvivorRatio=32",
+            "-XX:+PerfDisableSharedMem",
+            "-XX:MaxTenuringThreshold=1",
+            "-Dusing.aikars.flags=https://mcflags.emc.gs",
+            "-Daikars.new.flags=true",
         ]);
 
         if self.xms > 12 * 1024 {
@@ -139,84 +143,36 @@ impl ServerCommand {
                 "-XX:G1MaxNewSizePercent=50",
                 "-XX:G1HeapRegionSize=16M",
                 "-XX:G1ReservePercent=15",
+                "-XX:InitiatingHeapOccupancyPercent=20",
             ]);
         } else {
             cmd.args([
                 "-XX:G1NewSizePercent=30",
                 "-XX:G1MaxNewSizePercent=40",
                 "-XX:G1HeapRegionSize=8M",
-                "-XX:G1ReservePercent=20",
+                "-XX:G1ReservePercent=15",
+                "-XX:InitiatingHeapOccupancyPercent=15",
             ]);
         }
 
-        cmd.args(["-XX:G1HeapWastePercent=5", "-XX:G1MixedGCCountTarget=4"]);
-
-        if self.xms > 12 * 1024 {
-            cmd.args(["-XX:InitiatingHeapOccupancyPercent=20"]);
-        } else {
-            cmd.args(["-XX:InitiatingHeapOccupancyPercent=15"]);
-        }
-
-        cmd.args([
-            "-XX:G1MixedGCLiveThresholdPercent=90",
-            "-XX:G1RSetUpdatingPauseTimePercent=5",
-            "-XX:SurvivorRatio=32",
-            "-XX:+PerfDisableSharedMem",
-            "-XX:MaxTenuringThreshold=1",
-            "-Dusing.aikars.flags=https://mcflags.emc.gs",
-            "-Daikars.new.flags=true",
-        ]);
-
-        cmd.current_dir(&self.config_dir)
+        cmd.current_dir(&config_dir)
             .arg("-jar")
-            .arg(&self.jar)
+            .arg(&jar)
             .arg("--universe")
-            .arg(&self.world_dir)
+            .arg(&world_dir)
             .arg("--nogui")
             .stdin(Stdio::piped());
 
-        cmd
-    }
-
-    pub fn spawn(&mut self) -> std::io::Result<ServerProcess> {
-        let mut cmd = self.create_cmd();
-        self.accept_eula()?;
         let child = cmd.spawn()?;
 
         Ok(ServerProcess::new(
             self.type_,
-            self.version.clone(),
-            self.config_dir.clone(),
-            self.world_dir.clone(),
-            self.backup_dir.clone(),
+            self.version,
+            config_dir,
+            world_dir,
+            backup_dir,
             self.max_backups,
             child,
         ))
     }
 }
-
-impl fmt::Display for ServerCommand {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let cmd = self.create_cmd();
-
-        writeln!(f, "Command: {}", self.java)?;
-        writeln!(f, "Working dir: {}", self.config_dir.as_path().display())?;
-
-        // Print command env vars
-        writeln!(f, "Environment:")?;
-
-        for (key, val) in cmd.get_envs().filter(|(_, v)| v.is_some()) {
-            let val = val.unwrap();
-            writeln!(f, " {}={}", key.to_string_lossy(), val.to_string_lossy())?;
-        }
-
-        // Print command arguments
-        writeln!(f, "Arguments:")?;
-
-        for arg in cmd.get_args() {
-            writeln!(f, " {}", arg.to_string_lossy())?;
-        }
-
-        Ok(())
-    }
-}
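Both sides of this diff canonicalize every path before touching the working directory, and the removed comment explains why: `Command::current_dir` changes where the child resolves relative paths, so a relative `--jar` or `--universe` argument would otherwise be resolved against the config directory. A minimal sketch of the pattern (signatures simplified from the diff; not the full ServerCommand):

```rust
use std::path::Path;
use std::process::{Command, Stdio};

/// Build the java invocation with every path made absolute up front, so that
/// running the child inside `config_dir` cannot change what the paths mean.
fn server_command(
    java: &str,
    jar: &Path,
    config_dir: &Path,
    world_dir: &Path,
) -> std::io::Result<Command> {
    // canonicalize() also fails early if a path does not exist
    let jar = jar.canonicalize()?;
    let config_dir = config_dir.canonicalize()?;
    let world_dir = world_dir.canonicalize()?;

    let mut cmd = Command::new(java);
    cmd.current_dir(config_dir) // child runs inside the config directory
        .arg("-jar")
        .arg(jar)
        .arg("--universe")
        .arg(world_dir)
        .arg("--nogui")
        .stdin(Stdio::piped()); // stdin stays piped so console commands can be injected

    Ok(cmd)
}
```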
src/server/mod.rs
@@ -1,6 +1,5 @@
 mod backups;
 mod command;
-mod path;
 mod process;
 
 pub use backups::BackupManager;
@ -1,19 +0,0 @@
|
||||||
use chrono::Utc;
|
|
||||||
use std::collections::HashSet;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::{fs, io};
|
|
||||||
|
|
||||||
struct ReadDirRecursive {
|
|
||||||
ignored_dirs: HashSet<PathBuf>,
|
|
||||||
read_dir: Option<fs::ReadDir>,
|
|
||||||
stack: Vec<fs::ReadDir>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReadDirRecursive {
|
|
||||||
// pub fn new()
|
|
||||||
}
|
|
||||||
|
|
||||||
trait PathExt {
|
|
||||||
fn modified_since(timestamp: chrono::DateTime<Utc>) -> bool;
|
|
||||||
fn read_dir_recusive() -> ReadDirRecursive;
|
|
||||||
}
|
|
||||||
|
|
src/server/process.rs
@@ -89,7 +89,7 @@ impl ServerProcess {
             std::thread::sleep(std::time::Duration::from_secs(10));
 
             let start_time = chrono::offset::Utc::now();
-            let res = self.backups.create_backup();
+            let res = self.backups.create_archive();
 
             if res.is_ok() {
                 self.backups.remove_old_backups()?;