Compare commits

...

15 Commits

Author SHA1 Message Date
Jef Roosens d204c68400
fix: actually working incremental backup 2023-06-15 09:56:40 +02:00
Jef Roosens a9e7b215d1
feat: move running server to subcommand 2023-06-14 22:17:53 +02:00
Jef Roosens fcc111b4ef
feat: possible incremental backup implementation using new abstraction 2023-06-14 21:47:59 +02:00
Jef Roosens b7a678e32f
feat: lots of backup stuff 2023-06-13 17:43:47 +02:00
Jef Roosens 703a25e8be
refactor: use utc time 2023-06-13 15:12:30 +02:00
Jef Roosens 29d6713486
feat: implement own listing of files 2023-06-13 15:12:30 +02:00
Jef Roosens 4958257f6e
refactor: move backup logic to separate module 2023-06-13 15:12:30 +02:00
Jef Roosens 90aa929b73
feat: show backup time in message 2023-06-13 15:12:26 +02:00
Jef Roosens 9ce8199d5f
fix: use correct env var for backup dir 2023-06-13 13:44:08 +02:00
Jef Roosens 375a68fbd6
chore: bump versions 2023-06-13 13:02:27 +02:00
Jef Roosens ce3dcdd4b1
chore: please clippy 2023-06-13 13:01:47 +02:00
Jef Roosens 5ae23c931a
feat: change jvm flags order 2023-06-13 13:00:42 +02:00
Jef Roosens b08ba3853f
feat: add --dry flag 2023-06-13 12:53:50 +02:00
Jef Roosens acb3cfd8e6
chore: update readme 2023-06-13 11:51:18 +02:00
Jef Roosens 45d736d1bb
chore: bump version 2023-06-13 11:40:18 +02:00
14 changed files with 633 additions and 192 deletions

View File

@ -1,3 +1,3 @@
[alias]
runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
runs = "run -- run paper 1.19.4-550 --config data/config --backup data/backups --world data/worlds --jar paper-1.19.4-550.jar"
runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"

2 .gitignore vendored
View File

@ -19,4 +19,4 @@ target/
# testing files
*.jar
data/
data*/

View File

@ -7,6 +7,28 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/alex/src/branch/dev)
### Changed
* Running the server now uses the `run` CLI subcommand
## [0.2.2](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.2)
### Fixed
* Use correct env var for backup directory
## [0.2.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.1)
### Added
* `--dry` flag to inspect the command that will be run
### Changed
* JVM flags now narrowly follow Aikar's specifications
## [0.2.0](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.2.0)
### Added
* Rudimentary signal handling for gently stopping server

2 Cargo.lock generated
View File

@ -10,7 +10,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "alex"
version = "0.1.0"
version = "0.2.2"
dependencies = [
"chrono",
"clap",

View File

@ -1,6 +1,6 @@
[package]
name = "alex"
version = "0.1.0"
version = "0.2.2"
description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
authors = ["Jef Roosens"]
edition = "2021"

View File

@ -18,11 +18,11 @@ RUN cargo build && \
# We use ${:-} instead of a default value because the argument is always passed
# to the build, it'll just be blank most likely
FROM eclipse-temurin:17-jre-alpine
FROM eclipse-temurin:18-jre-alpine
# Build arguments
ARG MC_VERSION=1.19.4
ARG PAPERMC_VERSION=545
ARG PAPERMC_VERSION=525
RUN addgroup -Sg 1000 paper && \
adduser -SHG paper -u 1000 paper
@ -61,4 +61,5 @@ EXPOSE 25565
# Switch to non-root user
USER paper:paper
ENTRYPOINT ["/bin/alex", "paper"]
ENTRYPOINT ["/bin/dumb-init", "--"]
CMD ["/bin/alex", "paper"]

View File

@ -1,3 +1,26 @@
# mc-wrapper
# Alex
A wrapper around a standard Minecraft server, written in Rust.
Alex is a wrapper around a typical Minecraft server process. It acts as the
parent process and sits between the user's input and the server's stdin.
This allows Alex to support additional commands that execute Rust code.
## Why
The primary use case for this is backups. A common problem I've had with
Minecraft backups is that they fail because the server is writing to one of
the region files while the backup is being created. Alex solves this by
sending `save-off` and `save-all` to the server before creating the tarball.
Afterwards, saving is enabled again with `save-on`.
## Features
* Create safe backups as gzip-compressed tarballs using the `backup` command
* Automatically create backups periodically
* Properly configure the process (working directory, optimisation flags)
* Configure everything as CLI arguments or environment variables
## Installation
Alex is distributed as statically compiled binaries for Linux amd64 and arm64.
These can be found
[here](https://git.rustybever.be/Chewing_Bever/alex/packages).
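
For illustration, the save-off / save-all / save-on sequence described in this README corresponds roughly to the sketch below. This is a hypothetical outline, not the project's code; the real logic lives in ServerProcess::backup in src/server/process.rs further down in this diff, and make_archive is only a placeholder for the tarball creation.

use std::io::Write;
use std::thread::sleep;
use std::time::Duration;

// Hypothetical sketch of the safe-backup sequence; not the committed implementation.
fn safe_backup<W: Write>(server_stdin: &mut W) -> std::io::Result<()> {
    writeln!(server_stdin, "save-off")?; // stop the server from writing region files
    writeln!(server_stdin, "save-all")?; // flush pending world data to disk
    sleep(Duration::from_secs(10)); // give the flush some time to complete
    let res = make_archive(); // placeholder for creating the gzip-compressed tarball
    writeln!(server_stdin, "save-on")?; // always re-enable saving, even if archiving failed
    res
}

// Placeholder standing in for the actual archive creation.
fn make_archive() -> std::io::Result<()> {
    Ok(())
}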

91 src/cli.rs 100644
View File

@ -0,0 +1,91 @@
use crate::server::ServerType;
use clap::{Args, Parser, Subcommand};
use std::path::PathBuf;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
#[command(subcommand)]
pub command: Commands,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR",
global = true
)]
pub config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR",
global = true
)]
pub world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_BACKUP_DIR",
global = true
)]
pub backup: PathBuf,
/// How many backups to keep
#[arg(
short = 'n',
long,
default_value_t = 7,
env = "ALEX_MAX_BACKUPS",
global = true
)]
pub max_backups: u64,
}
#[derive(Subcommand)]
pub enum Commands {
/// Run the server
Run(RunArgs),
}
#[derive(Args)]
pub struct RunArgs {
/// Type of server
pub type_: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(env = "ALEX_SERVER_VERSION")]
pub server_version: String,
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
pub jar: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
pub java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
pub xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
pub xmx: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
pub frequency: u64,
/// Don't actually run the server, but simply output the server configuration that would have
/// been run
#[arg(short, long, default_value_t = false)]
pub dry: bool,
}
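
Because the shared flags are declared with global = true, they can be placed before or after the run subcommand. A minimal, test-style sketch of how this CLI shape could be exercised, assuming ServerType parses from the string "paper" as the cargo aliases earlier in this diff suggest (not code from the repository):

use clap::Parser;

// Hypothetical exercise of the Cli defined above.
fn demo_cli() {
    let cli = Cli::parse_from([
        "alex", "run", "paper", "1.19.4-550",
        "--config", "data/config",
        "--world", "data/worlds",
        "--backup", "data/backups",
        "--jar", "paper-1.19.4-550.jar",
    ]);

    match cli.command {
        Commands::Run(args) => {
            assert_eq!(args.server_version, "1.19.4-550");
            assert_eq!(args.xms, 1024); // falls back to the declared default
        }
    }
}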

View File

@ -1,73 +1,13 @@
mod cli;
mod server;
mod signals;
mod stdin;
use clap::Parser;
use server::ServerType;
use cli::{Cli, Commands, RunArgs};
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
/// Type of server
type_: ServerType,
/// Version string for the server, e.g. 1.19.4-545
#[arg(env = "ALEX_SERVER_VERSION")]
server_version: String,
/// Server jar to execute
#[arg(
long,
value_name = "JAR_PATH",
default_value = "server.jar",
env = "ALEX_JAR"
)]
jar: PathBuf,
/// Directory where configs are stored, and where the server will run
#[arg(
long,
value_name = "CONFIG_DIR",
default_value = ".",
env = "ALEX_CONFIG_DIR"
)]
config: PathBuf,
/// Directory where world files will be saved
#[arg(
long,
value_name = "WORLD_DIR",
default_value = "../worlds",
env = "ALEX_WORLD_DIR"
)]
world: PathBuf,
/// Directory where backups will be stored
#[arg(
long,
value_name = "BACKUP_DIR",
default_value = "../backups",
env = "ALEX_WORLD_DIR"
)]
backup: PathBuf,
/// Java command to run the server jar with
#[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"), env = "ALEX_JAVA")]
java: String,
/// XMS value in megabytes for the server instance
#[arg(long, default_value_t = 1024, env = "ALEX_XMS")]
xms: u64,
/// XMX value in megabytes for the server instance
#[arg(long, default_value_t = 2048, env = "ALEX_XMX")]
xmx: u64,
/// How many backups to keep
#[arg(short = 'n', long, default_value_t = 7, env = "ALEX_MAX_BACKUPS")]
max_backups: u64,
/// How frequently to perform a backup, in minutes; 0 to disable.
#[arg(short = 't', long, default_value_t = 0, env = "ALEX_FREQUENCY")]
frequency: u64,
}
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
loop {
std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
@ -81,24 +21,32 @@ fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
}
}
fn main() -> io::Result<()> {
fn command_run(cli: &Cli, args: &RunArgs) -> io::Result<()> {
let (_, mut signals) = signals::install_signal_handlers()?;
let cli = Cli::parse();
let cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
.java(&cli.java)
.jar(cli.jar)
.config(cli.config)
.world(cli.world)
.backup(cli.backup)
.xms(cli.xms)
.xmx(cli.xmx)
let mut cmd = server::ServerCommand::new(args.type_, &args.server_version)
.java(&args.java)
.jar(args.jar.clone())
.config(cli.config.clone())
.world(cli.world.clone())
.backup(cli.backup.clone())
.xms(args.xms)
.xmx(args.xmx)
.max_backups(cli.max_backups);
cmd.canonicalize()?;
if args.dry {
print!("{}", cmd);
return Ok(());
}
let counter = Arc::new(Mutex::new(cmd.spawn()?));
if cli.frequency > 0 {
if args.frequency > 0 {
let clone = Arc::clone(&counter);
std::thread::spawn(move || backups_thread(clone, cli.frequency));
let frequency = args.frequency;
std::thread::spawn(move || backups_thread(clone, frequency));
}
// Spawn thread that handles the main stdin loop
@ -108,3 +56,11 @@ fn main() -> io::Result<()> {
// Signal handler loop exits the process when necessary
signals::handle_signals(&mut signals, counter)
}
fn main() -> io::Result<()> {
let cli = Cli::parse();
match &cli.command {
Commands::Run(args) => command_run(&cli, args),
}
}

View File

@ -0,0 +1,340 @@
use chrono::{Local, Utc};
use flate2::write::GzEncoder;
use flate2::Compression;
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use std::sync::Arc;
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
fn getegid() -> u32;
}
const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
fn files(src_dir: PathBuf) -> io::Result<HashSet<PathBuf>> {
let mut dirs = vec![src_dir.clone()];
let mut files: HashSet<PathBuf> = HashSet::new();
while let Some(dir) = dirs.pop() {
for res in dir.read_dir()? {
let entry = res?;
if entry.file_name() == "cache" {
continue;
}
if entry.file_type()?.is_dir() {
dirs.push(entry.path());
} else {
files.insert(entry.path().strip_prefix(&src_dir).unwrap().to_path_buf());
}
}
}
Ok(files)
}
/// Check whether a file has not been modified since the given timestamp.
///
/// Note that this function will *only* return true if it can determine with certainty that the
/// file has not been modified. If any errors occur while obtaining the required metadata (e.g. if
/// the file system does not support this metadata), this function will return false.
fn not_modified_since<T: AsRef<Path>>(time: chrono::DateTime<Utc>, path: T) -> bool {
let path = path.as_ref();
if let Ok(metadata) = path.metadata() {
let last_modified = metadata.modified();
if let Ok(last_modified) = last_modified {
let t: chrono::DateTime<Utc> = last_modified.into();
let t = t.with_timezone(&Local);
return t < time;
}
}
false
}
#[derive(Debug, PartialEq)]
pub enum BackupType {
Full,
Incremental,
}
#[derive(Debug)]
pub enum BackupError {
NoFullAncestor,
}
type BackupResult<T> = Result<T, BackupError>;
/// Represents the changes relative to the previous backup
#[derive(Debug)]
pub struct BackupDelta {
/// What files were added/modified in each part of the tarball.
pub added: HashMap<PathBuf, HashSet<PathBuf>>,
/// What files were removed in this backup, in comparison to the previous backup. For full
/// backups, this will always be empty, as they do not consider previous backups.
/// The map stores a separate list for each top-level directory, as the contents of these
/// directories can come from different source directories.
pub removed: HashMap<PathBuf, HashSet<PathBuf>>,
}
impl BackupDelta {
pub fn new() -> Self {
BackupDelta {
added: HashMap::new(),
removed: HashMap::new(),
}
}
/// Merge another delta into this one, updating it in place so that it describes the
/// combined change of both deltas.
pub fn merge(&mut self, delta: &BackupDelta) {
for (dir, added) in delta.added.iter() {
// Files that were removed in the current state, but added in the new state, are no
// longer removed
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.retain(|k| !added.contains(k));
}
// Newly added files are added to the state as well
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.extend(added.iter().cloned());
} else {
self.added.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in delta.removed.iter() {
// Files that were originally added, but now deleted are removed from the added list
if let Some(orig_added) = self.added.get_mut(dir) {
orig_added.retain(|k| !removed.contains(k));
}
// Newly removed files are added to the state as well
if let Some(orig_removed) = self.removed.get_mut(dir) {
orig_removed.extend(removed.iter().cloned());
} else {
self.removed.insert(dir.clone(), removed.clone());
}
}
}
/// Modify the given state by applying this delta's changes to it
pub fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
// First we add new files, then we remove the old ones
for (dir, added) in self.added.iter() {
if let Some(current) = state.get_mut(dir) {
current.extend(added.iter().cloned());
} else {
state.insert(dir.clone(), added.clone());
}
}
for (dir, removed) in self.removed.iter() {
if let Some(current) = state.get_mut(dir) {
current.retain(|k| !removed.contains(k));
}
}
}
}
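
As a toy illustration (not from the repository) of the merge semantics documented above, assuming access to the BackupDelta type defined here: a file that one delta adds and a later delta removes is dropped from added and recorded in removed.

use std::collections::HashSet;
use std::path::PathBuf;

// Toy example of BackupDelta::merge, mirroring the comments in the method above.
fn demo_merge() {
    let dir = PathBuf::from("worlds");
    let file = PathBuf::from("region/r.0.0.mca");

    let mut first = BackupDelta::new();
    first.added.insert(dir.clone(), HashSet::from([file.clone()]));

    let mut second = BackupDelta::new();
    second.removed.insert(dir.clone(), HashSet::from([file.clone()]));

    first.merge(&second);

    // The later removal cancels the earlier addition...
    assert!(!first.added[&dir].contains(&file));
    // ...and the file is now tracked as removed.
    assert!(first.removed[&dir].contains(&file));
}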
/// Represents a successful backup
#[derive(Debug)]
pub struct Backup {
previous: Option<Arc<Backup>>,
/// When the backup was started (also corresponds to the name)
start_time: chrono::DateTime<Utc>,
/// Type of the backup
type_: BackupType,
delta: BackupDelta,
}
impl Backup {
/// Calculate the full state of the backup by applying all its ancestors' deltas in order,
/// starting from the last full ancestor.
pub fn state(&self) -> BackupResult<HashMap<PathBuf, HashSet<PathBuf>>> {
if self.type_ == BackupType::Full {
let mut state = HashMap::new();
self.delta.apply(&mut state);
Ok(state)
} else if let Some(previous) = &self.previous {
let mut state = previous.state()?;
self.delta.apply(&mut state);
Ok(state)
} else {
return Err(BackupError::NoFullAncestor);
}
}
/// Create a new Full backup, populated with the given directories.
///
/// # Arguments
///
/// * `backup_dir` - Directory to store archive in
/// * `dirs` - list of tuples `(path_in_tar, src_dir)` with `path_in_tar` the directory name
/// under which `src_dir`'s contents should be stored in the archive
///
/// # Returns
///
/// The `Backup` instance describing this new backup.
pub fn create<P: AsRef<Path>>(
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let backup_dir = backup_dir.as_ref();
let start_time = chrono::offset::Utc::now();
let filename = format!("{}", start_time.format(FILENAME_FORMAT));
let path = backup_dir.join(filename);
let tar_gz = File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
let mut added: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
for (dir_in_tar, src_dir) in dirs {
let files = files(src_dir.clone())?;
for path in &files {
ar.append_path_with_name(src_dir.join(path), dir_in_tar.join(path))?;
}
added.insert(dir_in_tar, files);
}
Ok(Backup {
previous: None,
type_: BackupType::Full,
start_time,
delta: BackupDelta {
added,
removed: HashMap::new(),
},
})
}
/// Create a new incremental backup from a given previous backup
pub fn create_from<P: AsRef<Path>>(
previous: Arc<Backup>,
backup_dir: P,
dirs: Vec<(PathBuf, PathBuf)>,
) -> io::Result<Self> {
let backup_dir = backup_dir.as_ref();
let start_time = chrono::offset::Utc::now();
let filename = format!("{}", start_time.format(FILENAME_FORMAT));
let path = backup_dir.join(filename);
let tar_gz = File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut ar = tar::Builder::new(enc);
// TODO remove unwrap
let previous_state = previous.state().unwrap();
let mut delta = BackupDelta::new();
for (dir_in_tar, src_dir) in dirs {
let files = files(src_dir.clone())?;
let added_files = files
.iter()
// This explicit negation is because we wish to also include files for which we
// couldn't determine the last modified time
.filter(|p| !not_modified_since(previous.start_time, src_dir.join(p)))
.cloned()
.collect::<HashSet<PathBuf>>();
for path in added_files.iter() {
ar.append_path_with_name(src_dir.join(path), dir_in_tar.join(path))?;
}
delta.added.insert(dir_in_tar.clone(), added_files);
if let Some(previous_files) = previous_state.get(&dir_in_tar) {
delta.removed.insert(
dir_in_tar,
previous_files.difference(&files).cloned().collect(),
);
}
}
Ok(Backup {
previous: Some(previous),
type_: BackupType::Incremental,
start_time,
delta,
})
}
}
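
Putting the two constructors together, a hedged sketch of the (path_in_tar, src_dir) convention described in the doc comments; the directory names are placeholders and this is not code from the repository.

use std::path::PathBuf;
use std::sync::Arc;

// Hypothetical driver for the constructors above.
fn demo_backup_chain() -> std::io::Result<()> {
    let dirs = vec![
        (PathBuf::from("config"), PathBuf::from("data/config")),
        (PathBuf::from("worlds"), PathBuf::from("data/worlds")),
    ];

    // A full backup archives every (non-ignored) file in the source directories.
    let full = Arc::new(Backup::create("data/backups", dirs.clone())?);

    // An incremental backup only archives files modified since `full` started,
    // and records removals relative to the state reconstructed from `full`.
    let _incremental = Backup::create_from(Arc::clone(&full), "data/backups", dirs)?;

    Ok(())
}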
pub struct BackupManager {
backup_dir: PathBuf,
config_dir: PathBuf,
world_dir: PathBuf,
max_backups: u64,
last_backup: Option<Arc<Backup>>,
}
impl BackupManager {
pub fn open(
backup_dir: PathBuf,
config_dir: PathBuf,
world_dir: PathBuf,
max_backups: u64,
) -> Self {
BackupManager {
backup_dir,
config_dir,
world_dir,
max_backups,
last_backup: None,
}
}
pub fn create_backup(&mut self) -> io::Result<()> {
let dirs = vec![
(PathBuf::from("config"), self.config_dir.clone()),
(PathBuf::from("worlds"), self.world_dir.clone()),
];
let backup = if let Some(last_backup) = &self.last_backup {
Backup::create_from(Arc::clone(last_backup), &self.backup_dir, dirs)?
} else {
Backup::create(&self.backup_dir, dirs)?
};
self.last_backup = Some(Arc::new(backup));
Ok(())
}
/// Remove the oldest backups
pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
// The naming format used allows us to sort the backups by name and still get a sorting by
// creation time
let mut backups = self
.backup_dir
.read_dir()?
.filter_map(|res| res.map(|e| e.path()).ok())
.collect::<Vec<PathBuf>>();
backups.sort();
let max_backups: usize = self.max_backups.try_into().unwrap();
if backups.len() > max_backups {
let excess_backups = backups.len() - max_backups;
for backup in &backups[0..excess_backups] {
std::fs::remove_file(backup)?;
}
}
Ok(())
}
}
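
BackupManager is what ServerProcess drives further down in this diff. A short, hypothetical sketch of the intended lifecycle (placeholder paths, not repository code):

use std::path::PathBuf;

// Hypothetical lifecycle of the BackupManager defined above.
fn demo_manager() -> std::io::Result<()> {
    let mut manager = BackupManager::open(
        PathBuf::from("data/backups"),
        PathBuf::from("data/config"),
        PathBuf::from("data/worlds"),
        7, // keep at most seven archives
    );

    // No previous backup is known yet, so this produces a full archive.
    manager.create_backup()?;
    // Later calls reuse last_backup and therefore produce incremental archives.
    manager.create_backup()?;
    // Drop the oldest archives once more than max_backups exist.
    manager.remove_old_backups()?;

    Ok(())
}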

View File

@ -105,14 +105,19 @@ impl ServerCommand {
Ok(())
}
pub fn spawn(self) -> std::io::Result<ServerProcess> {
/// Canonicalize all paths to absolute paths. Without this call, all paths will be
/// interpreted relative to the config directory.
pub fn canonicalize(&mut self) -> std::io::Result<()> {
// To avoid any issues, we use absolute paths for everything when spawning the process
let jar = self.jar.canonicalize()?;
let config_dir = self.config_dir.canonicalize()?;
let world_dir = self.world_dir.canonicalize()?;
let backup_dir = self.backup_dir.canonicalize()?;
self.jar = self.jar.canonicalize()?;
self.config_dir = self.config_dir.canonicalize()?;
self.world_dir = self.world_dir.canonicalize()?;
self.backup_dir = self.backup_dir.canonicalize()?;
self.accept_eula()?;
Ok(())
}
fn create_cmd(&self) -> std::process::Command {
let mut cmd = Command::new(&self.java);
// Apply JVM optimisation flags
@ -126,15 +131,6 @@ impl ServerCommand {
"-XX:+UnlockExperimentalVMOptions",
"-XX:+DisableExplicitGC",
"-XX:+AlwaysPreTouch",
"-XX:G1HeapWastePercent=5",
"-XX:G1MixedGCCountTarget=4",
"-XX:G1MixedGCLiveThresholdPercent=90",
"-XX:G1RSetUpdatingPauseTimePercent=5",
"-XX:SurvivorRatio=32",
"-XX:+PerfDisableSharedMem",
"-XX:MaxTenuringThreshold=1",
"-Dusing.aikars.flags=https://mcflags.emc.gs",
"-Daikars.new.flags=true",
]);
if self.xms > 12 * 1024 {
@ -143,36 +139,84 @@ impl ServerCommand {
"-XX:G1MaxNewSizePercent=50",
"-XX:G1HeapRegionSize=16M",
"-XX:G1ReservePercent=15",
"-XX:InitiatingHeapOccupancyPercent=20",
]);
} else {
cmd.args([
"-XX:G1NewSizePercent=30",
"-XX:G1MaxNewSizePercent=40",
"-XX:G1HeapRegionSize=8M",
"-XX:G1ReservePercent=15",
"-XX:InitiatingHeapOccupancyPercent=15",
"-XX:G1ReservePercent=20",
]);
}
cmd.current_dir(&config_dir)
cmd.args(["-XX:G1HeapWastePercent=5", "-XX:G1MixedGCCountTarget=4"]);
if self.xms > 12 * 1024 {
cmd.args(["-XX:InitiatingHeapOccupancyPercent=20"]);
} else {
cmd.args(["-XX:InitiatingHeapOccupancyPercent=15"]);
}
cmd.args([
"-XX:G1MixedGCLiveThresholdPercent=90",
"-XX:G1RSetUpdatingPauseTimePercent=5",
"-XX:SurvivorRatio=32",
"-XX:+PerfDisableSharedMem",
"-XX:MaxTenuringThreshold=1",
"-Dusing.aikars.flags=https://mcflags.emc.gs",
"-Daikars.new.flags=true",
]);
cmd.current_dir(&self.config_dir)
.arg("-jar")
.arg(&jar)
.arg(&self.jar)
.arg("--universe")
.arg(&world_dir)
.arg(&self.world_dir)
.arg("--nogui")
.stdin(Stdio::piped());
cmd
}
pub fn spawn(&mut self) -> std::io::Result<ServerProcess> {
let mut cmd = self.create_cmd();
self.accept_eula()?;
let child = cmd.spawn()?;
Ok(ServerProcess::new(
self.type_,
self.version,
config_dir,
world_dir,
backup_dir,
self.version.clone(),
self.config_dir.clone(),
self.world_dir.clone(),
self.backup_dir.clone(),
self.max_backups,
child,
))
}
}
impl fmt::Display for ServerCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let cmd = self.create_cmd();
writeln!(f, "Command: {}", self.java)?;
writeln!(f, "Working dir: {}", self.config_dir.as_path().display())?;
// Print command env vars
writeln!(f, "Environment:")?;
for (key, val) in cmd.get_envs().filter(|(_, v)| v.is_some()) {
let val = val.unwrap();
writeln!(f, " {}={}", key.to_string_lossy(), val.to_string_lossy())?;
}
// Print command arguments
writeln!(f, "Arguments:")?;
for arg in cmd.get_args() {
writeln!(f, " {}", arg.to_string_lossy())?;
}
Ok(())
}
}

View File

@ -1,5 +1,8 @@
mod backups;
mod command;
mod path;
mod process;
pub use backups::BackupManager;
pub use command::{ServerCommand, ServerType};
pub use process::ServerProcess;

19 src/server/path.rs 100644
View File

@ -0,0 +1,19 @@
use chrono::Utc;
use std::collections::HashSet;
use std::path::PathBuf;
use std::{fs, io};
struct ReadDirRecursive {
ignored_dirs: HashSet<PathBuf>,
read_dir: Option<fs::ReadDir>,
stack: Vec<fs::ReadDir>,
}
impl ReadDirRecursive {
// pub fn new()
}
trait PathExt {
fn modified_since(timestamp: chrono::DateTime<Utc>) -> bool;
fn read_dir_recusive() -> ReadDirRecursive;
}
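
src/server/path.rs is only a skeleton at this point. Purely as a speculative sketch of where it could go (this is not the author's implementation), the stack-based recursive listing might be exposed as an Iterator along these lines:

use std::collections::HashSet;
use std::path::PathBuf;
use std::{fs, io};

// Speculative sketch: iterate over all files under a directory, skipping ignored
// directories (matched by full path here for simplicity).
struct ReadDirRecursiveIter {
    ignored_dirs: HashSet<PathBuf>,
    stack: Vec<fs::ReadDir>,
}

impl Iterator for ReadDirRecursiveIter {
    type Item = io::Result<fs::DirEntry>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let read_dir = self.stack.last_mut()?;

            match read_dir.next() {
                // Current directory is exhausted; pop it and continue with its parent.
                None => {
                    self.stack.pop();
                }
                Some(Err(err)) => return Some(Err(err)),
                Some(Ok(entry)) => {
                    if self.ignored_dirs.contains(&entry.path()) {
                        continue;
                    }

                    match entry.file_type() {
                        Err(err) => return Some(Err(err)),
                        // Descend into subdirectories by pushing them onto the stack.
                        Ok(ft) if ft.is_dir() => match fs::read_dir(entry.path()) {
                            Ok(rd) => self.stack.push(rd),
                            Err(err) => return Some(Err(err)),
                        },
                        // Plain files are yielded to the caller.
                        Ok(_) => return Some(Ok(entry)),
                    }
                }
            }
        }
    }
}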

View File

@ -1,3 +1,4 @@
use crate::server::BackupManager;
use crate::server::ServerType;
use flate2::write::GzEncoder;
use flate2::Compression;
@ -5,12 +6,6 @@ use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::Child;
#[link(name = "c")]
extern "C" {
fn geteuid() -> u32;
fn getegid() -> u32;
}
pub struct ServerProcess {
type_: ServerType,
version: String,
@ -19,6 +14,7 @@ pub struct ServerProcess {
backup_dir: PathBuf,
max_backups: u64,
child: Child,
backups: BackupManager,
}
impl ServerProcess {
@ -31,6 +27,13 @@ impl ServerProcess {
max_backups: u64,
child: Child,
) -> ServerProcess {
let backup_manager = BackupManager::open(
backup_dir.clone(),
config_dir.clone(),
world_dir.clone(),
max_backups,
);
ServerProcess {
type_,
version,
@ -39,6 +42,7 @@ impl ServerProcess {
backup_dir,
max_backups,
child,
backups: backup_manager,
}
}
@ -84,94 +88,32 @@ impl ServerProcess {
// We wait some time to (hopefully) ensure the save-all call has completed
std::thread::sleep(std::time::Duration::from_secs(10));
let res = self.create_backup_archive();
let start_time = chrono::offset::Utc::now();
let res = self.backups.create_backup();
if res.is_ok() {
self.remove_old_backups()?;
self.backups.remove_old_backups()?;
}
// The server's save feature needs to be enabled again even if the archive failed to create
self.custom("save-on")?;
let duration = chrono::offset::Utc::now() - start_time;
let duration_str = format!(
"{}m{}s",
duration.num_seconds() / 60,
duration.num_seconds() % 60
);
if res.is_ok() {
self.custom("say server backed up successfully")?;
self.custom(&format!("say server backed up in {}", duration_str))?;
} else {
self.custom("an error occured while backing up the server")?;
self.custom(&format!(
"an error occured after {} while backing up the server",
duration_str
))?;
}
res
}
/// Create a new compressed backup archive of the server's data.
fn create_backup_archive(&mut self) -> std::io::Result<()> {
// Create a gzip-compressed tarball of the worlds folder
let filename = format!(
"{}",
chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
);
let path = self.backup_dir.join(filename);
let tar_gz = std::fs::File::create(path)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.append_dir_all("worlds", &self.world_dir)?;
// Add all files from the config directory that aren't the cache
for entry in self
.config_dir
.read_dir()?
.filter_map(|e| e.ok())
.filter(|e| e.file_name() != "cache")
{
let tar_path = Path::new("config").join(entry.file_name());
if entry.file_type()?.is_dir() {
tar.append_dir_all(tar_path, entry.path())?;
} else {
tar.append_path_with_name(entry.path(), tar_path)?;
}
}
// We add a file to the backup describing for what version it was made
let info = format!("{} {}", self.type_, self.version);
let info_bytes = info.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(info_bytes.len().try_into().unwrap());
header.set_mode(0o100644);
unsafe {
header.set_gid(getegid().into());
header.set_uid(geteuid().into());
}
tar.append_data(&mut header, "info.txt", info_bytes)?;
// Backup file gets finalized in the drop
Ok(())
}
/// Remove the oldest backups
fn remove_old_backups(&mut self) -> std::io::Result<()> {
// The naming format used allows us to sort the backups by name and still get a sorting by
// creation time
let mut backups = self
.backup_dir
.read_dir()?
.filter_map(|res| res.map(|e| e.path()).ok())
.collect::<Vec<PathBuf>>();
backups.sort();
let max_backups: usize = self.max_backups.try_into().unwrap();
if backups.len() > max_backups {
let excess_backups = backups.len() - max_backups;
for backup in &backups[0..excess_backups] {
std::fs::remove_file(backup)?;
}
}
Ok(())
}
}