feat: add periodic backups thread
ci/woodpecker/push/lint Pipeline was successful
ci/woodpecker/push/clippy Pipeline failed
ci/woodpecker/push/build Pipeline was successful

signal-handling
Jef Roosens 2023-06-03 23:36:43 +02:00
parent 7248ea8b90
commit 49546a449e
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
4 changed files with 129 additions and 40 deletions

View File

@@ -0,0 +1,2 @@
+[alias]
+runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds data/paper-1.19.4-545.jar"

View File

@@ -1,9 +1,10 @@
 mod server;
 
-use clap::{Parser, Subcommand};
-use server::{ServerCommand, ServerType};
+use clap::Parser;
+use server::ServerType;
 use std::io;
 use std::path::PathBuf;
+use std::sync::{Arc, Mutex};
 
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
@@ -15,43 +16,61 @@ struct Cli {
     /// Server jar to execute
     jar: PathBuf,
 
-    /// Directory where configs are stored, and where the server will run; defaults to the current
-    /// directory.
+    /// Directory where configs are stored, and where the server will run [default: .]
     #[arg(long, value_name = "CONFIG_DIR")]
     config: Option<PathBuf>,
 
-    /// Directory where world files will be saved; defaults to ../worlds
+    /// Directory where world files will be saved [default: ../worlds]
     #[arg(long, value_name = "WORLD_DIR")]
     world: Option<PathBuf>,
 
-    /// Directory where backups will be stored; defaults to ../backups
+    /// Directory where backups will be stored [default: ../backups]
     #[arg(long, value_name = "BACKUP_DIR")]
     backup: Option<PathBuf>,
 
+    /// Java command to run the server jar with
+    #[arg(long, value_name = "JAVA_CMD", default_value_t = String::from("java"))]
+    java: String,
+
-    /// XMS value for the server instance
-    #[arg(long)]
-    xms: Option<u32>,
+    /// XMS value in megabytes for the server instance
+    #[arg(long, default_value_t = 1024)]
+    xms: u64,
 
-    /// XMX value for the server instance
-    #[arg(long)]
-    xmx: Option<u32>,
+    /// XMX value in megabytes for the server instance
+    #[arg(long, default_value_t = 2048)]
+    xmx: u64,
+
+    /// How many backups to keep
+    #[arg(short = 'n', long, default_value_t = 7)]
+    max_backups: u64,
+
+    /// How frequently to perform a backup, in minutes
+    #[arg(short = 't', long, default_value_t = 720)]
+    frequency: u64,
+}
+
+fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>, frequency: u64) {
+    loop {
+        std::thread::sleep(std::time::Duration::from_secs(frequency * 60));
+
+        {
+            let mut server = counter.lock().unwrap();
+            server.backup();
+        }
+    }
 }
 
 fn main() {
     let cli = Cli::parse();
 
-    let mut cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
+    let cmd = server::ServerCommand::new(cli.type_, &cli.server_version)
+        .java(&cli.java)
         .jar(cli.jar)
         .config(cli.config.unwrap_or(".".into()))
         .world(cli.world.unwrap_or("../worlds".into()))
-        .backup(cli.backup.unwrap_or("../backups".into()));
+        .backup(cli.backup.unwrap_or("../backups".into()))
+        .xms(cli.xms)
+        .xmx(cli.xmx)
+        .max_backups(cli.max_backups);
+
+    let counter = Arc::new(Mutex::new(cmd.spawn().expect("Failed to start server.")));
 
-    if let Some(xms) = cli.xms {
-        cmd = cmd.xms(xms);
-    }
-
-    if let Some(xmx) = cli.xmx {
-        cmd = cmd.xmx(xmx);
-    }
-
-    let mut server = cmd.spawn().expect("Failed to start server.");
+    let clone = Arc::clone(&counter);
+    std::thread::spawn(move || backups_thread(clone, cli.frequency));
 
     let stdin = io::stdin();
     let input = &mut String::new();
@@ -59,10 +78,12 @@ fn main() {
     loop {
         input.clear();
         stdin.read_line(input);
 
-        println!("input: {}", input.trim());
-        if let Err(e) = server.send_command(input) {
-            println!("{}", e);
-        };
+        {
+            let mut server = counter.lock().unwrap();
+            if let Err(e) = server.send_command(input) {
+                println!("{}", e);
+            };
+        }
 
         if input.trim() == "stop" {
             break;
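The shape of this change: the spawned ServerProcess now lives behind an Arc<Mutex<..>> so the periodic backup thread and the stdin loop can both drive it, and the extra braces in the input loop scope the MutexGuard so the lock is released before the loop blocks on stdin again. A minimal, self-contained sketch of that sharing pattern (the Process type and the timings are placeholders, not part of this codebase):

use std::sync::{Arc, Mutex};
use std::{thread, time::Duration};

// Placeholder standing in for ServerProcess.
struct Process {
    backups: u32,
}

impl Process {
    fn backup(&mut self) {
        self.backups += 1;
        println!("backup #{}", self.backups);
    }
}

fn main() {
    let counter = Arc::new(Mutex::new(Process { backups: 0 }));

    // Background thread: lock, do the periodic work, release.
    let clone = Arc::clone(&counter);
    thread::spawn(move || loop {
        thread::sleep(Duration::from_millis(200));
        clone.lock().unwrap().backup();
    });

    // "Main loop": the inner block drops the guard as soon as the
    // critical section is done, so the other thread is never starved.
    for _ in 0..5 {
        {
            let mut process = counter.lock().unwrap();
            process.backup();
        } // MutexGuard dropped here
        thread::sleep(Duration::from_millis(100));
    }
}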

View File

@@ -4,7 +4,7 @@ use std::fmt;
 use std::fs::File;
 use std::io::Write;
 use std::path::{Path, PathBuf};
-use std::process::{Child, Command, Stdio};
+use std::process::{Command, Stdio};
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
 pub enum ServerType {
@@ -33,8 +33,9 @@ pub struct ServerCommand {
     config_dir: PathBuf,
     world_dir: PathBuf,
     backup_dir: PathBuf,
-    xms: u32,
-    xmx: u32,
+    xms: u64,
+    xmx: u64,
+    max_backups: u64,
 }
 
 impl ServerCommand {
@@ -49,6 +50,7 @@ impl ServerCommand {
             backup_dir: PathBuf::from("backups"),
             xms: 1024,
             xmx: 2048,
+            max_backups: 7,
         }
     }
@@ -79,21 +81,26 @@ impl ServerCommand {
         self
     }
 
-    pub fn xms(mut self, v: u32) -> Self {
+    pub fn xms(mut self, v: u64) -> Self {
         self.xms = v;
 
         self
     }
 
-    pub fn xmx(mut self, v: u32) -> Self {
+    pub fn xmx(mut self, v: u64) -> Self {
         self.xmx = v;
 
         self
     }
 
+    pub fn max_backups(mut self, v: u64) -> Self {
+        self.max_backups = v;
+
+        self
+    }
+
     fn accept_eula(&self) -> std::io::Result<()> {
         let mut eula_path = self.config_dir.clone();
         eula_path.push("eula.txt");
 
         let mut eula_file = File::create(eula_path)?;
-        eula_file.write(b"eula=true")?;
+        eula_file.write_all(b"eula=true")?;
 
         Ok(())
     }
@@ -123,6 +130,7 @@ impl ServerCommand {
             config_dir,
             world_dir,
             backup_dir,
+            self.max_backups,
             child,
         ))
     }
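One small change in this file is unrelated to backups: eula_file.write(b"eula=true") became write_all. write() is allowed to write fewer bytes than requested and only reports the count, which clippy flags when the count is ignored (the unused_io_amount lint is a likely trigger, given the failing clippy pipeline above); write_all() keeps writing until the whole buffer is written or an error occurs. A small illustration of the difference, using an in-memory writer as a stand-in for the eula.txt file:

use std::io::Write;

fn main() -> std::io::Result<()> {
    let mut out: Vec<u8> = Vec::new();

    // write() may perform a partial write; the returned count must be checked.
    let written = out.write(b"eula=true")?;
    assert_eq!(written, 9);

    // write_all() loops internally until every byte is written (or errors out).
    out.clear();
    out.write_all(b"eula=true")?;
    assert_eq!(&out[..], &b"eula=true"[..]);

    Ok(())
}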

View File

@@ -1,12 +1,15 @@
 use crate::server::ServerType;
 use flate2::write::GzEncoder;
 use flate2::Compression;
-use std::fs::File;
-use std::io;
 use std::io::Write;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 use std::process::Child;
-use std::process::{Command, Stdio};
+
+#[link(name = "c")]
+extern "C" {
+    fn geteuid() -> u32;
+    fn getegid() -> u32;
+}
 
 pub struct ServerProcess {
     type_: ServerType,
@@ -14,6 +17,7 @@ pub struct ServerProcess {
     config_dir: PathBuf,
     world_dir: PathBuf,
     backup_dir: PathBuf,
+    max_backups: u64,
     child: Child,
 }
@@ -24,6 +28,7 @@ impl ServerProcess {
         config_dir: PathBuf,
         world_dir: PathBuf,
         backup_dir: PathBuf,
+        max_backups: u64,
         child: Child,
     ) -> ServerProcess {
         ServerProcess {
@@ -32,6 +37,7 @@ impl ServerProcess {
             config_dir,
             world_dir,
             backup_dir,
+            max_backups,
             child,
         }
     }
@@ -61,17 +67,41 @@ impl ServerProcess {
         Ok(())
     }
 
+    /// Perform a backup by disabling the server's save feature and flushing its data, before
+    /// creating an archive file.
     pub fn backup(&mut self) -> std::io::Result<()> {
+        self.custom("say backing up server")?;
+
         // Make sure the server isn't modifying the files during the backup
         self.custom("save-off")?;
         self.custom("save-all")?;
 
+        // TODO implement a better mechanism
+        // We wait some time to (hopefully) ensure the save-all call has completed
+        std::thread::sleep(std::time::Duration::from_secs(10));
+
+        let res = self.create_backup_archive();
+
+        if res.is_ok() {
+            self.remove_old_backups()?;
+        }
+
+        // The server's save feature needs to be enabled again even if the archive failed to create
+        self.custom("save-on")?;
+        self.custom("say server backed up successfully")?;
+
+        res
+    }
+
+    /// Create a new compressed backup archive of the server's data.
+    fn create_backup_archive(&mut self) -> std::io::Result<()> {
         // Create a gzip-compressed tarball of the worlds folder
         let filename = format!(
             "{}",
             chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
         );
-        let path = self.backup_dir.join(&filename);
+        let path = self.backup_dir.join(filename);
         let tar_gz = std::fs::File::create(path)?;
         let enc = GzEncoder::new(tar_gz, Compression::default());
         let mut tar = tar::Builder::new(enc);
@@ -87,15 +117,43 @@ impl ServerProcess {
         // We add a file to the backup describing for what version it was made
         let info = format!("{} {}", self.type_, self.version);
         let info_bytes = info.as_bytes();
 
         let mut header = tar::Header::new_gnu();
         header.set_size(info_bytes.len().try_into().unwrap());
+        header.set_mode(0o100644);
+
+        unsafe {
+            header.set_gid(getegid().into());
+            header.set_uid(geteuid().into());
+        }
 
         tar.append_data(&mut header, "info.txt", info_bytes)?;
 
         // tar.append_dir_all("config", &self.config_dir)?;
-
+        //
         // Backup file gets finalized in the drop
-        self.custom("save-on")
+        Ok(())
+    }
+
+    /// Remove the oldest backups
+    fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        // The naming format used allows us to sort the backups by name and still get a sorting by
+        // creation time
+        let mut backups = std::fs::read_dir(&self.backup_dir)?
+            .filter_map(|res| res.map(|e| e.path()).ok())
+            .collect::<Vec<PathBuf>>();
+        backups.sort();
+
+        let max_backups: usize = self.max_backups.try_into().unwrap();
+
+        if backups.len() > max_backups {
+            let excess_backups = backups.len() - max_backups;
+
+            for backup in &backups[0..excess_backups] {
+                std::fs::remove_file(backup)?;
+            }
+        }
+
+        Ok(())
     }
 }
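remove_old_backups leans on the fact that the zero-padded %Y-%m-%d_%H-%M-%S timestamps used as archive names sort chronologically when sorted lexicographically, so a plain backups.sort() puts the oldest archives first and the slice up to excess_backups is exactly the set to delete. A quick sanity check of that property (the file names below are made up):

fn main() {
    let mut backups = vec![
        "2023-06-03_23-36-43.tar.gz",
        "2022-12-31_23-59-59.tar.gz",
        "2023-01-15_08-00-00.tar.gz",
    ];

    // Lexicographic order equals chronological order for this format,
    // because every field is zero-padded and ordered from year down to second.
    backups.sort();

    assert_eq!(backups[0], "2022-12-31_23-59-59.tar.gz"); // oldest first
    assert_eq!(backups[2], "2023-06-03_23-36-43.tar.gz"); // newest last
}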