Compare commits

...

5 Commits

Author SHA1 Message Date
Jef Roosens 42c7a7cc5b wip 2023-06-13 15:09:07 +02:00
Jef Roosens b1c0bbb3af refactor: use utc time 2023-06-09 10:42:17 +02:00
Jef Roosens 3e0324703d feat: implement own listing of files 2023-06-09 10:11:02 +02:00
Jef Roosens 90033aa91e refactor: move backup logic to separate module 2023-06-09 09:25:51 +02:00
Jef Roosens 19d255b98c feat: show backup time in message 2023-06-07 21:16:35 +02:00
7 changed files with 189 additions and 88 deletions
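Taken together, these commits extract the backup logic from `ServerProcess` into a dedicated `BackupManager` in its own module, switch the backup timestamps to UTC, and start (still marked "wip") tracking per-file state so that later archives only need to contain files changed since the previous run.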

@@ -1,3 +1,3 @@
 [alias]
-runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
-runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
+runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"
+runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"
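These are Cargo command aliases; the filename was not captured in this view, but such aliases normally live in `.cargo/config.toml`. With them, `cargo runs` expands to the full `cargo run -- paper …` invocation above, and `cargo runrs` does the same with a release build. The change pins the jar argument to a versioned filename.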

.gitignore

@@ -19,4 +19,4 @@ target/
 # testing files
 *.jar
-data/
+data*/
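The broadened pattern `data*/` ignores every directory whose name starts with `data`, not just `data/` itself, which is useful when keeping several local test data directories (for example a hypothetical `data2/`) next to each other.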

Cargo.lock

@@ -10,7 +10,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 [[package]]
 name = "alex"
-version = "0.1.0"
+version = "0.2.0"
 dependencies = [
  "chrono",
  "clap",

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "alex"
-version = "0.1.0"
+version = "0.2.0"
 description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
 authors = ["Jef Roosens"]
 edition = "2021"

src/server/backups.rs (new file)

@@ -0,0 +1,157 @@
+use flate2::write::GzEncoder;
+use flate2::Compression;
+use std::fs::File;
+use std::io;
+use std::path::{Path, PathBuf};
+use chrono::{Utc, Local};
+use std::collections::HashSet;
+
+#[link(name = "c")]
+extern "C" {
+    fn geteuid() -> u32;
+    fn getegid() -> u32;
+}
+
+static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
+
+pub struct BackupManager {
+    backup_dir: PathBuf,
+    config_dir: PathBuf,
+    world_dir: PathBuf,
+    max_backups: u64,
+    /// Start time of the last successful backup
+    last_start_time: Option<chrono::DateTime<chrono::Utc>>,
+    /// Files contained in the last successful backup
+    last_files: HashSet<(PathBuf, PathBuf)>,
+}
+
+impl BackupManager {
+    pub fn open(
+        backup_dir: PathBuf,
+        config_dir: PathBuf,
+        world_dir: PathBuf,
+        max_backups: u64,
+    ) -> Self {
+        BackupManager {
+            backup_dir,
+            config_dir,
+            world_dir,
+            max_backups,
+            last_start_time: None,
+            last_files: HashSet::new(),
+        }
+    }
+
+    fn files_to_backup(&mut self) -> io::Result<HashSet<(PathBuf, PathBuf)>> {
+        let mut dirs = vec![
+            (PathBuf::from("worlds"), self.world_dir.clone()),
+            (PathBuf::from("config"), self.config_dir.clone()),
+        ];
+        let mut files: HashSet<(PathBuf, PathBuf)> = HashSet::new();
+
+        while let Some((path_in_tar, path)) = dirs.pop() {
+            for res in path.read_dir()? {
+                let entry = res?;
+
+                if entry.file_name() == "cache" {
+                    continue;
+                }
+
+                let new_path_in_tar = path_in_tar.join(entry.file_name());
+
+                // All dirs get expanded recursively, while all files get returned as output
+                // NOTE: does this remove empty directories from backups? Is this a problem?
+                if entry.file_type()?.is_dir() {
+                    dirs.push((new_path_in_tar, entry.path()));
+                } else {
+                    // Only add files that have been updated since the last backup (incremental backup)
+                    if let Some(last_start_time) = self.last_start_time {
+                        let last_modified = entry.path().metadata()?.modified();
+
+                        if let Ok(last_modified) = last_modified {
+                            let t: chrono::DateTime<Utc> = last_modified.into();
+                            let t = t.with_timezone(&Local);
+
+                            if t < last_start_time {
+                                continue;
+                            }
+                        }
+                    }
+
+                    files.insert((new_path_in_tar, entry.path()));
+                }
+            }
+        }
+
+        Ok(files)
+    }
+
+    pub fn create_archive(&mut self) -> io::Result<()> {
+        let start_time = chrono::offset::Utc::now();
+        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
+        let path = self.backup_dir.join(filename);
+        let tar_gz = File::create(path)?;
+        let enc = GzEncoder::new(tar_gz, Compression::default());
+        let mut ar = tar::Builder::new(enc);
+
+        let files = self.files_to_backup()?;
+
+        for (path_in_tar, path) in &files {
+            ar.append_path_with_name(path, path_in_tar)?;
+        }
+
+        let deleted_files = self.last_files.difference(&files);
+        println!("{} {}", files.len(), self.last_files.len());
+
+        for (path_in_tar, path) in deleted_files {
+            println!("{path_in_tar:?}: {path:?}");
+        }
+
+        // TODO re-add this info file in some way
+        // We add a file to the backup describing for what version it was made
+        // let info = format!("{} {}", self.type_, self.version);
+        // let info_bytes = info.as_bytes();
+
+        // let mut header = tar::Header::new_gnu();
+        // header.set_size(info_bytes.len().try_into().unwrap());
+        // header.set_mode(0o100644);
+
+        // unsafe {
+        //     header.set_gid(getegid().into());
+        //     header.set_uid(geteuid().into());
+        // }
+
+        // tar.append_data(&mut header, "info.txt", info_bytes)?;
+
+        // After a successful backup, we store the original metadata
+        self.last_start_time = Some(start_time);
+        self.last_files = files;
+
+        Ok(())
+    }
+
+    /// Remove the oldest backups
+    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        // The naming format used allows us to sort the backups by name and still get a sorting by
+        // creation time
+        let mut backups = self
+            .backup_dir
+            .read_dir()?
+            .filter_map(|res| res.map(|e| e.path()).ok())
+            .collect::<Vec<PathBuf>>();
+        backups.sort();
+
+        let max_backups: usize = self.max_backups.try_into().unwrap();
+
+        if backups.len() > max_backups {
+            let excess_backups = backups.len() - max_backups;
+
+            for backup in &backups[0..excess_backups] {
+                std::fs::remove_file(backup)?;
+            }
+        }
+
+        Ok(())
+    }
+}
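For orientation, a minimal sketch of how this new manager could be driven on its own. The `data/...` paths mirror the Cargo aliases above, and reaching `BackupManager` via `alex::server` assumes the `server` module is exposed at the crate root; in the changeset itself the manager is owned and driven by `ServerProcess` (see `process.rs` below):

use std::path::PathBuf;

// Assumes the `server` module is reachable from the crate root.
use alex::server::BackupManager;

fn main() -> std::io::Result<()> {
    // Hypothetical paths matching the Cargo aliases above.
    let mut manager = BackupManager::open(
        PathBuf::from("data/backups"),
        PathBuf::from("data/config"),
        PathBuf::from("data/worlds"),
        7, // retain at most 7 archives
    );

    // The first call archives everything; because `last_start_time` is then
    // set, later calls only pick up files modified since that time.
    manager.create_archive()?;
    manager.remove_old_backups()?;

    Ok(())
}

Because `FILENAME_FORMAT` orders its fields from year down to second, lexicographic order of the archive names matches creation order, which is exactly what `remove_old_backups` relies on when it sorts the paths before pruning.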

src/server/mod.rs

@@ -1,5 +1,7 @@
+mod backups;
 mod command;
 mod process;

+pub use backups::BackupManager;
 pub use command::{ServerCommand, ServerType};
 pub use process::ServerProcess;
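The two added lines wire the new module in: `mod backups;` declares it, and the re-export of `BackupManager` is what allows `process.rs` below to import `crate::server::BackupManager` rather than spelling out the `backups` submodule path.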

src/server/process.rs

@@ -1,3 +1,4 @@
+use crate::server::BackupManager;
 use crate::server::ServerType;
 use flate2::write::GzEncoder;
 use flate2::Compression;
@@ -5,12 +6,6 @@ use std::io::Write;
 use std::path::{Path, PathBuf};
 use std::process::Child;
-
-#[link(name = "c")]
-extern "C" {
-    fn geteuid() -> u32;
-    fn getegid() -> u32;
-}

 pub struct ServerProcess {
     type_: ServerType,
     version: String,
@@ -19,6 +14,7 @@ pub struct ServerProcess {
     backup_dir: PathBuf,
     max_backups: u64,
     child: Child,
+    backups: BackupManager,
 }

 impl ServerProcess {
@@ -31,6 +27,13 @@ impl ServerProcess {
         max_backups: u64,
         child: Child,
     ) -> ServerProcess {
+        let backup_manager = BackupManager::open(
+            backup_dir.clone(),
+            config_dir.clone(),
+            world_dir.clone(),
+            max_backups,
+        );
+
         ServerProcess {
             type_,
             version,
@@ -39,6 +42,7 @@ impl ServerProcess {
             backup_dir,
             max_backups,
             child,
+            backups: backup_manager,
         }
     }
@@ -84,94 +88,32 @@ impl ServerProcess {
         // We wait some time to (hopefully) ensure the save-all call has completed
         std::thread::sleep(std::time::Duration::from_secs(10));

-        let res = self.create_backup_archive();
+        let start_time = chrono::offset::Utc::now();
+        let res = self.backups.create_archive();

         if res.is_ok() {
-            self.remove_old_backups()?;
+            self.backups.remove_old_backups()?;
         }

         // The server's save feature needs to be enabled again even if the archive failed to create
         self.custom("save-on")?;

+        let duration = chrono::offset::Utc::now() - start_time;
+        let duration_str = format!(
+            "{}m{}s",
+            duration.num_seconds() / 60,
+            duration.num_seconds() % 60
+        );
+
         if res.is_ok() {
-            self.custom("say server backed up successfully")?;
+            self.custom(&format!("say server backed up in {}", duration_str))?;
         } else {
-            self.custom("an error occured while backing up the server")?;
+            self.custom(&format!(
+                "an error occured after {} while backing up the server",
+                duration_str
+            ))?;
         }

         res
     }

-    /// Create a new compressed backup archive of the server's data.
-    fn create_backup_archive(&mut self) -> std::io::Result<()> {
-        // Create a gzip-compressed tarball of the worlds folder
-        let filename = format!(
-            "{}",
-            chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
-        );
-        let path = self.backup_dir.join(filename);
-        let tar_gz = std::fs::File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut tar = tar::Builder::new(enc);
-
-        tar.append_dir_all("worlds", &self.world_dir)?;
-
-        // Add all files from the config directory that aren't the cache
-        for entry in self
-            .config_dir
-            .read_dir()?
-            .filter_map(|e| e.ok())
-            .filter(|e| e.file_name() != "cache")
-        {
-            let tar_path = Path::new("config").join(entry.file_name());
-
-            if entry.file_type()?.is_dir() {
-                tar.append_dir_all(tar_path, entry.path())?;
-            } else {
-                tar.append_path_with_name(entry.path(), tar_path)?;
-            }
-        }
-
-        // We add a file to the backup describing for what version it was made
-        let info = format!("{} {}", self.type_, self.version);
-        let info_bytes = info.as_bytes();
-
-        let mut header = tar::Header::new_gnu();
-        header.set_size(info_bytes.len().try_into().unwrap());
-        header.set_mode(0o100644);
-
-        unsafe {
-            header.set_gid(getegid().into());
-            header.set_uid(geteuid().into());
-        }
-
-        tar.append_data(&mut header, "info.txt", info_bytes)?;
-
-        // Backup file gets finalized in the drop
-        Ok(())
-    }
-
-    /// Remove the oldest backups
-    fn remove_old_backups(&mut self) -> std::io::Result<()> {
-        // The naming format used allows us to sort the backups by name and still get a sorting by
-        // creation time
-        let mut backups = self
-            .backup_dir
-            .read_dir()?
-            .filter_map(|res| res.map(|e| e.path()).ok())
-            .collect::<Vec<PathBuf>>();
-        backups.sort();
-
-        let max_backups: usize = self.max_backups.try_into().unwrap();
-
-        if backups.len() > max_backups {
-            let excess_backups = backups.len() - max_backups;
-
-            for backup in &backups[0..excess_backups] {
-                std::fs::remove_file(backup)?;
-            }
-        }
-
-        Ok(())
-    }
 }
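The new in-game message builds its `XmYs` text from a `chrono::Duration` using integer division and remainder on the total seconds. A standalone illustration of that arithmetic (the helper name is hypothetical):

use chrono::Duration;

// Hypothetical helper mirroring the formatting in the backup method above.
fn format_duration(d: Duration) -> String {
    format!("{}m{}s", d.num_seconds() / 60, d.num_seconds() % 60)
}

fn main() {
    assert_eq!(format_duration(Duration::seconds(95)), "1m35s");
    assert_eq!(format_duration(Duration::seconds(60)), "1m0s");
    assert_eq!(format_duration(Duration::seconds(7)), "0m7s");
}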