Compare commits
No commits in common. "3e0324703d5b4d666ff1ca2a3b03d250191b6de0" and "69ce8616d5232a08ebb4ceac3fa3e9062d7245ff" have entirely different histories.
3e0324703d ... 69ce8616d5
.gitignore

@@ -19,4 +19,4 @@ target/
 # testing files
 *.jar
-data*/
+data/
 
Cargo.lock

@@ -10,7 +10,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
 name = "alex"
-version = "0.2.0"
+version = "0.1.0"
 dependencies = [
  "chrono",
  "clap",
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "alex"
-version = "0.2.0"
+version = "0.1.0"
 description = "Wrapper around Minecraft server processes, designed to complement Docker image installations."
 authors = ["Jef Roosens"]
 edition = "2021"
src/server/backups.rs (deleted)

@@ -1,128 +0,0 @@
-use flate2::write::GzEncoder;
-use flate2::Compression;
-use std::fs::File;
-use std::io;
-use std::path::{Path, PathBuf};
-
-#[link(name = "c")]
-extern "C" {
-    fn geteuid() -> u32;
-    fn getegid() -> u32;
-}
-
-static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
-
-pub struct BackupManager {
-    backup_dir: PathBuf,
-    config_dir: PathBuf,
-    world_dir: PathBuf,
-    max_backups: u64,
-    start_time: Option<chrono::DateTime<chrono::Local>>,
-    files: Vec<(PathBuf, PathBuf)>
-}
-
-impl BackupManager {
-    pub fn open(
-        backup_dir: PathBuf,
-        config_dir: PathBuf,
-        world_dir: PathBuf,
-        max_backups: u64,
-    ) -> Self {
-        BackupManager {
-            backup_dir,
-            config_dir,
-            world_dir,
-            max_backups,
-            start_time: None,
-            files: Vec::new()
-        }
-    }
-
-    fn set_files_to_backup(&mut self) -> io::Result<()> {
-        let mut dirs = vec![
-            (PathBuf::from("worlds"), self.world_dir.clone()),
-            (PathBuf::from("config"), self.config_dir.clone()),
-        ];
-        self.files.clear();
-
-        while let Some((path_in_tar, path)) = dirs.pop() {
-            for res in path.read_dir()? {
-                let entry = res?;
-
-                if entry.file_name() == "cache" {
-                    continue;
-                }
-
-                let new_path_in_tar = path_in_tar.join(entry.file_name());
-
-                // All dirs get expanded recursively, while all files get returned as output
-                // NOTE: does this remove empty directories from backups? Is this a problem?
-                if entry.file_type()?.is_dir() {
-                    dirs.push((new_path_in_tar, entry.path()));
-                } else {
-                    self.files.push((new_path_in_tar, entry.path()));
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn create_archive(&mut self) -> io::Result<()> {
-        let start_time = chrono::offset::Local::now();
-        self.start_time = Some(start_time);
-
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = self.backup_dir.join(filename);
-        let tar_gz = File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut tar = tar::Builder::new(enc);
-
-        self.set_files_to_backup()?;
-
-        for (path_in_tar, path) in &self.files {
-            tar.append_path_with_name(path, path_in_tar)?;
-        }
-
-        // TODO re-add this info file in some way
-        // We add a file to the backup describing for what version it was made
-        // let info = format!("{} {}", self.type_, self.version);
-        // let info_bytes = info.as_bytes();
-
-        // let mut header = tar::Header::new_gnu();
-        // header.set_size(info_bytes.len().try_into().unwrap());
-        // header.set_mode(0o100644);
-        // unsafe {
-        //     header.set_gid(getegid().into());
-        //     header.set_uid(geteuid().into());
-        // }
-
-        // tar.append_data(&mut header, "info.txt", info_bytes)?;
-
-        Ok(())
-    }
-
-    /// Remove the oldest backups
-    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
-        // The naming format used allows us to sort the backups by name and still get a sorting by
-        // creation time
-        let mut backups = self
-            .backup_dir
-            .read_dir()?
-            .filter_map(|res| res.map(|e| e.path()).ok())
-            .collect::<Vec<PathBuf>>();
-        backups.sort();
-
-        let max_backups: usize = self.max_backups.try_into().unwrap();
-
-        if backups.len() > max_backups {
-            let excess_backups = backups.len() - max_backups;
-
-            for backup in &backups[0..excess_backups] {
-                std::fs::remove_file(backup)?;
-            }
-        }
-
-        Ok(())
-    }
-}
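For context, the BackupManager deleted by this comparison bundled the three steps of a backup (collect files, write the archive, prune old archives) behind a small public API. A minimal sketch of how that API fits together, assuming placeholder directories and a made-up retention count (this driver code appears in neither commit):

use crate::server::BackupManager;
use std::io;
use std::path::PathBuf;

// Hypothetical driver; the directories and retention count are placeholders.
fn run_backup() -> io::Result<()> {
    let mut manager = BackupManager::open(
        PathBuf::from("backups"), // backup_dir: where the .tar.gz archives land
        PathBuf::from("config"),  // config_dir: archived under "config/", skipping "cache"
        PathBuf::from("worlds"),  // world_dir: archived under "worlds/"
        7,                        // max_backups: keep at most this many archives
    );

    manager.create_archive()?;   // writes <backup_dir>/<%Y-%m-%d_%H-%M-%S>.tar.gz
    manager.remove_old_backups() // deletes the oldest archives past the limit
}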
src/server/mod.rs

@@ -1,7 +1,5 @@
-mod backups;
 mod command;
 mod process;
 
-pub use backups::BackupManager;
 pub use command::{ServerCommand, ServerType};
 pub use process::ServerProcess;
src/server/process.rs

@@ -1,4 +1,3 @@
-use crate::server::BackupManager;
 use crate::server::ServerType;
 use flate2::write::GzEncoder;
 use flate2::Compression;
@@ -6,6 +5,12 @@ use std::io::Write;
 use std::path::{Path, PathBuf};
 use std::process::Child;
 
+#[link(name = "c")]
+extern "C" {
+    fn geteuid() -> u32;
+    fn getegid() -> u32;
+}
+
 pub struct ServerProcess {
     type_: ServerType,
     version: String,
@@ -14,7 +19,6 @@ pub struct ServerProcess {
     backup_dir: PathBuf,
     max_backups: u64,
     child: Child,
-    backups: BackupManager,
 }
 
 impl ServerProcess {
@@ -27,13 +31,6 @@ impl ServerProcess {
         max_backups: u64,
         child: Child,
     ) -> ServerProcess {
-        let backup_manager = BackupManager::open(
-            backup_dir.clone(),
-            config_dir.clone(),
-            world_dir.clone(),
-            max_backups,
-        );
-
         ServerProcess {
             type_,
             version,
@@ -42,7 +39,6 @@ impl ServerProcess {
             backup_dir,
             max_backups,
             child,
-            backups: backup_manager,
         }
     }
 
@@ -88,32 +84,94 @@ impl ServerProcess {
         // We wait some time to (hopefully) ensure the save-all call has completed
         std::thread::sleep(std::time::Duration::from_secs(10));
 
-        let start_time = chrono::offset::Local::now();
-        let res = self.backups.create_archive();
+        let res = self.create_backup_archive();
 
         if res.is_ok() {
-            self.backups.remove_old_backups()?;
+            self.remove_old_backups()?;
         }
 
         // The server's save feature needs to be enabled again even if the archive failed to create
         self.custom("save-on")?;
 
-        let duration = chrono::offset::Local::now() - start_time;
-        let duration_str = format!(
-            "{}m{}s",
-            duration.num_seconds() / 60,
-            duration.num_seconds() % 60
-        );
-
         if res.is_ok() {
-            self.custom(&format!("say server backed up in {}", duration_str))?;
+            self.custom("say server backed up successfully")?;
         } else {
-            self.custom(&format!(
-                "an error occured after {} while backing up the server",
-                duration_str
-            ))?;
+            self.custom("an error occured while backing up the server")?;
         }
 
         res
     }
 
+    /// Create a new compressed backup archive of the server's data.
+    fn create_backup_archive(&mut self) -> std::io::Result<()> {
+        // Create a gzip-compressed tarball of the worlds folder
+        let filename = format!(
+            "{}",
+            chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
+        );
+        let path = self.backup_dir.join(filename);
+        let tar_gz = std::fs::File::create(path)?;
+        let enc = GzEncoder::new(tar_gz, Compression::default());
+        let mut tar = tar::Builder::new(enc);
+
+        tar.append_dir_all("worlds", &self.world_dir)?;
+
+        // Add all files from the config directory that aren't the cache
+        for entry in self
+            .config_dir
+            .read_dir()?
+            .filter_map(|e| e.ok())
+            .filter(|e| e.file_name() != "cache")
+        {
+            let tar_path = Path::new("config").join(entry.file_name());
+
+            if entry.file_type()?.is_dir() {
+                tar.append_dir_all(tar_path, entry.path())?;
+            } else {
+                tar.append_path_with_name(entry.path(), tar_path)?;
+            }
+        }
+
+        // We add a file to the backup describing for what version it was made
+        let info = format!("{} {}", self.type_, self.version);
+        let info_bytes = info.as_bytes();
+
+        let mut header = tar::Header::new_gnu();
+        header.set_size(info_bytes.len().try_into().unwrap());
+        header.set_mode(0o100644);
+        unsafe {
+            header.set_gid(getegid().into());
+            header.set_uid(geteuid().into());
+        }
+
+        tar.append_data(&mut header, "info.txt", info_bytes)?;
+
+        // Backup file gets finalized in the drop
+
+        Ok(())
+    }
+
+    /// Remove the oldest backups
+    fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        // The naming format used allows us to sort the backups by name and still get a sorting by
+        // creation time
+        let mut backups = self
+            .backup_dir
+            .read_dir()?
+            .filter_map(|res| res.map(|e| e.path()).ok())
+            .collect::<Vec<PathBuf>>();
+        backups.sort();
+
+        let max_backups: usize = self.max_backups.try_into().unwrap();
+
+        if backups.len() > max_backups {
+            let excess_backups = backups.len() - max_backups;
+
+            for backup in &backups[0..excess_backups] {
+                std::fs::remove_file(backup)?;
+            }
+        }
+
+        Ok(())
+    }
 }
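Both sides of this comparison prune backups the same way: because the %Y-%m-%d_%H-%M-%S timestamp is zero-padded and ordered from most to least significant field, sorting the archive filenames lexicographically also sorts them chronologically, so the oldest archives are simply the first elements after backups.sort(). A small worked example with made-up filenames:

fn main() {
    // Hypothetical archive names; a plain string sort yields chronological order.
    let mut backups = vec![
        "2023-01-02_08-00-00.tar.gz",
        "2022-12-31_23-59-59.tar.gz",
        "2023-01-01_00-00-00.tar.gz",
    ];
    backups.sort();
    assert_eq!(backups[0], "2022-12-31_23-59-59.tar.gz"); // oldest first
    // With max_backups = 2, excess_backups = 1, so &backups[0..1] gets deleted.
}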