refactor: move backup logic to separate module
parent 90aa929b73
commit 4958257f6e
@@ -19,4 +19,4 @@ target/
 # testing files
 *.jar
-data/
+data*/
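Note on this hunk: assuming it edits an ignore file (the target/ and *.jar patterns suggest .gitignore), widening data/ to data*/ makes the rule also cover suffixed directories such as data2/ or data_old/, not just data/ itself.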
@@ -0,0 +1,108 @@
+use flate2::write::GzEncoder;
+use flate2::Compression;
+use std::fs::File;
+use std::io;
+use std::path::{Path, PathBuf};
+
+#[link(name = "c")]
+extern "C" {
+    fn geteuid() -> u32;
+    fn getegid() -> u32;
+}
+
+static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
+
+pub struct BackupManager {
+    backup_dir: PathBuf,
+    config_dir: PathBuf,
+    world_dir: PathBuf,
+    max_backups: u64,
+    start_time: Option<chrono::DateTime<chrono::Local>>,
+}
+
+impl BackupManager {
+    pub fn open(
+        backup_dir: PathBuf,
+        config_dir: PathBuf,
+        world_dir: PathBuf,
+        max_backups: u64,
+    ) -> Self {
+        BackupManager {
+            backup_dir,
+            config_dir,
+            world_dir,
+            max_backups,
+            start_time: None,
+        }
+    }
+
+    pub fn create_archive(&mut self) -> io::Result<()> {
+        let start_time = chrono::offset::Local::now();
+        self.start_time = Some(start_time);
+
+        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
+        let path = self.backup_dir.join(filename);
+        let tar_gz = File::create(path)?;
+        let enc = GzEncoder::new(tar_gz, Compression::default());
+        let mut tar = tar::Builder::new(enc);
+
+        tar.append_dir_all("worlds", &self.world_dir)?;
+
+        // Add all files from the config directory that aren't the cache
+        for entry in self
+            .config_dir
+            .read_dir()?
+            .filter_map(|e| e.ok())
+            .filter(|e| e.file_name() != "cache")
+        {
+            let tar_path = Path::new("config").join(entry.file_name());
+
+            if entry.file_type()?.is_dir() {
+                tar.append_dir_all(tar_path, entry.path())?;
+            } else {
+                tar.append_path_with_name(entry.path(), tar_path)?;
+            }
+        }
+
+        // TODO re-add this info file in some way
+        // We add a file to the backup describing which version it was made for
+        // let info = format!("{} {}", self.type_, self.version);
+        // let info_bytes = info.as_bytes();
+
+        // let mut header = tar::Header::new_gnu();
+        // header.set_size(info_bytes.len().try_into().unwrap());
+        // header.set_mode(0o100644);
+        // unsafe {
+        //     header.set_gid(getegid().into());
+        //     header.set_uid(geteuid().into());
+        // }
+
+        // tar.append_data(&mut header, "info.txt", info_bytes)?;
+
+        Ok(())
+    }
+
+    /// Remove the oldest backups
+    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        // The naming format used allows us to sort the backups by name and still get them
+        // ordered by creation time
+        let mut backups = self
+            .backup_dir
+            .read_dir()?
+            .filter_map(|res| res.map(|e| e.path()).ok())
+            .collect::<Vec<PathBuf>>();
+        backups.sort();
+
+        let max_backups: usize = self.max_backups.try_into().unwrap();
+
+        if backups.len() > max_backups {
+            let excess_backups = backups.len() - max_backups;
+
+            for backup in &backups[0..excess_backups] {
+                std::fs::remove_file(backup)?;
+            }
+        }
+
+        Ok(())
+    }
+}
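For orientation, the new module is meant to be driven roughly like this. A minimal sketch, not part of the commit: it assumes BackupManager is in scope, and the directories and the limit of 5 are made-up placeholders (the real caller, ServerProcess::new below, passes its own values):

    use std::path::PathBuf;

    fn run_backup() -> std::io::Result<()> {
        // Hypothetical directories; ServerProcess::new passes the real ones.
        let mut manager = BackupManager::open(
            PathBuf::from("backups"),
            PathBuf::from("config"),
            PathBuf::from("worlds"),
            5, // keep at most five archives
        );

        // Write a gzip-compressed tarball of the world and config data,
        // then prune the oldest archives beyond max_backups.
        manager.create_archive()?;
        manager.remove_old_backups()?;
        Ok(())
    }

Because FILENAME_FORMAT orders its fields from year down to second, names like 2024-01-02_03-04-05.tar.gz sort lexicographically in creation order, which is exactly what remove_old_backups relies on when it sorts by name.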
@@ -1,5 +1,7 @@
+mod backups;
 mod command;
 mod process;

+pub use backups::BackupManager;
 pub use command::{ServerCommand, ServerType};
 pub use process::ServerProcess;
@@ -1,3 +1,4 @@
+use crate::server::BackupManager;
 use crate::server::ServerType;
 use flate2::write::GzEncoder;
 use flate2::Compression;
@@ -5,12 +6,6 @@ use std::io::Write;
 use std::path::{Path, PathBuf};
 use std::process::Child;

-#[link(name = "c")]
-extern "C" {
-    fn geteuid() -> u32;
-    fn getegid() -> u32;
-}
-
 pub struct ServerProcess {
     type_: ServerType,
     version: String,
@@ -19,6 +14,7 @@ pub struct ServerProcess {
     backup_dir: PathBuf,
     max_backups: u64,
     child: Child,
+    backups: BackupManager,
 }

 impl ServerProcess {
@@ -31,6 +27,13 @@ impl ServerProcess {
         max_backups: u64,
         child: Child,
     ) -> ServerProcess {
+        let backup_manager = BackupManager::open(
+            backup_dir.clone(),
+            config_dir.clone(),
+            world_dir.clone(),
+            max_backups,
+        );
+
         ServerProcess {
             type_,
             version,
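The clones here follow from the diff above: ServerProcess still keeps its own backup_dir and max_backups fields, so BackupManager receives copies of the paths rather than taking ownership of them.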
@@ -39,6 +42,7 @@ impl ServerProcess {
             backup_dir,
             max_backups,
             child,
+            backups: backup_manager,
         }
     }
@@ -85,10 +89,10 @@ impl ServerProcess {
         std::thread::sleep(std::time::Duration::from_secs(10));

-        let start_time = chrono::offset::Local::now();
-        let res = self.create_backup_archive();
+        let res = self.backups.create_archive();

         if res.is_ok() {
-            self.remove_old_backups()?;
+            self.backups.remove_old_backups()?;
         }

         // The server's save feature needs to be enabled again even if the archive failed to create
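The start_time local disappears here because BackupManager now records the timestamp itself: create_archive stores it in the new start_time field when it runs.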
@@ -112,77 +116,4 @@ impl ServerProcess {

         res
     }
-
-    /// Create a new compressed backup archive of the server's data.
-    fn create_backup_archive(&mut self) -> std::io::Result<()> {
-        // Create a gzip-compressed tarball of the worlds folder
-        let filename = format!(
-            "{}",
-            chrono::offset::Local::now().format("%Y-%m-%d_%H-%M-%S.tar.gz")
-        );
-        let path = self.backup_dir.join(filename);
-        let tar_gz = std::fs::File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut tar = tar::Builder::new(enc);
-
-        tar.append_dir_all("worlds", &self.world_dir)?;
-
-        // Add all files from the config directory that aren't the cache
-        for entry in self
-            .config_dir
-            .read_dir()?
-            .filter_map(|e| e.ok())
-            .filter(|e| e.file_name() != "cache")
-        {
-            let tar_path = Path::new("config").join(entry.file_name());
-
-            if entry.file_type()?.is_dir() {
-                tar.append_dir_all(tar_path, entry.path())?;
-            } else {
-                tar.append_path_with_name(entry.path(), tar_path)?;
-            }
-        }
-
-        // We add a file to the backup describing for what version it was made
-        let info = format!("{} {}", self.type_, self.version);
-        let info_bytes = info.as_bytes();
-
-        let mut header = tar::Header::new_gnu();
-        header.set_size(info_bytes.len().try_into().unwrap());
-        header.set_mode(0o100644);
-        unsafe {
-            header.set_gid(getegid().into());
-            header.set_uid(geteuid().into());
-        }
-
-        tar.append_data(&mut header, "info.txt", info_bytes)?;
-
-        // Backup file gets finalized in the drop
-
-        Ok(())
-    }
-
-    /// Remove the oldest backups
-    fn remove_old_backups(&mut self) -> std::io::Result<()> {
-        // The naming format used allows us to sort the backups by name and still get a sorting by
-        // creation time
-        let mut backups = self
-            .backup_dir
-            .read_dir()?
-            .filter_map(|res| res.map(|e| e.path()).ok())
-            .collect::<Vec<PathBuf>>();
-        backups.sort();
-
-        let max_backups: usize = self.max_backups.try_into().unwrap();
-
-        if backups.len() > max_backups {
-            let excess_backups = backups.len() - max_backups;
-
-            for backup in &backups[0..excess_backups] {
-                std::fs::remove_file(backup)?;
-            }
-        }
-
-        Ok(())
-    }
 }
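One hedged way to resolve the TODO left in the new module: since type_ and version stayed on ServerProcess, the caller could pass the description string in, and BackupManager would append it with the same tar-header technique the removed code used. A sketch only; append_info_file and its description parameter are assumptions, not part of this commit:

    // Append an in-memory info.txt entry describing the server version.
    // Hypothetical helper; the description would come from ServerProcess.
    fn append_info_file<W: std::io::Write>(
        tar: &mut tar::Builder<W>,
        description: &str,
    ) -> std::io::Result<()> {
        let info_bytes = description.as_bytes();

        let mut header = tar::Header::new_gnu();
        header.set_size(info_bytes.len() as u64);
        header.set_mode(0o100644);
        // uid/gid default to 0 here; the removed code set them via geteuid()/getegid()

        // append_data fills in the header checksum before writing the entry
        tar.append_data(&mut header, "info.txt", info_bytes)?;
        Ok(())
    }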