use std::collections::HashSet;
use std::fs::File;
use std::io;
use std::path::PathBuf;

use chrono::{DateTime, Utc};
use flate2::write::GzEncoder;
use flate2::Compression;

// libc accessors for the effective uid/gid, used by the (currently disabled)
// info-file code in `create_archive`
#[link(name = "c")]
extern "C" {
    fn geteuid() -> u32;
    fn getegid() -> u32;
}

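/// `strftime`-style pattern for backup file names. All fields are zero-padded,
/// so sorting the names lexicographically also sorts them chronologically
/// (e.g. "2024-01-15_03-30-00.tar.gz")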
static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";

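/// Creates incremental gzipped tar backups of the world and config directories,
/// and can prune the oldest archives once more than `max_backups` exist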
pub struct BackupManager {
    backup_dir: PathBuf,
    config_dir: PathBuf,
    world_dir: PathBuf,
    max_backups: u64,
    /// Start time of the last successful backup
    last_start_time: Option<DateTime<Utc>>,
    /// Files contained in the last successful backup, stored as
    /// `(path inside the archive, path on disk)` pairs
    last_files: HashSet<(PathBuf, PathBuf)>,
}

impl BackupManager {
    pub fn open(
        backup_dir: PathBuf,
        config_dir: PathBuf,
        world_dir: PathBuf,
        max_backups: u64,
    ) -> Self {
        BackupManager {
            backup_dir,
            config_dir,
            world_dir,
            max_backups,
            last_start_time: None,
            last_files: HashSet::new(),
        }
    }

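    /// Walk the world and config directories and collect everything that should
    /// go into the next backup, as `(path inside the archive, path on disk)` pairs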
    fn files_to_backup(&mut self) -> io::Result<HashSet<(PathBuf, PathBuf)>> {
        // Directories still to be walked, as (path in tar, path on disk) pairs
        let mut dirs = vec![
            (PathBuf::from("worlds"), self.world_dir.clone()),
            (PathBuf::from("config"), self.config_dir.clone()),
        ];
        let mut files: HashSet<(PathBuf, PathBuf)> = HashSet::new();

        while let Some((path_in_tar, path)) = dirs.pop() {
            for res in path.read_dir()? {
                let entry = res?;

                // Skip anything named "cache", at any depth
                if entry.file_name() == "cache" {
                    continue;
                }

                let new_path_in_tar = path_in_tar.join(entry.file_name());

                // All dirs get expanded recursively, while all files get returned as output
                // NOTE: empty directories never make it into the backup. Is this a problem?
                if entry.file_type()?.is_dir() {
                    dirs.push((new_path_in_tar, entry.path()));
                } else {
                    // Only add files that have been updated since the last backup
                    // (incremental backup). Files whose modification time cannot be
                    // read are always included, to err on the side of backing up.
                    if let Some(last_start_time) = self.last_start_time {
                        if let Ok(last_modified) = entry.path().metadata()?.modified() {
                            // Both timestamps are UTC instants, so they can be
                            // compared directly; no timezone conversion is needed
                            let t: DateTime<Utc> = last_modified.into();

                            if t < last_start_time {
                                continue;
                            }
                        }
                    }

                    files.insert((new_path_in_tar, entry.path()));
                }
            }
        }

        Ok(files)
    }

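    /// Write a timestamped `.tar.gz` archive of all files reported by
    /// `files_to_backup` into the backup directory, then record the start time
    /// and file set for the next incremental run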
    pub fn create_archive(&mut self) -> io::Result<()> {
        let start_time = Utc::now();

        let filename = start_time.format(FILENAME_FORMAT).to_string();
        let path = self.backup_dir.join(filename);
        let tar_gz = File::create(path)?;
        let enc = GzEncoder::new(tar_gz, Compression::default());
        let mut ar = tar::Builder::new(enc);

        let files = self.files_to_backup()?;

        for (path_in_tar, path) in &files {
            ar.append_path_with_name(path, path_in_tar)?;
        }

        // Diagnostic output: the sizes of the current and previous backup sets,
        // and the entries from the previous set that are absent from this one
        let deleted_files = self.last_files.difference(&files);

        println!("{} {}", files.len(), self.last_files.len());

        for (path_in_tar, path) in deleted_files {
            println!("{path_in_tar:?}: {path:?}");
        }

        // TODO re-add this info file in some way
        // We add a file to the backup describing for what version it was made
        // let info = format!("{} {}", self.type_, self.version);
        // let info_bytes = info.as_bytes();

        // let mut header = tar::Header::new_gnu();
        // header.set_size(info_bytes.len().try_into().unwrap());
        // header.set_mode(0o100644);
        // unsafe {
        //     header.set_gid(getegid().into());
        //     header.set_uid(geteuid().into());
        // }

        // ar.append_data(&mut header, "info.txt", info_bytes)?;

        // Finish the archive explicitly so write errors surface here instead of
        // being silently discarded by the Drop impls of the tar builder and the
        // gzip encoder
        ar.into_inner()?.finish()?;

        // After a successful backup, we store the original metadata
        self.last_start_time = Some(start_time);
        self.last_files = files;

        Ok(())
    }

    /// Remove the oldest backups until at most `max_backups` archives remain
    pub fn remove_old_backups(&mut self) -> io::Result<()> {
        // The naming format used allows us to sort the backups by name and still
        // get a sorting by creation time
        let mut backups = self
            .backup_dir
            .read_dir()?
            .filter_map(|res| res.map(|e| e.path()).ok())
            .collect::<Vec<PathBuf>>();
        backups.sort();

        let max_backups: usize = self.max_backups.try_into().unwrap();

        if backups.len() > max_backups {
            let excess_backups = backups.len() - max_backups;

            // The sort above puts the oldest backups first
            for backup in &backups[0..excess_backups] {
                std::fs::remove_file(backup)?;
            }
        }

        Ok(())
    }
}
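
// A minimal usage sketch, not part of the original module: drives one full
// backup cycle. The directory paths and retention count below are illustrative
// placeholders, not values taken from this codebase.
#[allow(dead_code)]
fn run_backup_cycle() -> io::Result<()> {
    let mut manager = BackupManager::open(
        PathBuf::from("/srv/server/backups"), // hypothetical backup directory
        PathBuf::from("/srv/server/config"),  // hypothetical config directory
        PathBuf::from("/srv/server/worlds"),  // hypothetical world directory
        7,                                    // keep at most seven archives
    );

    // The first call writes a full backup; later calls only pick up files
    // modified since the previous run
    manager.create_archive()?;
    manager.remove_old_backups()?;

    Ok(())
}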