feat: temporarily disable "remove old backups"
ci/woodpecker/push/lint: Pipeline was successful
ci/woodpecker/push/clippy: Pipeline failed
ci/woodpecker/push/build: Pipeline was successful

incremental-backups
Jef Roosens 2023-06-15 22:54:17 +02:00
parent 8add96b39b
commit 27d7e681c3
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
2 changed files with 71 additions and 37 deletions


@@ -14,8 +14,7 @@ extern "C" {
     fn getegid() -> u32;
 }
 
-const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
-
+/// List all files in `src_dir` and all child directories.
 fn files(src_dir: PathBuf) -> io::Result<HashSet<PathBuf>> {
     let mut dirs = vec![src_dir.clone()];
     let mut files: HashSet<PathBuf> = HashSet::new();
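Only the head of files is visible in this hunk. A minimal sketch of how such a recursive listing typically proceeds, reconstructed from the two visible locals (dirs as a worklist of directories still to visit, files as the accumulated result set); this is an illustration under those assumptions, not the committed body:

use std::collections::HashSet;
use std::io;
use std::path::PathBuf;

fn files(src_dir: PathBuf) -> io::Result<HashSet<PathBuf>> {
    let mut dirs = vec![src_dir.clone()];
    let mut files: HashSet<PathBuf> = HashSet::new();

    // Depth-first walk: pop a directory, queue its subdirectories,
    // and record every non-directory entry encountered.
    while let Some(dir) = dirs.pop() {
        for entry in dir.read_dir()? {
            let path = entry?.path();

            if path.is_dir() {
                dirs.push(path);
            } else {
                files.insert(path);
            }
        }
    }

    Ok(files)
}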
@@ -159,7 +158,18 @@ pub struct Backup {
 }
 
 impl Backup {
-    /// Calculate the full state of the backup by applying all its ancestors delta's in order,
+    const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
+
+    /// Returns a pointer to this backup's previous backup by cloning the Arc pointer.
+    pub fn previous(&self) -> Option<Arc<Self>> {
+        if let Some(previous) = &self.previous {
+            Some(Arc::clone(&previous))
+        } else {
+            None
+        }
+    }
+
+    /// Calculate the full state of the backup by applying all its ancestors' delta's in order,
     /// starting from the last full ancestor.
     pub fn state(&self) -> BackupResult<HashMap<PathBuf, HashSet<PathBuf>>> {
         if self.type_ == BackupType::Full {
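The failed clippy pipeline in the header is plausibly triggered by this accessor: the if-let-else-None shape is exactly what clippy's manual_map lint flags, and the extra borrow in Arc::clone(&previous) is what needless_borrow flags. A hypothetical equivalent the lint would suggest:

    pub fn previous(&self) -> Option<Arc<Self>> {
        self.previous.as_ref().map(Arc::clone)
    }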
@@ -177,8 +187,27 @@ impl Backup {
         }
     }
 
-    pub fn set_previous(&mut self, previous: Arc<Self>) {
-        self.previous = Some(previous);
+    /// Returns the n'th ancestor of the given backup, if it exists.
+    pub fn ancestor(&self, n: u64) -> Option<Arc<Self>> {
+        if n == 0 {
+            None
+        } else if let Some(previous) = &self.previous {
+            if n == 1 {
+                Some(Arc::clone(&previous))
+            } else {
+                previous.ancestor(n - 1)
+            }
+        } else {
+            None
+        }
+    }
+
+    /// Return the path to a backup file by properly formatting the data.
+    pub fn path<P: AsRef<Path>>(backup_dir: P, start_time: chrono::DateTime<Utc>) -> PathBuf {
+        let backup_dir = backup_dir.as_ref();
+        let filename = format!("{}", start_time.format(Self::FILENAME_FORMAT));
+
+        backup_dir.join(filename)
     }
 
     /// Create a new Full backup, populated with the given directories.
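Two properties of these helpers are worth spelling out. ancestor is 1-indexed: ancestor(0) is always None, ancestor(1) is the direct predecessor, and the recursion walks the previous chain until it runs out. path is a pure function of the backup directory and start time, so the same pair always maps to the same archive name. Hypothetical values, assuming a backup b whose chain is full <- b:

    // b.ancestor(0) == None          (by definition)
    // b.ancestor(1) == Some(full)    (direct predecessor)
    // b.ancestor(2) == None          (chain exhausted)

    // With FILENAME_FORMAT as defined above, a start time of
    // 2023-06-15 22:54:17 UTC yields "backups/2023-06-15_22-54-17.tar.gz":
    let archive = Backup::path("backups", chrono::offset::Utc::now());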
@@ -196,11 +225,9 @@ impl Backup {
         backup_dir: P,
         dirs: Vec<(PathBuf, PathBuf)>,
     ) -> io::Result<Self> {
-        let backup_dir = backup_dir.as_ref();
         let start_time = chrono::offset::Utc::now();
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = backup_dir.join(filename);
+        let path = Self::path(backup_dir, start_time);
 
         let tar_gz = File::create(path)?;
         let enc = GzEncoder::new(tar_gz, Compression::default());
         let mut ar = tar::Builder::new(enc);
@@ -234,17 +261,16 @@ impl Backup {
         backup_dir: P,
         dirs: Vec<(PathBuf, PathBuf)>,
     ) -> io::Result<Self> {
-        let backup_dir = backup_dir.as_ref();
         let start_time = chrono::offset::Utc::now();
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = backup_dir.join(filename);
+        let path = Self::path(backup_dir, start_time);
 
         let tar_gz = File::create(path)?;
         let enc = GzEncoder::new(tar_gz, Compression::default());
         let mut ar = tar::Builder::new(enc);
 
-        // TODO remove unwrap
-        let previous_state = previous.state().unwrap();
+        let previous_state = previous
+            .state()
+            .map_err(|_| io::Error::new(io::ErrorKind::Other, "No Full ancestor"))?;
 
         let mut delta = BackupDelta::new();
         for (dir_in_tar, src_dir) in dirs {
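The TODO-marked unwrap is gone: state() returns a BackupResult, and map_err converts its error into an io::Error so the surrounding io::Result function can propagate it with ?. The same pattern in isolation (names hypothetical):

use std::io;

fn domain_op() -> Result<(), String> {
    Err(String::from("No Full ancestor"))
}

fn io_op() -> io::Result<()> {
    // Map the domain error into io::Error so `?` can propagate it.
    domain_op().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
    Ok(())
}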
@@ -337,28 +363,38 @@ impl BackupManager {
     }
 
     /// Remove the oldest backups
-    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
-        // The naming format used allows us to sort the backups by name and still get a sorting by
-        // creation time
-        let mut backups = self
-            .backup_dir
-            .read_dir()?
-            .filter_map(|res| res.map(|e| e.path()).ok())
-            .collect::<Vec<PathBuf>>();
-        backups.sort();
-
-        let max_backups: usize = self.max_backups.try_into().unwrap();
-
-        if backups.len() > max_backups {
-            let excess_backups = backups.len() - max_backups;
-
-            for backup in &backups[0..excess_backups] {
-                std::fs::remove_file(backup)?;
-            }
-        }
-
-        Ok(())
-    }
+    // pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
+    //     if let Some(last_backup) = &self.last_backup {
+    //         let last_valid_ancestor = last_backup.ancestor(self.max_backups - 1);
+    //         let ancestor = last_valid_ancestor.previous();
+
+    //         while let Some(backup) = &ancestor {
+    //             let path = Backup::path(&self.backup_dir, backup.start_time);
+    //             std::fs::remove_file(path)?;
+    //         }
+    //     }
+
+    //     // The naming format used allows us to sort the backups by name and still get a sorting by
+    //     // creation time
+    //     let mut backups = self
+    //         .backup_dir
+    //         .read_dir()?
+    //         .filter_map(|res| res.map(|e| e.path()).ok())
+    //         .collect::<Vec<PathBuf>>();
+    //     backups.sort();
+
+    //     let max_backups: usize = self.max_backups.try_into().unwrap();
+
+    //     if backups.len() > max_backups {
+    //         let excess_backups = backups.len() - max_backups;
+
+    //         for backup in &backups[0..excess_backups] {
+    //             std::fs::remove_file(backup)?;
+    //         }
+    //     }
+
+    //     Ok(())
+    // }
 
     pub fn write_json(&self) -> std::io::Result<()> {
         // Put the backup chain into a list that can be serialized
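The new draft at the top of the commented-out block above walks the backup chain instead of sorting directory entries, but as written it cannot work yet: last_valid_ancestor is an Option (so previous() cannot be called on it directly), and the while let loop never reassigns ancestor, so it would spin on the same backup forever. That is consistent with the commit title disabling the function for now. A hedged sketch of where the draft seems to be heading, assuming the previous(), ancestor() and path() helpers introduced above plus a start_time field on Backup; this is not the committed implementation:

    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
        if let Some(last_backup) = &self.last_backup {
            // The chain tail plus its (max_backups - 1) ancestors survive;
            // everything older than that gets removed.
            let mut ancestor = last_backup
                .ancestor(self.max_backups - 1)
                .and_then(|backup| backup.previous());

            while let Some(backup) = ancestor {
                let path = Backup::path(&self.backup_dir, backup.start_time);
                std::fs::remove_file(path)?;

                // Advance down the chain; the commented draft above is
                // missing this step.
                ancestor = backup.previous();
            }
        }

        Ok(())
    }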
@@ -385,9 +421,7 @@ impl BackupManager {
             let previous = Arc::clone(&backups[i - 1]);
             // We can unwrap here, as this function creates the first instance of each Arc,
             // meaning we're definitely the only pointer.
-            Arc::get_mut(&mut backups[i])
-                .unwrap()
-                .set_previous(previous);
+            Arc::get_mut(&mut backups[i]).unwrap().previous = Some(previous);
         }
 
         self.last_backup = Some(Arc::clone(backups.last().unwrap()));
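The chain-wiring now assigns the previous field directly instead of going through the removed set_previous setter. The unwrap is justified by Arc::get_mut's contract: it returns Some only while the reference count is exactly one. A standalone illustration:

use std::sync::Arc;

fn main() {
    let mut a = Arc::new(5);

    // Sole owner: get_mut grants mutable access.
    *Arc::get_mut(&mut a).unwrap() = 6;

    // A second owner exists now, so get_mut returns None.
    let _b = Arc::clone(&a);
    assert!(Arc::get_mut(&mut a).is_none());
}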


@@ -70,9 +70,9 @@ impl ServerProcess {
         let start_time = chrono::offset::Utc::now();
         let res = self.backups.create_backup();
 
-        if res.is_ok() {
-            self.backups.remove_old_backups()?;
-        }
+        // if res.is_ok() {
+        //     self.backups.remove_old_backups()?;
+        // }
 
         // The server's save feature needs to be enabled again even if the archive failed to create
         self.custom("save-on")?;