feat: re-implement remove old backups
parent bb7b57899b
commit b51d951688
@@ -65,7 +65,8 @@ fn commands_backup(cli: &Cli, _args: &BackupArgs) -> io::Result<()> {
         cli.max_backups,
     )?;

-    manager.create_backup()
+    manager.create_backup()?;
+    manager.remove_old_backups()
 }

 fn main() -> io::Result<()> {
@@ -230,7 +230,8 @@ pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
-    max_chain_length: u64,
+    chain_len: u64,
+    chains_to_keep: u64,
     max_backups: u64,
     last_backup: Option<Arc<Backup>>,
     chains: Vec<Vec<Backup>>,
@@ -250,7 +251,8 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
-            max_chain_length: 2,
+            chain_len: 2,
+            chains_to_keep: 1,
             chains: Vec::new(),
             last_backup: None,
         }
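
With the defaults above (chain_len = 2 and chains_to_keep = 1), a chain is presumably a full backup followed by incremental backups, and the pruning added later in this commit keeps one complete chain plus whatever chain is currently being built. A minimal sketch of the resulting upper bound on stored backups, assuming pruning runs after every backup; the helper below is illustrative and not part of the source:

    // Illustrative only: rough upper bound on backups left on disk after
    // pruning, assuming `chains_to_keep` complete chains are kept plus one
    // in-progress chain that is at most one backup short of `chain_len`.
    fn max_backups_on_disk(chain_len: u64, chains_to_keep: u64) -> u64 {
        chain_len * chains_to_keep + (chain_len - 1)
    }

    fn main() {
        // With the defaults: one complete chain of two backups plus at most
        // one backup of the chain currently being built.
        assert_eq!(max_backups_on_disk(2, 1), 3);
    }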
@@ -279,7 +281,7 @@ impl BackupManager {
         let backup = if let Some(current_chain) = self.chains.last() {
             let current_chain_len: u64 = current_chain.len().try_into().unwrap();

-            if current_chain_len < self.max_chain_length {
+            if current_chain_len < self.chain_len {
                 if let Some(previous_backup) = current_chain.last() {
                     let state = Backup::state(current_chain);

@@ -307,38 +309,30 @@ impl BackupManager {
     }

     /// Remove the oldest backups
-    // pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
-    // if let Some(last_backup) = &self.last_backup {
-    // let last_valid_ancestor = last_backup.ancestor(self.max_backups - 1);
-    // let ancestor = last_valid_ancestor.previous();
+    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        let chains_to_store: usize = self.chains_to_keep.try_into().unwrap();

-    // while let Some(backup) = &ancestor {
-    // let path = Backup::path(&self.backup_dir, backup.start_time);
-    // std::fs::remove_file(path)?;
-    // }
-    // }
+        if chains_to_store < self.chains.len() {
+            let mut remove_count: usize = self.chains.len() - chains_to_store;

-    // // The naming format used allows us to sort the backups by name and still get a sorting by
-    // // creation time
-    // let mut backups = self
-    // .backup_dir
-    // .read_dir()?
-    // .filter_map(|res| res.map(|e| e.path()).ok())
-    // .collect::<Vec<PathBuf>>();
-    // backups.sort();
+            // We only count finished chains towards the list of stored chains
+            let chain_len: usize = self.chain_len.try_into().unwrap();
+            if self.chains.last().unwrap().len() < chain_len {
+                remove_count -= 1;
+            }

-    // let max_backups: usize = self.max_backups.try_into().unwrap();
+            for chain in self.chains.drain(..remove_count) {
+                for backup in chain {
+                    let path = Backup::path(&self.backup_dir, backup.start_time);
+                    std::fs::remove_file(path)?;
+                }
+            }
+        }

-    // if backups.len() > max_backups {
-    // let excess_backups = backups.len() - max_backups;
+        self.write_json()?;

-    // for backup in &backups[0..excess_backups] {
-    // std::fs::remove_file(backup)?;
-    // }
-    // }
-
-    // Ok(())
-    // }
+        Ok(())
+    }

     pub fn write_json(&self) -> std::io::Result<()> {
         let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
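
The new remove_old_backups prunes whole chains rather than individual archives: it keeps chains_to_keep complete chains, never counts an unfinished trailing chain towards that limit, deletes the archive files of every older chain, and then rewrites the metadata JSON. A standalone sketch of the chain-dropping rule, with backups replaced by plain integers (illustrative; the real code resolves file paths via Backup::path and removes them with std::fs::remove_file):

    // Same pruning rule as above, on a simplified model where each backup is
    // a number and a chain is a Vec of them, ordered oldest chain first.
    fn prune(chains: &mut Vec<Vec<u32>>, chains_to_keep: usize, chain_len: usize) {
        if chains_to_keep < chains.len() {
            let mut remove_count = chains.len() - chains_to_keep;
            // An unfinished trailing chain does not count as a stored chain.
            if chains.last().unwrap().len() < chain_len {
                remove_count -= 1;
            }
            // The oldest chains sit at the front, so drain from the start.
            chains.drain(..remove_count);
        }
    }

    fn main() {
        // Two complete chains of length 2 and one chain still being built.
        let mut chains = vec![vec![1, 2], vec![3, 4], vec![5]];
        prune(&mut chains, 1, 2);
        // Only the oldest complete chain is dropped.
        assert_eq!(chains, vec![vec![3, 4], vec![5]]);
    }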
@@ -70,12 +70,13 @@ impl ServerProcess {
         let start_time = chrono::offset::Utc::now();
         let res = self.backups.create_backup();

-        // if res.is_ok() {
-        // self.backups.remove_old_backups()?;
-        // }
-
         // The server's save feature needs to be enabled again even if the archive failed to create
         self.custom("save-on")?;
         self.custom("save-all")?;
+
+        if res.is_ok() {
+            self.backups.remove_old_backups()?;
+        }
+
         let duration = chrono::offset::Utc::now() - start_time;
         let duration_str = format!(
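
The server path switches saving back on before deciding whether to prune, so a failed archive never leaves the server with saving disabled, and old backups are only removed once a new one exists. A sketch of that ordering with the server and backup manager stubbed out as closures; the function name, parameters, and return value are assumptions made for illustration:

    use std::io;

    // Illustrative ordering only; the real code calls self.custom("save-on"),
    // self.custom("save-all") and self.backups.remove_old_backups().
    fn backup_cycle(
        create_backup: impl FnOnce() -> io::Result<()>,
        reenable_saving: impl FnOnce() -> io::Result<()>,
        remove_old_backups: impl FnOnce() -> io::Result<()>,
    ) -> io::Result<()> {
        let res = create_backup();
        // Saving is re-enabled even if the archive failed to create.
        reenable_saving()?;
        // Old backups are only pruned after a successful new backup.
        if res.is_ok() {
            remove_old_backups()?;
        }
        res
    }

    fn main() -> io::Result<()> {
        backup_cycle(
            || Ok(()), // pretend the archive succeeded
            || Ok(()), // pretend save-on / save-all succeeded
            || Ok(()), // pretend pruning succeeded
        )
    }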