feat: re-implement remove old backups
parent bb7b57899b
commit b51d951688
@@ -65,7 +65,8 @@ fn commands_backup(cli: &Cli, _args: &BackupArgs) -> io::Result<()> {
         cli.max_backups,
     )?;
 
-    manager.create_backup()
+    manager.create_backup()?;
+    manager.remove_old_backups()
 }
 
 fn main() -> io::Result<()> {
@@ -230,7 +230,8 @@ pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
-    max_chain_length: u64,
+    chain_len: u64,
+    chains_to_keep: u64,
     max_backups: u64,
     last_backup: Option<Arc<Backup>>,
     chains: Vec<Vec<Backup>>,
@@ -250,7 +251,8 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
-            max_chain_length: 2,
+            chain_len: 2,
+            chains_to_keep: 1,
             chains: Vec::new(),
             last_backup: None,
         }
@@ -279,7 +281,7 @@ impl BackupManager {
         let backup = if let Some(current_chain) = self.chains.last() {
             let current_chain_len: u64 = current_chain.len().try_into().unwrap();
 
-            if current_chain_len < self.max_chain_length {
+            if current_chain_len < self.chain_len {
                 if let Some(previous_backup) = current_chain.last() {
                     let state = Backup::state(current_chain);
 
@@ -307,38 +309,30 @@ impl BackupManager {
     }
 
     /// Remove the oldest backups
-    // pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
-    //     if let Some(last_backup) = &self.last_backup {
-    //         let last_valid_ancestor = last_backup.ancestor(self.max_backups - 1);
-    //         let ancestor = last_valid_ancestor.previous();
-
-    //         while let Some(backup) = &ancestor {
-    //             let path = Backup::path(&self.backup_dir, backup.start_time);
-    //             std::fs::remove_file(path)?;
-    //         }
-    //     }
-
-    //     // The naming format used allows us to sort the backups by name and still get a sorting by
-    //     // creation time
-    //     let mut backups = self
-    //         .backup_dir
-    //         .read_dir()?
-    //         .filter_map(|res| res.map(|e| e.path()).ok())
-    //         .collect::<Vec<PathBuf>>();
-    //     backups.sort();
-
-    //     let max_backups: usize = self.max_backups.try_into().unwrap();
-
-    //     if backups.len() > max_backups {
-    //         let excess_backups = backups.len() - max_backups;
-
-    //         for backup in &backups[0..excess_backups] {
-    //             std::fs::remove_file(backup)?;
-    //         }
-    //     }
-
-    //     Ok(())
-    // }
+    pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
+        let chains_to_store: usize = self.chains_to_keep.try_into().unwrap();
+
+        if chains_to_store < self.chains.len() {
+            let mut remove_count: usize = self.chains.len() - chains_to_store;
+
+            // We only count finished chains towards the list of stored chains
+            let chain_len: usize = self.chain_len.try_into().unwrap();
+            if self.chains.last().unwrap().len() < chain_len {
+                remove_count -= 1;
+            }
+
+            for chain in self.chains.drain(..remove_count) {
+                for backup in chain {
+                    let path = Backup::path(&self.backup_dir, backup.start_time);
+                    std::fs::remove_file(path)?;
+                }
+            }
+        }
+
+        self.write_json()?;
+
+        Ok(())
+    }
 
     pub fn write_json(&self) -> std::io::Result<()> {
         let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
@@ -70,12 +70,13 @@ impl ServerProcess {
         let start_time = chrono::offset::Utc::now();
         let res = self.backups.create_backup();
 
-        // if res.is_ok() {
-        //     self.backups.remove_old_backups()?;
-        // }
-
         // The server's save feature needs to be enabled again even if the archive failed to create
         self.custom("save-on")?;
+        self.custom("save-all")?;
+
+        if res.is_ok() {
+            self.backups.remove_old_backups()?;
+        }
 
         let duration = chrono::offset::Utc::now() - start_time;
         let duration_str = format!(
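
Below is a minimal standalone sketch of the retention arithmetic the new remove_old_backups uses. The names are assumed for illustration: chains_to_remove is a hypothetical free function, and chains of plain integers stand in for the crate's Backup values; it is not the crate's API.

// Hypothetical helper mirroring the counting logic in remove_old_backups:
// keep the newest `chains_to_keep` finished chains, where a chain is finished
// once it holds `chain_len` backups, and never count a chain still being built.
fn chains_to_remove(chains: &[Vec<u32>], chains_to_keep: usize, chain_len: usize) -> usize {
    if chains_to_keep >= chains.len() {
        return 0;
    }

    let mut remove_count = chains.len() - chains_to_keep;

    // The trailing chain may still be under construction; it does not count
    // towards the kept chains, so drop one fewer chain from the front.
    if chains.last().map_or(false, |chain| chain.len() < chain_len) {
        remove_count -= 1;
    }

    remove_count
}

fn main() {
    // Two finished chains of length 2 and one chain still in progress.
    let chains = vec![vec![1, 2], vec![3, 4], vec![5]];
    // With chains_to_keep = 1 and chain_len = 2, only the oldest finished
    // chain ([1, 2]) would be removed; [3, 4] and the in-progress [5] stay.
    assert_eq!(chains_to_remove(&chains, 1, 2), 1);

    // Fewer chains than we want to keep: nothing is removed.
    assert_eq!(chains_to_remove(&[vec![1, 2]], 1, 2), 0);
}

This follows the in-diff comment that only finished chains count towards the stored chains: the chain currently being extended by create_backup is always kept in addition to the chains_to_keep finished ones.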