refactor: store backups in nested vecs instead; introduce concept of chains

branch: incremental-backups
parent f7235fb342
commit bb7b57899b
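A note on the new layout, as a sketch with illustrative names rather than code from this commit: chains is a Vec<Vec<Backup>> where each inner vec is one chain, a full backup followed by the incremental backups layered on top of it, and the last inner vec is the chain currently being extended.

fn main() {
    // Illustrative stand-in for the Backup struct: just a label per backup.
    // chains[i][0] is a full backup; every later entry is incremental.
    let chains: Vec<Vec<&str>> = vec![
        vec!["full", "incr", "incr"], // a finished chain
        vec!["full"],                 // the chain currently being extended
    ];

    // New backups always go onto the last chain.
    assert_eq!(chains.last().unwrap().len(), 1);
}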
@@ -103,8 +103,6 @@ impl BackupDelta {
 /// Represents a successful backup
 #[derive(Debug, Serialize, Deserialize)]
 pub struct Backup {
-    #[serde(skip)]
-    previous: Option<Arc<Backup>>,
     /// When the backup was started (also corresponds to the name)
     start_time: chrono::DateTime<Utc>,
     /// Type of the backup
@@ -115,42 +113,14 @@ pub struct Backup {
 impl Backup {
     const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
 
-    /// Returns a pointer to this backup's previous backup by cloning the Arc pointer.
-    pub fn previous(&self) -> Option<Arc<Self>> {
-        self.previous.as_ref().map(Arc::clone)
-    }
+    pub fn state(backups: &Vec<Backup>) -> HashMap<PathBuf, HashSet<PathBuf>> {
+        let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
 
-    /// Calculate the full state of the backup by applying all its ancestors' delta's in order,
-    /// starting from the last full ancestor.
-    pub fn state(&self) -> BackupResult<HashMap<PathBuf, HashSet<PathBuf>>> {
-        if self.type_ == BackupType::Full {
-            let mut state = HashMap::new();
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else if let Some(previous) = &self.previous {
-            let mut state = previous.state()?;
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else {
-            return Err(BackupError::NoFullAncestor);
+        for backup in backups {
+            backup.delta.apply(&mut state);
         }
-    }
 
-    /// Returns the n'th ancestor of the given backup, if it exists.
-    pub fn ancestor(&self, n: u64) -> Option<Arc<Self>> {
-        if n == 0 {
-            None
-        } else if let Some(previous) = &self.previous {
-            if n == 1 {
-                Some(Arc::clone(previous))
-            } else {
-                previous.ancestor(n - 1)
-            }
-        } else {
-            None
-        }
+        state
     }
 
     /// Return the path to a backup file by properly formatting the data.
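The recursive walk removed above and the new fold compute the same full state; here is a self-contained sketch of the new approach, with a simplified Delta stand-in since BackupDelta's fields don't appear in this diff:

use std::collections::{HashMap, HashSet};
use std::path::PathBuf;

// Simplified stand-in for BackupDelta: per directory in the tarball, the
// set of files this backup added. Replaying every delta of a chain front
// to back rebuilds the full state, with no recursion and no
// NoFullAncestor error case left over.
struct Delta {
    added: HashMap<PathBuf, HashSet<PathBuf>>,
}

impl Delta {
    fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
        for (dir, files) in &self.added {
            state.entry(dir.clone()).or_default().extend(files.iter().cloned());
        }
    }
}

// Mirrors the new Backup::state: a plain fold over the chain's deltas.
fn state_of_chain(deltas: &[Delta]) -> HashMap<PathBuf, HashSet<PathBuf>> {
    let mut state = HashMap::new();
    for delta in deltas {
        delta.apply(&mut state);
    }
    state
}

fn main() {
    let deltas = vec![Delta { added: HashMap::new() }];
    assert!(state_of_chain(&deltas).is_empty());
}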
@@ -200,7 +170,6 @@ impl Backup {
         }
 
         Ok(Backup {
-            previous: None,
             type_: BackupType::Full,
             start_time,
             delta,
@@ -209,7 +178,8 @@ impl Backup {
 
     /// Create a new incremental backup from a given previous backup
     pub fn create_from<P: AsRef<Path>>(
-        previous: Arc<Backup>,
+        previous_state: HashMap<PathBuf, HashSet<PathBuf>>,
+        previous_start_time: chrono::DateTime<Utc>,
         backup_dir: P,
         dirs: Vec<(PathBuf, PathBuf)>,
     ) -> io::Result<Self> {
@@ -220,9 +190,6 @@ impl Backup {
         let enc = GzEncoder::new(tar_gz, Compression::default());
         let mut ar = tar::Builder::new(enc);
 
-        let previous_state = previous
-            .state()
-            .map_err(|_| io::Error::new(io::ErrorKind::Other, "No Full ancestor"))?;
         let mut delta = BackupDelta::new();
 
         for (dir_in_tar, src_dir) in dirs {
@@ -233,7 +200,7 @@ impl Backup {
             let path = entry?.path();
             let stripped = path.strip_prefix(&src_dir).unwrap();
 
-            if !path.not_modified_since(previous.start_time) {
+            if !path.not_modified_since(previous_start_time) {
                 ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
                 added_files.insert(stripped.to_path_buf());
             }
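not_modified_since itself isn't shown in this diff; a rough sketch of the kind of mtime check it presumably performs (using SystemTime here, whereas the real code compares against a chrono::DateTime<Utc>):

use std::io;
use std::path::Path;
use std::time::SystemTime;

// Assumed shape of the mtime test behind not_modified_since: a file is
// archived by an incremental backup only if it changed after the previous
// backup started.
pub fn modified_since(path: &Path, since: SystemTime) -> io::Result<bool> {
    let mtime = path.metadata()?.modified()?;
    Ok(mtime > since)
}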
@@ -252,7 +219,6 @@ impl Backup {
         }
 
         Ok(Backup {
-            previous: Some(previous),
             type_: BackupType::Incremental,
             start_time,
             delta,
@@ -264,8 +230,10 @@ pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
+    max_chain_length: u64,
     max_backups: u64,
     last_backup: Option<Arc<Backup>>,
+    chains: Vec<Vec<Backup>>,
 }
 
 impl BackupManager {
@@ -282,6 +250,8 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
+            max_chain_length: 2,
+            chains: Vec::new(),
             last_backup: None,
         }
     }
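With max_chain_length hardcoded to 2, a chain holds one full backup plus at most one incremental before the next backup opens a fresh chain. A small sketch of what that cap means for restores, following from the state() replay logic (helper name hypothetical, not part of the commit):

// Every backup in a chain depends on all earlier archives of that same
// chain, so a restore unpacks at most `max_chain_length` tarballs.
fn archives_needed_for_restore(index_in_chain: usize) -> usize {
    index_in_chain + 1
}

fn main() {
    // With chains capped at 2, restoring the newest backup touches at
    // most one full archive plus one incremental archive.
    assert_eq!(archives_needed_for_restore(1), 2);
}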
@@ -304,13 +274,33 @@ impl BackupManager {
             (PathBuf::from("worlds"), self.world_dir.clone()),
         ];
 
-        let backup = if let Some(last_backup) = &self.last_backup {
-            Backup::create_from(Arc::clone(last_backup), &self.backup_dir, dirs)?
+        // I kinda hate this statement, please just let me combine let statements in if statements
+        // already
+        let backup = if let Some(current_chain) = self.chains.last() {
+            let current_chain_len: u64 = current_chain.len().try_into().unwrap();
+
+            if current_chain_len < self.max_chain_length {
+                if let Some(previous_backup) = current_chain.last() {
+                    let state = Backup::state(current_chain);
+
+                    Backup::create_from(state, previous_backup.start_time, &self.backup_dir, dirs)?
+                } else {
+                    Backup::create(&self.backup_dir, dirs)?
+                }
+            } else {
+                self.chains.push(Vec::new());
+
+                Backup::create(&self.backup_dir, dirs)?
+            }
         } else {
+            self.chains.push(Vec::new());
+
             Backup::create(&self.backup_dir, dirs)?
         };
 
-        self.last_backup = Some(Arc::new(backup));
+        // The above statement always creates this element, so this unwrap is safe
+        self.chains.last_mut().unwrap().push(backup);
 
         self.write_json()?;
 
         Ok(())
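The nested if/else above encodes a small decision tree. Restated as a standalone, testable sketch (names hypothetical, not part of the commit):

// Given the current chain's length, decide which kind of backup to take
// and whether a new chain has to be opened first.
#[derive(Debug, PartialEq)]
enum Plan {
    Incremental,        // extend the current chain
    FullInCurrentChain, // a chain exists but is still empty
    FullInNewChain,     // no chain yet, or the current one is at capacity
}

fn plan(current_chain_len: Option<u64>, max_chain_length: u64) -> Plan {
    match current_chain_len {
        Some(0) => Plan::FullInCurrentChain,
        Some(n) if n < max_chain_length => Plan::Incremental,
        Some(_) => Plan::FullInNewChain,
        None => Plan::FullInNewChain,
    }
}

fn main() {
    assert_eq!(plan(None, 2), Plan::FullInNewChain);
    assert_eq!(plan(Some(0), 2), Plan::FullInCurrentChain);
    assert_eq!(plan(Some(1), 2), Plan::Incremental);
    assert_eq!(plan(Some(2), 2), Plan::FullInNewChain);
}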
@@ -351,35 +341,25 @@ impl BackupManager {
     // }
 
     pub fn write_json(&self) -> std::io::Result<()> {
-        // Put the backup chain into a list that can be serialized
-        let mut backups: Vec<Arc<Backup>> = Vec::new();
-        let mut backup_opt = &self.last_backup;
-
-        while let Some(backup) = backup_opt {
-            backups.insert(0, Arc::clone(backup));
-            backup_opt = &backup.previous;
-        }
-
         let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
-        serde_json::to_writer(json_file, &backups)?;
+        serde_json::to_writer(json_file, &self.chains)?;
 
         Ok(())
     }
 
     pub fn load_json(&mut self) -> std::io::Result<()> {
-        let json_file = File::open(self.backup_dir.join(Self::METADATA_FILE))?;
-        let mut backups: Vec<Arc<Backup>> = serde_json::from_reader(json_file)?;
-
-        if !backups.is_empty() {
-            for i in 1..backups.len() {
-                let previous = Arc::clone(&backups[i - 1]);
-                // We can unwrap here, as this function creates the first instance of each Arc,
-                // meaning we're definitely the only pointer.
-                Arc::get_mut(&mut backups[i]).unwrap().previous = Some(previous);
+        let json_file = match File::open(self.backup_dir.join(Self::METADATA_FILE)) {
+            Ok(f) => f,
+            Err(e) => {
+                // Don't error out if the file isn't there, it will be created when necessary
+                if e.kind() == io::ErrorKind::NotFound {
+                    return Ok(());
+                } else {
+                    return Err(e);
+                }
             }
-
-            self.last_backup = Some(Arc::clone(backups.last().unwrap()));
-        }
+        };
+        self.chains = serde_json::from_reader(json_file)?;
 
         Ok(())
     }
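Because chains owns its backups outright, the whole Vec<Vec<Backup>> round-trips through serde_json with no pointer fixup, which is what made the Arc re-linking loop in the old load_json unnecessary. A minimal sketch of that property, with Entry as a hypothetical stand-in for Backup:

use serde::{Deserialize, Serialize};

// Hypothetical stand-in for Backup: plain owned data, no Arc links.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Entry {
    name: String,
}

fn main() -> serde_json::Result<()> {
    let chains = vec![vec![Entry { name: "full".into() }, Entry { name: "incr".into() }]];

    // Nested Vecs of owned data serialize and deserialize directly.
    let json = serde_json::to_string(&chains)?;
    let back: Vec<Vec<Entry>> = serde_json::from_str(&json)?;
    assert_eq!(chains, back);

    Ok(())
}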