refactor: store backups in nested vecs instead; introduce concept of chains
branch: incremental-backups
parent: f7235fb342
commit: bb7b57899b
@@ -103,8 +103,6 @@ impl BackupDelta {
 /// Represents a successful backup
 #[derive(Debug, Serialize, Deserialize)]
 pub struct Backup {
-    #[serde(skip)]
-    previous: Option<Arc<Backup>>,
     /// When the backup was started (also corresponds to the name)
     start_time: chrono::DateTime<Utc>,
     /// Type of the backup
@@ -115,42 +113,14 @@ pub struct Backup {
 impl Backup {
     const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
 
-    /// Returns a pointer to this backup's previous backup by cloning the Arc pointer.
-    pub fn previous(&self) -> Option<Arc<Self>> {
-        self.previous.as_ref().map(Arc::clone)
-    }
-
-    /// Calculate the full state of the backup by applying all its ancestors' delta's in order,
-    /// starting from the last full ancestor.
-    pub fn state(&self) -> BackupResult<HashMap<PathBuf, HashSet<PathBuf>>> {
-        if self.type_ == BackupType::Full {
-            let mut state = HashMap::new();
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else if let Some(previous) = &self.previous {
-            let mut state = previous.state()?;
-            self.delta.apply(&mut state);
-
-            Ok(state)
-        } else {
-            return Err(BackupError::NoFullAncestor);
-        }
-    }
-
-    /// Returns the n'th ancestor of the given backup, if it exists.
-    pub fn ancestor(&self, n: u64) -> Option<Arc<Self>> {
-        if n == 0 {
-            None
-        } else if let Some(previous) = &self.previous {
-            if n == 1 {
-                Some(Arc::clone(previous))
-            } else {
-                previous.ancestor(n - 1)
-            }
-        } else {
-            None
-        }
-    }
+    pub fn state(backups: &Vec<Backup>) -> HashMap<PathBuf, HashSet<PathBuf>> {
+        let mut state: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
+
+        for backup in backups {
+            backup.delta.apply(&mut state);
+        }
+
+        state
+    }
 
     /// Return the path to a backup file by properly formatting the data.
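Note: Backup::state now folds a whole chain's deltas, in order, into a single map from directory-in-tar to the set of files it contains. The sketch below shows one BackupDelta shape that satisfies the apply contract used here; the added/removed fields are assumptions, only the apply(&self, &mut HashMap<...>) call is implied by this diff.

    use std::collections::{HashMap, HashSet};
    use std::path::PathBuf;

    // Hypothetical delta; the real BackupDelta is defined outside this diff.
    struct BackupDelta {
        added: HashMap<PathBuf, HashSet<PathBuf>>,
        removed: HashMap<PathBuf, HashSet<PathBuf>>,
    }

    impl BackupDelta {
        // Mutates the accumulated state in place, matching the call sites above.
        fn apply(&self, state: &mut HashMap<PathBuf, HashSet<PathBuf>>) {
            for (dir, files) in &self.added {
                state.entry(dir.clone()).or_default().extend(files.iter().cloned());
            }
            for (dir, files) in &self.removed {
                if let Some(set) = state.get_mut(dir) {
                    for f in files {
                        set.remove(f);
                    }
                }
            }
        }
    }

(Taking &[Backup] rather than &Vec<Backup> would be the more idiomatic borrow, but the signature above is what the commit ships.)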
@@ -200,7 +170,6 @@ impl Backup {
         }
 
         Ok(Backup {
-            previous: None,
             type_: BackupType::Full,
             start_time,
             delta,
@@ -209,7 +178,8 @@ impl Backup {
 
     /// Create a new incremental backup from a given previous backup
     pub fn create_from<P: AsRef<Path>>(
-        previous: Arc<Backup>,
+        previous_state: HashMap<PathBuf, HashSet<PathBuf>>,
+        previous_start_time: chrono::DateTime<Utc>,
         backup_dir: P,
         dirs: Vec<(PathBuf, PathBuf)>,
     ) -> io::Result<Self> {
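Note: create_from no longer receives an Arc to the previous backup; the caller hands it the precomputed chain state and the previous start time. That removes the only fallible step (walking ancestors back to a full backup), which is why the NoFullAncestor error mapping disappears in the next hunk. A sketch of the assumed caller shape, mirroring create_backup further down:

    // Sketch: how a caller is expected to drive the new signature.
    let state = Backup::state(current_chain);
    let previous = current_chain.last().expect("chain has at least its full backup");
    let backup = Backup::create_from(state, previous.start_time, &backup_dir, dirs)?;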
@@ -220,9 +190,6 @@ impl Backup {
         let enc = GzEncoder::new(tar_gz, Compression::default());
         let mut ar = tar::Builder::new(enc);
 
-        let previous_state = previous
-            .state()
-            .map_err(|_| io::Error::new(io::ErrorKind::Other, "No Full ancestor"))?;
         let mut delta = BackupDelta::new();
 
         for (dir_in_tar, src_dir) in dirs {
@@ -233,7 +200,7 @@ impl Backup {
                 let path = entry?.path();
                 let stripped = path.strip_prefix(&src_dir).unwrap();
 
-                if !path.not_modified_since(previous.start_time) {
+                if !path.not_modified_since(previous_start_time) {
                     ar.append_path_with_name(&path, dir_in_tar.join(stripped))?;
                     added_files.insert(stripped.to_path_buf());
                 }
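Note: not_modified_since is an extension method that this diff only calls. A sketch of what such a helper could look like, assuming it compares the file's mtime against the given timestamp (the trait name and exact semantics here are assumptions, not part of the commit):

    use std::path::Path;
    use chrono::{DateTime, Utc};

    // Hypothetical extension trait; the real definition lives elsewhere in the crate.
    trait NotModifiedSince {
        fn not_modified_since(&self, t: DateTime<Utc>) -> bool;
    }

    impl NotModifiedSince for Path {
        fn not_modified_since(&self, t: DateTime<Utc>) -> bool {
            self.metadata()
                .and_then(|m| m.modified())
                .map(|mtime| DateTime::<Utc>::from(mtime) <= t)
                // If metadata is unreadable, treat the file as modified so it gets archived.
                .unwrap_or(false)
        }
    }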
@@ -252,7 +219,6 @@ impl Backup {
         }
 
         Ok(Backup {
-            previous: Some(previous),
             type_: BackupType::Incremental,
             start_time,
             delta,
@@ -264,8 +230,10 @@ pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
+    max_chain_length: u64,
     max_backups: u64,
     last_backup: Option<Arc<Backup>>,
+    chains: Vec<Vec<Backup>>,
 }
 
 impl BackupManager {
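Note: the two new fields drive everything below: chains owns the backups outright (replacing the Arc links), and max_chain_length caps how many backups one chain may hold. The intended layout, as an illustrative comment (invariant inferred from create_backup below):

    // chains[i][0]   -> the full backup that starts chain i
    // chains[i][1..] -> incrementals, each delta relative to the entry before it
    // chains.last()  -> the chain currently being extended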
@@ -282,6 +250,8 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
+            max_chain_length: 2,
+            chains: Vec::new(),
             last_backup: None,
         }
     }
@@ -304,13 +274,33 @@ impl BackupManager {
             (PathBuf::from("worlds"), self.world_dir.clone()),
         ];
 
-        let backup = if let Some(last_backup) = &self.last_backup {
-            Backup::create_from(Arc::clone(last_backup), &self.backup_dir, dirs)?
+        // I kinda hate this statement, please just let me combine let statements in if statements
+        // already
+        let backup = if let Some(current_chain) = self.chains.last() {
+            let current_chain_len: u64 = current_chain.len().try_into().unwrap();
+
+            if current_chain_len < self.max_chain_length {
+                if let Some(previous_backup) = current_chain.last() {
+                    let state = Backup::state(current_chain);
+
+                    Backup::create_from(state, previous_backup.start_time, &self.backup_dir, dirs)?
+                } else {
+                    Backup::create(&self.backup_dir, dirs)?
+                }
+            } else {
+                self.chains.push(Vec::new());
+
+                Backup::create(&self.backup_dir, dirs)?
+            }
         } else {
+            self.chains.push(Vec::new());
+
             Backup::create(&self.backup_dir, dirs)?
         };
 
-        self.last_backup = Some(Arc::new(backup));
+        // The above statement always creates this element, so this unwrap is safe
+        self.chains.last_mut().unwrap().push(backup);
+
         self.write_json()?;
 
         Ok(())
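Note: the nested if/else exists because stable Rust could not yet combine `if let` with extra conditions (let-chains), which is what the in-code comment laments. The bookkeeping itself is small; a self-contained simulation of the rotation with max_chain_length = 2, as configured in the constructor above:

    // Each chain fills up to max_chain_length entries, then a new chain
    // (and therefore a new full backup) is started.
    fn main() {
        let max_chain_length = 2;
        let mut chains: Vec<Vec<&str>> = Vec::new();

        for _ in 0..5 {
            match chains.last() {
                Some(chain) if !chain.is_empty() && chain.len() < max_chain_length => {
                    chains.last_mut().unwrap().push("incremental");
                }
                _ => {
                    chains.push(vec!["full"]);
                }
            }
        }

        assert_eq!(
            chains,
            vec![vec!["full", "incremental"], vec!["full", "incremental"], vec!["full"]]
        );
    }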
@@ -351,35 +341,25 @@ impl BackupManager {
     // }
 
     pub fn write_json(&self) -> std::io::Result<()> {
-        // Put the backup chain into a list that can be serialized
-        let mut backups: Vec<Arc<Backup>> = Vec::new();
-        let mut backup_opt = &self.last_backup;
-
-        while let Some(backup) = backup_opt {
-            backups.insert(0, Arc::clone(backup));
-            backup_opt = &backup.previous;
-        }
-
         let json_file = File::create(self.backup_dir.join(Self::METADATA_FILE))?;
-        serde_json::to_writer(json_file, &backups)?;
+        serde_json::to_writer(json_file, &self.chains)?;
 
         Ok(())
     }
 
     pub fn load_json(&mut self) -> std::io::Result<()> {
-        let json_file = File::open(self.backup_dir.join(Self::METADATA_FILE))?;
-        let mut backups: Vec<Arc<Backup>> = serde_json::from_reader(json_file)?;
-
-        if !backups.is_empty() {
-            for i in 1..backups.len() {
-                let previous = Arc::clone(&backups[i - 1]);
-                // We can unwrap here, as this function creates the first instance of each Arc,
-                // meaning we're definitely the only pointer.
-                Arc::get_mut(&mut backups[i]).unwrap().previous = Some(previous);
-            }
-
-            self.last_backup = Some(Arc::clone(backups.last().unwrap()));
-        }
+        let json_file = match File::open(self.backup_dir.join(Self::METADATA_FILE)) {
+            Ok(f) => f,
+            Err(e) => {
+                // Don't error out if the file isn't there, it will be created when necessary
+                if e.kind() == io::ErrorKind::NotFound {
+                    return Ok(());
+                } else {
+                    return Err(e);
+                }
+            }
+        };
+        self.chains = serde_json::from_reader(json_file)?;
 
         Ok(())
     }
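Note: persistence is now a plain serde round trip over chains, and load_json tolerates a missing metadata file instead of failing on first run. A standalone sketch of the same pattern (String stands in for Backup; a match guard replaces the inner if/else):

    use std::fs::File;
    use std::io;

    type Chains = Vec<Vec<String>>; // stand-in for Vec<Vec<Backup>>

    fn write_json(path: &str, chains: &Chains) -> io::Result<()> {
        let f = File::create(path)?;
        serde_json::to_writer(f, chains)?;
        Ok(())
    }

    fn load_json(path: &str) -> io::Result<Chains> {
        let f = match File::open(path) {
            Ok(f) => f,
            // A missing file just means no backups have been made yet.
            Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(Chains::new()),
            Err(e) => return Err(e),
        };
        Ok(serde_json::from_reader(f)?)
    }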