feat: lots of backup stuff

parent 703a25e8be
commit b7a678e32f

@@ -1,3 +1,3 @@
 [alias]
-runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
-runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper.jar"
+runs = "run -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"
+runrs = "run --release -- paper 1.19.4-545 --config data/config --backup data/backups --world data/worlds --jar data/paper-1.19.4-525.jar"

@@ -1,5 +1,7 @@
+use chrono::{Local, Utc};
 use flate2::write::GzEncoder;
 use flate2::Compression;
+use std::collections::{HashMap, HashSet};
 use std::fs::File;
 use std::io;
 use std::path::{Path, PathBuf};
@@ -10,15 +12,95 @@ extern "C" {
     fn getegid() -> u32;
 }

-static FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
+const FILENAME_FORMAT: &str = "%Y-%m-%d_%H-%M-%S.tar.gz";
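+// Lexicographic order of the generated filenames matches chronological order,
+// which remove_old_backups relies on when sorting backups by name.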
 
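+/// Whether a backup stores a full copy of the data, or only the changes since
+/// the previous backup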
+pub enum BackupType {
+    Full,
+    Incremental,
+}
+
+/// Represents a successful backup
+pub struct Backup {
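+    /// The backup this one builds on top of; `None` for full backups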
+    previous: Option<Box<Backup>>,
+    /// When the backup was started (also corresponds to the name)
+    start_time: chrono::DateTime<Utc>,
+    /// Type of the backup
+    type_: BackupType,
+    /// What files were added/modified in each part of the tarball.
+    pub added: HashMap<PathBuf, HashSet<PathBuf>>,
+    /// What files were removed in this backup, in comparison to the previous backup. For full
+    /// backups, this will always be empty, as they do not consider previous backups.
+    /// The map stores a separate list for each top-level directory, as the contents of these
+    /// directories can come from different source directories.
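+    /// e.g. removed["worlds"] could contain "world_nether/level.dat" (hypothetical
+    /// path) if that file existed in the previous backup but has since been deleted.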
+    pub removed: HashMap<PathBuf, HashSet<PathBuf>>,
+}
+
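+/// Recursively collects all files under `src_dir`, returned as paths relative to
+/// `src_dir`. Entries named "cache" are skipped.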
+fn files(src_dir: PathBuf) -> io::Result<HashSet<PathBuf>> {
+    let mut dirs = vec![src_dir.clone()];
+    let mut files: HashSet<PathBuf> = HashSet::new();
+
+    while let Some(dir) = dirs.pop() {
+        for res in dir.read_dir()? {
+            let entry = res?;
+
+            if entry.file_name() == "cache" {
+                continue;
+            }
+
+            if entry.file_type()?.is_dir() {
+                dirs.push(entry.path());
+            } else {
+                files.insert(entry.path().strip_prefix(&src_dir).unwrap().to_path_buf());
+            }
+        }
+    }
+
+    Ok(files)
+}
+
+impl Backup {
+    /// Create a new full backup
+    pub fn create<P: AsRef<Path>>(
+        backup_dir: P,
+        dirs: Vec<(PathBuf, PathBuf)>,
+    ) -> io::Result<Self> {
+        let backup_dir = backup_dir.as_ref();
+        let start_time = chrono::offset::Utc::now();
+
+        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
+        let path = backup_dir.join(filename);
+        let tar_gz = File::create(path)?;
+        let enc = GzEncoder::new(tar_gz, Compression::default());
+        let mut ar = tar::Builder::new(enc);
+
+        let mut added: HashMap<PathBuf, HashSet<PathBuf>> = HashMap::new();
+
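+        // Each (dir_in_tar, src_dir) pair maps a source directory on disk to a
+        // top-level directory inside the archive.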
+        for (dir_in_tar, src_dir) in dirs {
+            let files = files(src_dir.clone())?;
+
+            for path in &files {
+                // append_path_with_name takes the on-disk path first, then the
+                // name the file should get inside the archive
+                ar.append_path_with_name(src_dir.join(path), dir_in_tar.join(path))?;
+            }
+
+            added.insert(dir_in_tar, files);
+        }
+
+        Ok(Backup {
+            previous: None,
+            type_: BackupType::Full,
+            start_time,
+            added,
+            removed: HashMap::new(),
+        })
+    }
+}
+
 pub struct BackupManager {
     backup_dir: PathBuf,
     config_dir: PathBuf,
     world_dir: PathBuf,
     max_backups: u64,
-    start_time: Option<chrono::DateTime<chrono::Utc>>,
-    files: Vec<(PathBuf, PathBuf)>
+    last_backup: Option<Backup>,
 }

 impl BackupManager {
@@ -33,75 +115,24 @@ impl BackupManager {
             config_dir,
             world_dir,
             max_backups,
-            start_time: None,
-            files: Vec::new()
+            last_backup: None,
         }
     }

-    fn set_files_to_backup(&mut self) -> io::Result<()> {
-        let mut dirs = vec![
-            (PathBuf::from("worlds"), self.world_dir.clone()),
+    pub fn create_backup(&mut self) -> io::Result<()> {
+        let dirs = vec![
             (PathBuf::from("config"), self.config_dir.clone()),
+            (PathBuf::from("worlds"), self.world_dir.clone()),
         ];
-        self.files.clear();

-        while let Some((path_in_tar, path)) = dirs.pop() {
-            for res in path.read_dir()? {
-                let entry = res?;
-
-                if entry.file_name() == "cache" {
-                    continue;
-                }
-
-                let new_path_in_tar = path_in_tar.join(entry.file_name());
-
-                // All dirs get expanded recursively, while all files get returned as output
-                // NOTE: does this remove empty directories from backups? Is this a problem?
-                if entry.file_type()?.is_dir() {
-                    dirs.push((new_path_in_tar, entry.path()));
-                } else {
-                    self.files.push((new_path_in_tar, entry.path()));
-                }
-            }
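+        // The first backup is always a full one; creating an incremental backup
+        // on top of the previous one is not implemented yet.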
+        if let Some(last_backup) = &self.last_backup {
+            todo!();
+        } else {
+            self.last_backup = Some(Backup::create(&self.backup_dir, dirs)?);
         }

         Ok(())
     }

-    pub fn create_archive(&mut self) -> io::Result<()> {
-        let start_time = chrono::offset::Utc::now();
-        self.start_time = Some(start_time);
-
-        let filename = format!("{}", start_time.format(FILENAME_FORMAT));
-        let path = self.backup_dir.join(filename);
-        let tar_gz = File::create(path)?;
-        let enc = GzEncoder::new(tar_gz, Compression::default());
-        let mut tar = tar::Builder::new(enc);
-
-        self.set_files_to_backup()?;
-
-        for (path_in_tar, path) in &self.files {
-            tar.append_path_with_name(path, path_in_tar)?;
-        }
-
-        // TODO re-add this info file in some way
-        // We add a file to the backup describing for what version it was made
-        // let info = format!("{} {}", self.type_, self.version);
-        // let info_bytes = info.as_bytes();
-
-        // let mut header = tar::Header::new_gnu();
-        // header.set_size(info_bytes.len().try_into().unwrap());
-        // header.set_mode(0o100644);
-        // unsafe {
-        //     header.set_gid(getegid().into());
-        //     header.set_uid(geteuid().into());
-        // }
-
-        // tar.append_data(&mut header, "info.txt", info_bytes)?;
-
-        Ok(())
-    }

     /// Remove the oldest backups
     pub fn remove_old_backups(&mut self) -> std::io::Result<()> {
         // The naming format used allows us to sort the backups by name and still get a sorting by

@@ -1,5 +1,6 @@
 mod backups;
 mod command;
+mod path;
 mod process;

 pub use backups::BackupManager;

@@ -0,0 +1,19 @@
+use chrono::Utc;
+use std::collections::HashSet;
+use std::path::PathBuf;
+use std::{fs, io};
+
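+/// Recursive directory walker that will skip the directories in `ignored_dirs`
+/// (skeleton, not yet implemented)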
+struct ReadDirRecursive {
+    ignored_dirs: HashSet<PathBuf>,
+    read_dir: Option<fs::ReadDir>,
+    stack: Vec<fs::ReadDir>,
+}
+
+impl ReadDirRecursive {
+    // pub fn new()
+}
+
+trait PathExt {
+    fn modified_since(&self, timestamp: chrono::DateTime<Utc>) -> bool;
+    fn read_dir_recursive(&self) -> ReadDirRecursive;
+}

@@ -89,7 +89,7 @@ impl ServerProcess {
         std::thread::sleep(std::time::Duration::from_secs(10));

         let start_time = chrono::offset::Utc::now();
-        let res = self.backups.create_archive();
+        let res = self.backups.create_backup();

         if res.is_ok() {
             self.backups.remove_old_backups()?;