feat: granular locking for proper concurrent access to server process
ci/woodpecker/push/clippy Pipeline was successful Details
ci/woodpecker/push/lint Pipeline was successful Details
ci/woodpecker/push/build Pipeline was successful Details

main
Jef Roosens 2023-08-12 11:44:35 +02:00
parent a51ff3937d
commit b3d1cec078
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
5 changed files with 43 additions and 50 deletions

View File

@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Export command no longer reads backups that do not contribute to the final * Export command no longer reads backups that do not contribute to the final
state state
* Running backups no longer block stdin input or shutdown
## [0.3.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.3.1) ## [0.3.1](https://git.rustybever.be/Chewing_Bever/alex/src/tag/0.3.1)

View File

@ -1,9 +1,6 @@
mod config; mod config;
use std::{ use std::{path::PathBuf, sync::Arc};
path::PathBuf,
sync::{Arc, Mutex},
};
use clap::Args; use clap::Args;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -45,11 +42,15 @@ pub struct RunArgs {
pub xmx: Option<u64>, pub xmx: Option<u64>,
} }
fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>) { fn backups_thread(server: Arc<server::ServerProcess>) {
loop { loop {
let next_scheduled_time = { let next_scheduled_time = {
let server = counter.lock().unwrap(); server
server.backups.next_scheduled_time().unwrap() .backups
.read()
.unwrap()
.next_scheduled_time()
.unwrap()
}; };
let now = chrono::offset::Utc::now(); let now = chrono::offset::Utc::now();
@ -57,13 +58,9 @@ fn backups_thread(counter: Arc<Mutex<server::ServerProcess>>) {
std::thread::sleep((next_scheduled_time - now).to_std().unwrap()); std::thread::sleep((next_scheduled_time - now).to_std().unwrap());
} }
{
let mut server = counter.lock().unwrap();
// We explicitly ignore the error here, as we don't want the thread to fail // We explicitly ignore the error here, as we don't want the thread to fail
let _ = server.backup(); let _ = server.backup();
} }
}
} }
impl RunCli { impl RunCli {
@ -90,7 +87,7 @@ impl RunCli {
return Ok(()); return Ok(());
} }
let counter = Arc::new(Mutex::new(cmd.spawn()?)); let counter = Arc::new(cmd.spawn()?);
if !global.layers.is_empty() { if !global.layers.is_empty() {
let clone = Arc::clone(&counter); let clone = Arc::clone(&counter);

View File

@ -1,22 +1,21 @@
use crate::backup::MetaManager; use std::{io::Write, process::Child, sync::RwLock};
use crate::server::Metadata;
use std::io::Write; use crate::{backup::MetaManager, server::Metadata};
use std::process::Child;
pub struct ServerProcess { pub struct ServerProcess {
child: Child, child: RwLock<Child>,
pub backups: MetaManager<Metadata>, pub backups: RwLock<MetaManager<Metadata>>,
} }
impl ServerProcess { impl ServerProcess {
pub fn new(manager: MetaManager<Metadata>, child: Child) -> ServerProcess { pub fn new(manager: MetaManager<Metadata>, child: Child) -> ServerProcess {
ServerProcess { ServerProcess {
child, child: RwLock::new(child),
backups: manager, backups: RwLock::new(manager),
} }
} }
pub fn send_command(&mut self, cmd: &str) -> std::io::Result<()> { pub fn send_command(&self, cmd: &str) -> std::io::Result<()> {
match cmd.trim() { match cmd.trim() {
"stop" | "exit" => self.stop()?, "stop" | "exit" => self.stop()?,
"backup" => self.backup()?, "backup" => self.backup()?,
@ -26,29 +25,34 @@ impl ServerProcess {
Ok(()) Ok(())
} }
fn custom(&mut self, cmd: &str) -> std::io::Result<()> { fn custom(&self, cmd: &str) -> std::io::Result<()> {
let mut stdin = self.child.stdin.as_ref().unwrap(); let child = self.child.write().unwrap();
let mut stdin = child.stdin.as_ref().unwrap();
stdin.write_all(format!("{}\n", cmd.trim()).as_bytes())?; stdin.write_all(format!("{}\n", cmd.trim()).as_bytes())?;
stdin.flush()?; stdin.flush()?;
Ok(()) Ok(())
} }
pub fn stop(&mut self) -> std::io::Result<()> { pub fn stop(&self) -> std::io::Result<()> {
self.custom("stop")?; self.custom("stop")?;
self.child.wait()?;
self.child.write().unwrap().wait()?;
Ok(()) Ok(())
} }
pub fn kill(&mut self) -> std::io::Result<()> { pub fn kill(&self) -> std::io::Result<()> {
self.child.kill() self.child.write().unwrap().kill()
} }
/// Perform a backup by disabling the server's save feature and flushing its data, before /// Perform a backup by disabling the server's save feature and flushing its data, before
/// creating an archive file. /// creating an archive file.
pub fn backup(&mut self) -> std::io::Result<()> { pub fn backup(&self) -> std::io::Result<()> {
let layer_name = String::from(self.backups.next_scheduled_layer().unwrap()); // We explicitely lock this entire function to prevent parallel backups
let mut backups = self.backups.write().unwrap();
let layer_name = String::from(backups.next_scheduled_layer().unwrap());
self.custom(&format!("say starting backup for layer '{}'", layer_name))?; self.custom(&format!("say starting backup for layer '{}'", layer_name))?;
// Make sure the server isn't modifying the files during the backup // Make sure the server isn't modifying the files during the backup
@ -60,7 +64,7 @@ impl ServerProcess {
std::thread::sleep(std::time::Duration::from_secs(10)); std::thread::sleep(std::time::Duration::from_secs(10));
let start_time = chrono::offset::Utc::now(); let start_time = chrono::offset::Utc::now();
let res = self.backups.perform_backup_cycle(); let res = backups.perform_backup_cycle();
// The server's save feature needs to be enabled again even if the archive failed to create // The server's save feature needs to be enabled again even if the archive failed to create
self.custom("save-on")?; self.custom("save-on")?;

View File

@ -1,6 +1,6 @@
use std::{ use std::{
io, io,
sync::{atomic::AtomicBool, Arc, Mutex}, sync::{atomic::AtomicBool, Arc},
}; };
use signal_hook::{ use signal_hook::{
@ -37,7 +37,7 @@ pub fn install_signal_handlers() -> io::Result<(Arc<AtomicBool>, SignalsInfo)> {
/// Loop that handles terminating signals as they come in. /// Loop that handles terminating signals as they come in.
pub fn handle_signals( pub fn handle_signals(
signals: &mut SignalsInfo, signals: &mut SignalsInfo,
counter: Arc<Mutex<server::ServerProcess>>, server: Arc<server::ServerProcess>,
) -> io::Result<()> { ) -> io::Result<()> {
let mut force = false; let mut force = false;
@ -49,17 +49,15 @@ pub fn handle_signals(
// This will currently not work, as the initial stop command will block the kill from // This will currently not work, as the initial stop command will block the kill from
// happening. // happening.
if force { if force {
let mut server = counter.lock().unwrap();
return server.kill(); return server.kill();
} }
// The stop command runs in a separate thread to avoid blocking the signal handling loop. // The stop command runs in a separate thread to avoid blocking the signal handling loop.
// After stopping the server, the thread terminates the process. // After stopping the server, the thread terminates the process.
else { else {
let clone = Arc::clone(&counter); let clone = Arc::clone(&server);
std::thread::spawn(move || { std::thread::spawn(move || {
let mut server = clone.lock().unwrap(); let _ = clone.stop();
let _ = server.stop();
std::process::exit(0); std::process::exit(0);
}); });
} }

View File

@ -1,11 +1,8 @@
use std::{ use std::{io, sync::Arc};
io,
sync::{Arc, Mutex},
};
use crate::server; use crate::server;
pub fn handle_stdin(counter: Arc<Mutex<server::ServerProcess>>) { pub fn handle_stdin(server: Arc<server::ServerProcess>) {
let stdin = io::stdin(); let stdin = io::stdin();
let input = &mut String::new(); let input = &mut String::new();
@ -16,13 +13,9 @@ pub fn handle_stdin(counter: Arc<Mutex<server::ServerProcess>>) {
continue; continue;
}; };
{
let mut server = counter.lock().unwrap();
if let Err(e) = server.send_command(input) { if let Err(e) = server.send_command(input) {
println!("{}", e); println!("{}", e);
}; };
}
if input.trim() == "stop" { if input.trim() == "stop" {
std::process::exit(0); std::process::exit(0);