// site-backend/src/api/deploy.rs

use std::{collections::HashSet, io::ErrorKind, path::Path};
use axum::{
extract::{BodyStream, Extension, Query},
http::StatusCode,
response::IntoResponse,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use serde::Deserialize;
use tar::Archive;
use tokio_util::io::StreamReader;
use crate::DEFAULT_STATIC_DIR_NAME;
/// Query parameters accepted by the deploy endpoint.
#[derive(Deserialize)]
pub struct StaticDirParams {
    /// Optional subdirectory of the static dir to deploy into.
    dir: Option<String>,
}
/// Accepts a gzipped tarball in the request body, stages it to a temp file,
/// and extracts it into the static content directory (optionally a
/// subdirectory selected by the `dir` query parameter).
///
/// Returns `200 OK` on success and `500 Internal Server Error` if staging
/// the upload, creating the target directory, or extracting the archive
/// fails.
pub async fn post_deploy(
    Extension(data_dir): Extension<String>,
    Query(params): Query<StaticDirParams>,
    res: BodyStream,
) -> impl IntoResponse
{
    // This converts a stream into something that implements AsyncRead, which we can then use to
    // asynchronously write the file to disk
    let mut read =
        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
    // Stage the upload under a unique name so concurrent deploys cannot collide.
    let uuid = uuid::Uuid::new_v4();
    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
    // Report I/O failures as 500 instead of panicking the handler task.
    let mut file = match tokio::fs::File::create(&file_path).await {
        Ok(f) => f,
        Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
    };
    if tokio::io::copy(&mut read, &mut file).await.is_err() {
        // Best-effort cleanup of the partial temp file; its failure is non-fatal.
        let _ = tokio::fs::remove_file(&file_path).await;
        return StatusCode::INTERNAL_SERVER_ERROR;
    }
    let mut static_path = Path::new(&data_dir).join(DEFAULT_STATIC_DIR_NAME);
    if let Some(dir) = params.dir {
        // NOTE(review): `dir` comes straight from the query string; a value
        // containing `..` or an absolute path could escape the static dir.
        // Confirm callers are trusted or sanitize before joining.
        static_path = static_path.join(dir);
    }
    // Make sure the static directory exists
    if tokio::fs::create_dir_all(&static_path).await.is_err() {
        let _ = tokio::fs::remove_file(&file_path).await;
        return StatusCode::INTERNAL_SERVER_ERROR;
    }
    let fp_clone = file_path.clone();
    // Extract the contents of the tarball synchronously on a blocking thread
    // (tar/flate2 are blocking APIs). Both a panicked/cancelled task (outer
    // Err) and an extraction failure (inner Err) must map to 500; the old
    // `Ok(_)` arm treated `Ok(Err(()))` — a failed extraction — as success.
    let status =
        match tokio::task::spawn_blocking(move || process_archive(&fp_clone, &static_path)).await {
            Ok(Ok(())) => StatusCode::OK,
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        };
    // Remove archive file after use; ignore failure, it is only a temp file.
    let _ = tokio::fs::remove_file(&file_path).await;
    status
}
/// Unpacks the gzipped tarball at `archive_path` into `static_dir`, then
/// deletes any pre-existing files under `static_dir` that were not present
/// in the archive.
///
/// Runs synchronously; callers should invoke it from a blocking context.
/// Returns `Err(())` if the archive cannot be opened or read, if an entry
/// fails to unpack, or if `static_dir` cannot be listed. Removal of stale
/// files is best-effort and never fails the call.
fn process_archive(archive_path: &Path, static_dir: &Path) -> Result<(), ()>
{
    let file = std::fs::File::open(archive_path).map_err(|_| ())?;
    let tar = GzDecoder::new(file);
    let mut archive = Archive::new(tar);
    // Relative paths of every extracted entry, used below to detect stale
    // files left over from a previous deployment.
    let mut paths = HashSet::new();
    let entries = archive.entries().map_err(|_| ())?;
    // Extract each entry into the output directory. `unpack_in` refuses
    // entries that would escape `static_dir` (e.g. via `..` components).
    for entry_res in entries {
        let mut entry = entry_res.map_err(|_| ())?;
        entry.unpack_in(static_dir).map_err(|_| ())?;
        if let Ok(path) = entry.path() {
            paths.insert(path.into_owned());
        }
    }
    // Walk the static dir iteratively via an explicit stack and remove any
    // old files that weren't present in the new archive.
    let mut items = vec![];
    let iter = static_dir.read_dir().map_err(|_| ())?;
    items.extend(iter.filter_map(|r| r.ok()).map(|e| e.path()));
    while let Some(item) = items.pop() {
        tracing::debug!("{:?}", item);
        // `strip_prefix` should never fail here (every item lives under
        // `static_dir`), but treat a failure as "keep the file" rather than
        // panicking, as the old `unwrap()` did.
        let is_stale = item
            .strip_prefix(static_dir)
            .map(|rel| !paths.contains(rel))
            .unwrap_or(false);
        if is_stale {
            // Deletion is best-effort: a file we cannot remove should not
            // fail the whole deployment, but the error is no longer dropped
            // silently.
            let removed = if item.is_dir() {
                std::fs::remove_dir_all(&item)
            } else {
                std::fs::remove_file(&item)
            };
            if let Err(err) = removed {
                tracing::debug!("failed to remove {:?}: {}", item, err);
            }
        } else if let Ok(iter) = item.read_dir() {
            // Directory survives; descend into it to check its children.
            items.extend(iter.filter_map(|r| r.ok()).map(|e| e.path()));
        }
    }
    Ok(())
}