99 lines
3.0 KiB
Rust
99 lines
3.0 KiB
Rust
use std::{collections::HashSet, io::ErrorKind, path::Path};
|
|
|
|
use axum::{
|
|
extract::{BodyStream, Extension, Query},
|
|
http::StatusCode,
|
|
response::IntoResponse,
|
|
};
|
|
use flate2::read::GzDecoder;
|
|
use futures_util::TryStreamExt;
|
|
use serde::Deserialize;
|
|
use std::io;
|
|
use tar::Archive;
|
|
use tokio_util::io::StreamReader;
|
|
|
|
use crate::{DEFAULT_STATIC_SITE, STATIC_DIR_NAME};
|
|
|
|
/// Query parameters accepted by [`post_deploy`].
#[derive(Deserialize)]
pub struct StaticDirParams {
    // Name of the static-site subdirectory to deploy into; when absent,
    // `post_deploy` falls back to `DEFAULT_STATIC_SITE`.
    dir: Option<String>,
}
|
|
|
|
pub async fn post_deploy(
|
|
Extension(data_dir): Extension<String>,
|
|
Query(params): Query<StaticDirParams>,
|
|
res: BodyStream,
|
|
) -> crate::Result<()> {
|
|
// This converts a stream into something that implements AsyncRead, which we can then use to
|
|
// asynchronously write the file to disk
|
|
let mut read =
|
|
StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
|
|
let uuid = uuid::Uuid::new_v4();
|
|
let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
|
|
let mut file = tokio::fs::File::create(&file_path).await?;
|
|
tokio::io::copy(&mut read, &mut file).await?;
|
|
|
|
// If no dir is provided, we use the default one. Otherwise, use the provided one.
|
|
let static_path = Path::new(&data_dir)
|
|
.join(STATIC_DIR_NAME)
|
|
.join(params.dir.unwrap_or(DEFAULT_STATIC_SITE.to_string()));
|
|
|
|
// Make sure the static directory exists
|
|
tokio::fs::create_dir_all(&static_path).await?;
|
|
|
|
let fp_clone = file_path.clone();
|
|
// Extract the contents of the tarball synchronously
|
|
tokio::task::spawn_blocking(move || process_archive(&fp_clone, &static_path)).await??;
|
|
|
|
// Remove archive file after use
|
|
tokio::fs::remove_file(&file_path).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
fn process_archive(archive_path: &Path, static_dir: &Path) -> io::Result<()> {
|
|
let file = std::fs::File::open(archive_path)?;
|
|
let tar = GzDecoder::new(file);
|
|
let mut archive = Archive::new(tar);
|
|
|
|
let mut paths = HashSet::new();
|
|
|
|
let entries = archive.entries()?;
|
|
// Extract each entry into the output directory
|
|
for entry in entries {
|
|
let mut entry = entry?;
|
|
entry.unpack_in(static_dir)?;
|
|
|
|
if let Ok(path) = entry.path() {
|
|
paths.insert(path.into_owned());
|
|
}
|
|
}
|
|
|
|
// Remove any old files that weren't present in new archive
|
|
let mut items = vec![];
|
|
|
|
// Start by populating the vec with the initial files
|
|
let iter = static_dir.read_dir()?;
|
|
iter.filter_map(|r| r.ok())
|
|
.for_each(|e| items.push(e.path()));
|
|
|
|
// As long as there are still items in the vec, we keep going
|
|
while !items.is_empty() {
|
|
let item = items.pop().unwrap();
|
|
tracing::debug!("{:?}", item);
|
|
|
|
if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
|
|
if item.is_dir() {
|
|
std::fs::remove_dir_all(item)?;
|
|
} else {
|
|
std::fs::remove_file(item)?;
|
|
}
|
|
} else if let Ok(iter) = item.read_dir() {
|
|
iter.filter_map(|r| r.ok())
|
|
.for_each(|e| items.push(e.path()));
|
|
}
|
|
}
|
|
|
|
Ok(())
|
|
}
|