Added support for optionally deploying to subdir (for docs later)
This commit is contained in:
parent
a3cf021fc6
commit
b4c8216ebc
6 changed files with 103 additions and 69 deletions
|
|
@ -1,19 +1,27 @@
|
|||
use std::{collections::HashSet, io::ErrorKind, path::Path};
|
||||
|
||||
use axum::{
|
||||
extract::{BodyStream, Extension},
|
||||
extract::{BodyStream, Extension, Query},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use flate2::read::GzDecoder;
|
||||
use futures_util::TryStreamExt;
|
||||
use serde::Deserialize;
|
||||
use tar::Archive;
|
||||
use tokio_util::io::StreamReader;
|
||||
|
||||
use crate::STATIC_DIR_NAME;
|
||||
use crate::DEFAULT_STATIC_DIR_NAME;
|
||||
|
||||
/// Query parameters accepted by the deploy endpoint (`post_deploy`).
#[derive(Deserialize)]
pub struct StaticDirParams
{
    // Optional subdirectory to deploy into. When present it is joined onto
    // the default static directory path; when absent the archive is
    // extracted into the static directory root.
    dir: Option<String>,
}
|
||||
|
||||
pub async fn post_deploy(
|
||||
Extension(data_dir): Extension<String>,
|
||||
Query(params): Query<StaticDirParams>,
|
||||
res: BodyStream,
|
||||
) -> impl IntoResponse
|
||||
{
|
||||
|
|
@ -26,71 +34,72 @@ pub async fn post_deploy(
|
|||
let mut file = tokio::fs::File::create(&file_path).await.unwrap();
|
||||
tokio::io::copy(&mut read, &mut file).await;
|
||||
|
||||
// Extract the contents of the tarball synchronously
|
||||
match tokio::task::spawn_blocking(move || {
|
||||
let file = match std::fs::File::open(file_path) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
|
||||
};
|
||||
let tar = GzDecoder::new(file);
|
||||
let mut archive = Archive::new(tar);
|
||||
let mut static_path = Path::new(&data_dir).join(DEFAULT_STATIC_DIR_NAME);
|
||||
|
||||
let mut paths = HashSet::new();
|
||||
|
||||
let entries = match archive.entries() {
|
||||
Ok(e) => e,
|
||||
Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
|
||||
};
|
||||
|
||||
// Extract each entry into the output directory
|
||||
let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
|
||||
for entry_res in entries {
|
||||
if let Ok(mut entry) = entry_res {
|
||||
if let Err(_) = entry.unpack_in(&static_dir) {
|
||||
return StatusCode::INTERNAL_SERVER_ERROR;
|
||||
}
|
||||
|
||||
if let Ok(path) = entry.path() {
|
||||
paths.insert(path.into_owned());
|
||||
}
|
||||
} else {
|
||||
return StatusCode::INTERNAL_SERVER_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any old files that weren't present in new archive
|
||||
let mut items = vec![];
|
||||
|
||||
// Start by populating the vec with the initial files
|
||||
let iter = match static_dir.read_dir() {
|
||||
Ok(v) => v,
|
||||
Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
|
||||
};
|
||||
iter.filter_map(|r| r.ok())
|
||||
.for_each(|e| items.push(e.path()));
|
||||
|
||||
// As long as there are still items in the vec, we keep going
|
||||
while items.len() > 0 {
|
||||
let item = items.pop().unwrap();
|
||||
tracing::debug!("{:?}", item);
|
||||
|
||||
if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
|
||||
if item.is_dir() {
|
||||
std::fs::remove_dir_all(item);
|
||||
} else {
|
||||
std::fs::remove_file(item);
|
||||
}
|
||||
} else if let Ok(iter) = item.read_dir() {
|
||||
iter.filter_map(|r| r.ok())
|
||||
.for_each(|e| items.push(e.path()));
|
||||
}
|
||||
}
|
||||
|
||||
StatusCode::OK
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(s) => s,
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
if params.dir.is_some() {
|
||||
static_path = static_path.join(params.dir.unwrap());
|
||||
}
|
||||
|
||||
// Make sure the static directory exists
|
||||
tokio::fs::create_dir_all(&static_path).await;
|
||||
|
||||
let fp_clone = file_path.clone();
|
||||
// Extract the contents of the tarball synchronously
|
||||
let res =
|
||||
match tokio::task::spawn_blocking(move || process_archive(&fp_clone, &static_path)).await {
|
||||
Ok(_) => StatusCode::OK,
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
};
|
||||
|
||||
// Remove archive file after use
|
||||
tokio::fs::remove_file(&file_path).await;
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
fn process_archive(archive_path: &Path, static_dir: &Path) -> Result<(), ()>
|
||||
{
|
||||
let file = std::fs::File::open(archive_path).map_err(|_| ())?;
|
||||
let tar = GzDecoder::new(file);
|
||||
let mut archive = Archive::new(tar);
|
||||
|
||||
let mut paths = HashSet::new();
|
||||
|
||||
let entries = archive.entries().map_err(|_| ())?;
|
||||
// Extract each entry into the output directory
|
||||
for entry_res in entries {
|
||||
let mut entry = entry_res.map_err(|_| ())?;
|
||||
entry.unpack_in(static_dir).map_err(|_| ())?;
|
||||
|
||||
if let Ok(path) = entry.path() {
|
||||
paths.insert(path.into_owned());
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any old files that weren't present in new archive
|
||||
let mut items = vec![];
|
||||
|
||||
// Start by populating the vec with the initial files
|
||||
let iter = static_dir.read_dir().map_err(|_| ())?;
|
||||
iter.filter_map(|r| r.ok())
|
||||
.for_each(|e| items.push(e.path()));
|
||||
|
||||
// As long as there are still items in the vec, we keep going
|
||||
while items.len() > 0 {
|
||||
let item = items.pop().unwrap();
|
||||
tracing::debug!("{:?}", item);
|
||||
|
||||
if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
|
||||
if item.is_dir() {
|
||||
std::fs::remove_dir_all(item);
|
||||
} else {
|
||||
std::fs::remove_file(item);
|
||||
}
|
||||
} else if let Ok(iter) = item.read_dir() {
|
||||
iter.filter_map(|r| r.ok())
|
||||
.for_each(|e| items.push(e.path()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue