First implementation of archive uploading

This commit is contained in:
Jef Roosens 2022-04-02 12:44:41 +02:00
parent 910711a0f0
commit d5e3104019
Signed by: Jef Roosens
GPG key ID: B75D4F293C7052DB
3 changed files with 79 additions and 21 deletions

View file

@@ -1,14 +1,16 @@
use async_compression::tokio::bufread::GzipDecoder;
use axum::{
extract::BodyStream,
http::StatusCode,
response::IntoResponse,
routing::{get_service, post},
Router,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use hyper::{Body, Request};
use std::collections::HashSet;
use std::io::ErrorKind;
use std::net::SocketAddr;
use std::path::Path;
use tar::Archive;
use tokio_util::io::StreamReader;
use tower_http::{auth::RequireAuthorizationLayer, services::ServeDir, trace::TraceLayer};
@@ -57,10 +59,79 @@ async fn main() {
.unwrap();
}
async fn post_deploy(res: BodyStream) {
async fn post_deploy(res: BodyStream) -> impl IntoResponse {
    // Accepts a gzipped tarball as the request body, stages it on disk, then
    // extracts it into ./static and prunes any files not present in the new
    // archive. Returns 200 on success, 500 on any I/O or archive error.

    // This converts a stream into something that implements AsyncRead, which
    // we can then use to asynchronously write the archive to disk.
    let mut read =
        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));

    // Stage the upload on disk first; tar + flate2 are synchronous, so
    // extraction happens in a blocking task afterwards. Don't panic on I/O
    // failure — report it to the client instead.
    let mut file = match tokio::fs::File::create("test.archive.gz").await {
        Ok(f) => f,
        Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
    };

    if tokio::io::copy(&mut read, &mut file).await.is_err() {
        return StatusCode::INTERNAL_SERVER_ERROR;
    }

    // Extract the contents of the tarball synchronously on the blocking pool
    // so the async executor isn't stalled by filesystem-heavy work.
    match tokio::task::spawn_blocking(|| {
        let file = match std::fs::File::open("test.archive.gz") {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        let tar = GzDecoder::new(file);
        let mut archive = Archive::new(tar);

        // Every path present in the new archive; used below to decide which
        // existing files are stale.
        let mut paths = HashSet::new();

        let entries = match archive.entries() {
            Ok(e) => e,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };

        // Extract each entry into the output directory. unpack_in refuses
        // paths that would escape the target directory.
        for entry_res in entries {
            let mut entry = match entry_res {
                Ok(e) => e,
                Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
            };

            if entry.unpack_in("static").is_err() {
                return StatusCode::INTERNAL_SERVER_ERROR;
            }

            if let Ok(path) = entry.path() {
                paths.insert(path.into_owned());
            }
        }

        // Remove any old files that weren't present in the new archive,
        // walking the tree iteratively with an explicit stack.
        let base_path = Path::new("static");
        let iter = match base_path.read_dir() {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };

        // Seed the stack with the top-level entries of the static dir.
        let mut items: Vec<_> = iter.filter_map(|r| r.ok()).map(|e| e.path()).collect();

        while let Some(item) = items.pop() {
            tracing::debug!("{:?}", item);

            // Archive paths are relative to the static dir, so compare
            // against the on-disk path with the prefix stripped. Entries that
            // somehow lack the prefix are skipped rather than panicking.
            let rel = match item.strip_prefix("static/") {
                Ok(r) => r,
                Err(_) => continue,
            };

            if !paths.contains(rel) {
                // Best-effort removal: log failures instead of silently
                // discarding the Result or aborting the cleanup pass.
                let removed = if item.is_dir() {
                    std::fs::remove_dir_all(&item)
                } else {
                    std::fs::remove_file(&item)
                };
                if let Err(err) = removed {
                    tracing::warn!("Failed to remove {:?}: {}", item, err);
                }
            } else if let Ok(iter) = item.read_dir() {
                // Directory survives; descend into it to check its children.
                iter.filter_map(|r| r.ok())
                    .for_each(|e| items.push(e.path()));
            }
        }

        StatusCode::OK
    })
    .await
    {
        Ok(s) => s,
        // The blocking task panicked or was cancelled.
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    }
}