site-backend/src/main.rs
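
//! A small axum backend that serves a static site out of DATA_DIR/static and
//! exposes an authenticated POST /api/deploy endpoint that accepts a new build
//! as a gzipped tarball.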

use axum::{
    extract::{BodyStream, Extension},
    http::StatusCode,
    response::IntoResponse,
    routing::{get_service, post},
    Router,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use std::collections::HashSet;
use std::io::ErrorKind;
use std::net::SocketAddr;
use std::path::Path;
use tar::Archive;
use tokio_util::io::StreamReader;
use tower_http::{auth::RequireAuthorizationLayer, services::ServeDir, trace::TraceLayer};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
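
// Subdirectory of DATA_DIR where uploaded site files are extracted to and served from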
const STATIC_DIR_NAME: &str = "static";

#[tokio::main]
async fn main() {
    // Enable tracing
    tracing_subscriber::registry()
        .with(tracing_subscriber::EnvFilter::new(
            std::env::var("RUST_LOG")
                .unwrap_or_else(|_| "site_backend=debug,tower_http=debug".into()),
        ))
        .with(tracing_subscriber::fmt::layer())
        .init();

    // Get required variables from env vars
    let api_key = std::env::var("API_KEY").expect("No API_KEY was provided.");
    let data_dir = std::env::var("DATA_DIR").expect("No DATA_DIR was provided.");
    let static_dir = format!("{}/{}", data_dir, STATIC_DIR_NAME);
    std::fs::create_dir_all(&static_dir).expect("Failed to create the static directory.");

    let app = Router::new()
        .route(
            "/api/deploy",
            post(post_deploy).layer(RequireAuthorizationLayer::bearer(&api_key)),
        )
        // The fallback option is to serve the actual static files
        .fallback(get_service(ServeDir::new(static_dir)).handle_error(
            |error: std::io::Error| async move {
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    format!("Unhandled internal error: {}", error),
                )
            },
        ))
        .layer(TraceLayer::new_for_http())
        .layer(Extension(data_dir));

    let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
    tracing::debug!("listening on {}", addr);
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        .await
        .unwrap();
}
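
/// Handles POST /api/deploy: buffers the uploaded gzipped tarball to disk, extracts
/// it into the static directory, and prunes files left over from previous deploys
/// that are not present in the new archive.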
async fn post_deploy(Extension(data_dir): Extension<String>, res: BodyStream) -> impl IntoResponse {
    // This converts a stream into something that implements AsyncRead, which we can then use to
    // asynchronously write the file to disk
    let mut read =
        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));

    // Buffer the uploaded archive to a uniquely named file in the data directory
    let uuid = uuid::Uuid::new_v4();
    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
    let mut file = match tokio::fs::File::create(&file_path).await {
        Ok(f) => f,
        Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
    };
    if tokio::io::copy(&mut read, &mut file).await.is_err() {
        return StatusCode::INTERNAL_SERVER_ERROR;
    }

    // Extract the contents of the tarball synchronously on a blocking thread so the
    // decompression and filesystem work doesn't stall the async runtime
    match tokio::task::spawn_blocking(move || {
        let file = match std::fs::File::open(file_path) {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        let tar = GzDecoder::new(file);
        let mut archive = Archive::new(tar);
        let mut paths = HashSet::new();
        let entries = match archive.entries() {
            Ok(e) => e,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        // Extract each entry into the output directory, remembering its path so stale
        // files can be pruned afterwards
        let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
        for entry_res in entries {
            if let Ok(mut entry) = entry_res {
                if entry.unpack_in(&static_dir).is_err() {
                    return StatusCode::INTERNAL_SERVER_ERROR;
                }
                if let Ok(path) = entry.path() {
                    paths.insert(path.into_owned());
                }
            } else {
                return StatusCode::INTERNAL_SERVER_ERROR;
            }
        }
        // Remove any old files that weren't present in the new archive
        let mut items = vec![];
        // Start by populating the list with the top-level entries of the static dir
        let iter = match static_dir.read_dir() {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        iter.filter_map(|r| r.ok())
            .for_each(|e| items.push(e.path()));
        // As long as there are still items in the list, we keep going
        while let Some(item) = items.pop() {
            tracing::debug!("{:?}", item);
            if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
                // The path was not part of the uploaded archive, so delete it
                let removed = if item.is_dir() {
                    std::fs::remove_dir_all(&item)
                } else {
                    std::fs::remove_file(&item)
                };
                if let Err(err) = removed {
                    tracing::warn!("failed to remove stale path {:?}: {}", item, err);
                }
            } else if let Ok(iter) = item.read_dir() {
                // The path itself is kept; if it is a directory, check its contents as well
                iter.filter_map(|r| r.ok())
                    .for_each(|e| items.push(e.path()));
            }
        }

        StatusCode::OK
    })
    .await
    {
        Ok(s) => s,
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    }
}
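
For context, a minimal sketch of what a deployment client for this endpoint could look like. This is an illustration under assumptions, not part of the service above: it presumes the reqwest, tar, and flate2 crates, a hypothetical dist/ directory containing the built site, and SERVER_URL/API_KEY supplied through the environment.

use flate2::{write::GzEncoder, Compression};

/// Packs the local `dist/` directory (hypothetical build output) into a gzipped
/// tarball and uploads it to POST {server}/api/deploy with a bearer token.
async fn deploy(server: &str, api_key: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Build the .tar.gz entirely in memory
    let encoder = GzEncoder::new(Vec::new(), Compression::default());
    let mut builder = tar::Builder::new(encoder);
    builder.append_dir_all(".", "dist")?;
    let tarball = builder.into_inner()?.finish()?;

    // Send it as the raw request body; the server streams it straight to disk
    reqwest::Client::new()
        .post(format!("{}/api/deploy", server))
        .bearer_auth(api_key)
        .body(tarball)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // SERVER_URL and API_KEY are assumed to be provided by the environment
    let server = std::env::var("SERVER_URL")?;
    let api_key = std::env::var("API_KEY")?;
    deploy(&server, &api_key).await
}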