Split code into modules
parent f6a3afb315
commit 8664453ef7
@@ -0,0 +1,95 @@
use crate::STATIC_DIR_NAME;
use axum::{
    extract::{BodyStream, Extension},
    http::StatusCode,
    response::IntoResponse,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use std::collections::HashSet;
use std::io::ErrorKind;
use std::path::Path;
use tar::Archive;
use tokio_util::io::StreamReader;

pub async fn post_deploy(
    Extension(data_dir): Extension<String>,
    res: BodyStream,
) -> impl IntoResponse {
    // This converts a stream into something that implements AsyncRead, which we can then use to
    // asynchronously write the file to disk
    let mut read =
        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
    let uuid = uuid::Uuid::new_v4();
    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
    let mut file = tokio::fs::File::create(&file_path).await.unwrap();
    tokio::io::copy(&mut read, &mut file).await;

    // Extract the contents of the tarball synchronously
    match tokio::task::spawn_blocking(move || {
        let file = match std::fs::File::open(file_path) {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        let tar = GzDecoder::new(file);
        let mut archive = Archive::new(tar);

        let mut paths = HashSet::new();

        let entries = match archive.entries() {
            Ok(e) => e,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };

        // Extract each entry into the output directory
        let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
        for entry_res in entries {
            if let Ok(mut entry) = entry_res {
                if let Err(_) = entry.unpack_in(&static_dir) {
                    return StatusCode::INTERNAL_SERVER_ERROR;
                }

                if let Ok(path) = entry.path() {
                    paths.insert(path.into_owned());
                }
            } else {
                return StatusCode::INTERNAL_SERVER_ERROR;
            }
        }

        // Remove any old files that weren't present in new archive
        let mut items = vec![];

        // Start by populating the vec with the initial files
        let iter = match static_dir.read_dir() {
            Ok(v) => v,
            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
        };
        iter.filter_map(|r| r.ok())
            .for_each(|e| items.push(e.path()));

        // As long as there are still items in the vec, we keep going
        while items.len() > 0 {
            let item = items.pop().unwrap();
            tracing::debug!("{:?}", item);

            if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
                if item.is_dir() {
                    std::fs::remove_dir_all(item);
                } else {
                    std::fs::remove_file(item);
                }
            } else if let Ok(iter) = item.read_dir() {
                iter.filter_map(|r| r.ok())
                    .for_each(|e| items.push(e.path()));
            }
        }

        StatusCode::OK
    })
    .await
    {
        Ok(s) => s,
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    }
}
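The commit itself ships no client for this endpoint. As a rough sketch of how it could be exercised once the handler is mounted at /api/deploy behind bearer auth (see the src/main.rs changes below), an upload might look like the following; the reqwest crate, host name, environment variable, and archive path are illustrative assumptions, not part of the commit.

// Hypothetical client sketch (not part of this commit): upload a gzipped tarball
// to the deploy endpoint. Host, API key source and archive path are placeholders.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = std::env::var("API_KEY")?;
    // The handler reads the raw request body as a .tar.gz stream
    let archive = tokio::fs::read("public.tar.gz").await?;

    let status = Client::new()
        .post("https://example.com/api/deploy")
        .bearer_auth(api_key)
        .body(archive)
        .send()
        .await?
        .status();

    println!("deploy returned {}", status);
    Ok(())
}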
@@ -0,0 +1,8 @@
use axum::routing::post;
use axum::Router;

mod deploy;

pub fn router() -> Router {
    Router::new().route("/deploy", post(deploy::post_deploy))
}
src/main.rs
@@ -1,22 +1,11 @@
-use axum::{
-    extract::{BodyStream, Extension},
-    http::StatusCode,
-    response::{IntoResponse, Json},
-    routing::{get, get_service, post},
-    Router,
-};
-use flate2::read::GzDecoder;
-use futures_util::TryStreamExt;
-use serde_json::{json, Value};
-use std::collections::HashSet;
-use std::io::ErrorKind;
+use axum::{extract::Extension, http::StatusCode, routing::get_service, Router};
 use std::net::SocketAddr;
-use std::path::Path;
-use tar::Archive;
-use tokio_util::io::StreamReader;
 use tower_http::{auth::RequireAuthorizationLayer, services::ServeDir, trace::TraceLayer};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 
+mod api;
+mod matrix;
+
 const STATIC_DIR_NAME: &str = "static";
 
 #[tokio::main]
@@ -24,8 +13,7 @@ async fn main() {
     // Enable tracing
     tracing_subscriber::registry()
         .with(tracing_subscriber::EnvFilter::new(
-            std::env::var("RUST_LOG")
-                .unwrap_or_else(|_| "site_backend=debug,tower_http=debug".into()),
+            std::env::var("RUST_LOG").unwrap_or_else(|_| "site=debug,tower_http=debug".into()),
         ))
         .with(tracing_subscriber::fmt::layer())
         .init();
@@ -38,11 +26,12 @@ async fn main() {
     std::fs::create_dir_all(&static_dir);
 
     let app = Router::new()
-        .route("/.well-known/matrix/server", get(get_matrix_server))
-        .route("/.well-known/matrix/client", get(get_matrix_client))
-        .route(
-            "/api/deploy",
-            post(post_deploy).layer(RequireAuthorizationLayer::bearer(&api_key)),
+        // Handle Matrix .well-known files
+        .nest("/", matrix::router())
+        // Routes under /api path
+        .nest(
+            "/api",
+            api::router().layer(RequireAuthorizationLayer::bearer(&api_key)),
         )
         // The fallback option is to serve the actual static files
        .fallback(get_service(ServeDir::new(static_dir)).handle_error(
@@ -63,90 +52,3 @@ async fn main() {
         .await
         .unwrap();
 }
-
-async fn get_matrix_server() -> impl IntoResponse {
-    Json(json!({"m.server": "matrix.rustybever.be:443"}))
-}
-
-async fn get_matrix_client() -> impl IntoResponse {
-    Json(json!({"m.homeserver": {"base_url": "https://matrix.rustybever.be"}}))
-}
-
-async fn post_deploy(Extension(data_dir): Extension<String>, res: BodyStream) -> impl IntoResponse {
-    // This converts a stream into something that implements AsyncRead, which we can then use to
-    // asynchronously write the file to disk
-    let mut read =
-        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
-    let uuid = uuid::Uuid::new_v4();
-    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
-    let mut file = tokio::fs::File::create(&file_path).await.unwrap();
-    tokio::io::copy(&mut read, &mut file).await;
-
-    // Extract the contents of the tarball synchronously
-    match tokio::task::spawn_blocking(move || {
-        let file = match std::fs::File::open(file_path) {
-            Ok(v) => v,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-        let tar = GzDecoder::new(file);
-        let mut archive = Archive::new(tar);
-
-        let mut paths = HashSet::new();
-
-        let entries = match archive.entries() {
-            Ok(e) => e,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-
-        // Extract each entry into the output directory
-        let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
-        for entry_res in entries {
-            if let Ok(mut entry) = entry_res {
-                if let Err(_) = entry.unpack_in(&static_dir) {
-                    return StatusCode::INTERNAL_SERVER_ERROR;
-                }
-
-                if let Ok(path) = entry.path() {
-                    paths.insert(path.into_owned());
-                }
-            } else {
-                return StatusCode::INTERNAL_SERVER_ERROR;
-            }
-        }
-
-        // Remove any old files that weren't present in new archive
-        let mut items = vec![];
-
-        // Start by populating the vec with the initial files
-        let iter = match static_dir.read_dir() {
-            Ok(v) => v,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-        iter.filter_map(|r| r.ok())
-            .for_each(|e| items.push(e.path()));
-
-        // As long as there are still items in the vec, we keep going
-        while items.len() > 0 {
-            let item = items.pop().unwrap();
-            tracing::debug!("{:?}", item);
-
-            if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
-                if item.is_dir() {
-                    std::fs::remove_dir_all(item);
-                } else {
-                    std::fs::remove_file(item);
-                }
-            } else if let Ok(iter) = item.read_dir() {
-                iter.filter_map(|r| r.ok())
-                    .for_each(|e| items.push(e.path()));
-            }
-        }
-
-        StatusCode::OK
-    })
-    .await
-    {
-        Ok(s) => s,
-        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
-    }
-}
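The router change above swaps the individually registered routes for two nested sub-routers, with the bearer-auth layer now applied to everything under /api. A minimal, self-contained illustration of how axum composes nested paths follows; it is not from the commit, and the handler and function names are placeholders.

// Standalone illustration of axum's nesting behaviour; `handler` and `app` are placeholders.
use axum::{routing::post, Router};

async fn handler() {}

fn app() -> Router {
    // Routes registered here are relative to the prefix chosen at nesting time,
    // so "/deploy" becomes "POST /api/deploy" below. A layer applied to this
    // sub-router (like RequireAuthorizationLayer in main.rs) only guards these routes.
    let api = Router::new().route("/deploy", post(handler));

    Router::new().nest("/api", api)
}

fn main() {
    // Build the composed router; actually binding and serving it is omitted here.
    let _app = app();
}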
@@ -0,0 +1,16 @@
use axum::{response::IntoResponse, routing::get, Json, Router};
use serde_json::json;

pub fn router() -> Router {
    Router::new()
        .route("/.well-known/matrix/server", get(get_matrix_server))
        .route("/.well-known/matrix/client", get(get_matrix_client))
}

async fn get_matrix_server() -> impl IntoResponse {
    Json(json!({"m.server": "matrix.rustybever.be:443"}))
}

async fn get_matrix_client() -> impl IntoResponse {
    Json(json!({"m.homeserver": {"base_url": "https://matrix.rustybever.be"}}))
}
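A hypothetical unit test for the new matrix module, using tower's oneshot helper in the style of axum's own testing examples; it is not part of this commit, and the exact crate versions are assumed.

#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;
    use axum::http::{Request, StatusCode};
    use tower::ServiceExt; // provides `oneshot`

    #[tokio::test]
    async fn matrix_server_route_responds() {
        // Send a single request straight into the router without binding a socket
        let response = router()
            .oneshot(
                Request::builder()
                    .uri("/.well-known/matrix/server")
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
    }
}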