Compare commits


5 Commits

Author       SHA1        Message                                                                       Date

Jef Roosens  1cca0d46de  feat: add compression layer                                                   2025-01-01 22:00:02 +01:00
                         (CI: ci/woodpecker/push/build passed; ci/woodpecker/push/lint failed; ci/woodpecker/push/deploy failed)
Jef Roosens  1203a34f8f  fix: some small bugs                                                          2025-01-01 21:49:56 +01:00
Jef Roosens  6610442bf1  feat: add error handling; update dockerfile                                   2025-01-01 21:17:18 +01:00
Jef Roosens  391e45c09d  feat: update all deps; implement arbitrary path static file hosting          2025-01-01 19:29:44 +01:00
Jef Roosens  03bc88e7c3  refactor: remove metric collector; reorganize stuff; temporarily remove api  2025-01-01 17:26:16 +01:00
10 changed files with 1095 additions and 656 deletions

.woodpecker.yml

@@ -4,7 +4,7 @@ branches:
 pipeline:
   build:
-    image: 'rust:1.69-alpine3.16'
+    image: 'rust:1.83-alpine3.21'
     commands:
       - apk add build-base
       - cargo build
Cargo.lock (generated, 1362 changed lines)

File diff suppressed because it is too large.

Cargo.toml

@@ -11,18 +11,18 @@ name = "site"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-axum = { version = "0.6.18" }
-hyper = { version = "0.14.26" }
-tokio = { version = "1.28.0", features = ["full"] }
-tracing = "0.1.37"
-tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
-tower-http = { version = "0.4.0", features = ["fs", "trace", "auth"] }
-tar = "0.4.38"
-flate2 = "1.0.26"
-tokio-util = { version = "0.7.8", features = ["io"] }
-futures-util = "0.3.28"
-uuid = { version = "1.3.2", features = ["v4"] }
-serde_json = "1.0.96"
-metrics = "0.21.0"
-metrics-exporter-prometheus = "0.12.0"
+axum = "0.8.1"
+hyper = { version = "1.5.2" }
+tokio = { version = "1.42.0", features = ["full"] }
+tracing = "0.1.41"
+tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
+tower-http = { version = "0.6.2", features = ["fs", "trace", "auth", "compression-br", "compression-gzip"] }
+tar = "0.4.43"
+flate2 = "1.0.35"
+tokio-util = { version = "0.7.13", features = ["io"] }
+futures-util = "0.3.31"
+uuid = { version = "1.11.0", features = ["v4"] }
+serde_json = "1.0.134"
+metrics = "0.24.1"
+metrics-exporter-prometheus = "0.16.0"
+serde = { version = "1.0", features = ["derive"] }
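
Several of these are major-version jumps rather than routine bumps. In particular, axum 0.6 → 0.8 (together with hyper 0.14 → 1.x) removed axum::Server in favour of axum::serve driven by a tokio::net::TcpListener, and changed wildcard route syntax from "/*path" to "/{*path}"; both changes show up in the src/main.rs and src/server/mod.rs diffs below. A minimal standalone sketch of the new serving pattern (the echo handler is illustrative only, not code from this repo):

use axum::{extract::Path, routing::get, Router};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() {
    // axum 0.8 wildcard syntax: "/{*path}" replaces the old "/*path"
    let app: Router = Router::new().route(
        "/{*path}",
        get(|Path(path): Path<String>| async move { format!("requested: {path}") }),
    );

    // axum::serve replaces the removed axum::Server::bind(...).serve(...)
    let listener = TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}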

Dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.69-alpine3.16 AS builder
+FROM rust:1.83-alpine3.21 AS builder

 ARG DI_VER=1.2.5
@@ -13,12 +13,16 @@ RUN wget -O - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.ta
     mv dumb-init .. && \
     cd ..

 COPY Cargo.toml Cargo.lock ./
+RUN cargo fetch --locked
 COPY . ./
-RUN cargo build --release
+RUN cargo build --release --frozen

-FROM alpine:3.16
+FROM alpine:3.21

 COPY --from=builder /app/target/release/site /bin/site
 COPY --from=builder /app/dumb-init /bin/dumb-init

src/api/deploy.rs (deleted)

@@ -1,98 +0,0 @@
use std::{collections::HashSet, io::ErrorKind, path::Path};

use axum::{
    extract::{BodyStream, Extension, Query},
    http::StatusCode,
    response::IntoResponse,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use serde::Deserialize;
use std::io;
use tar::Archive;
use tokio_util::io::StreamReader;

use crate::{DEFAULT_STATIC_SITE, STATIC_DIR_NAME};

#[derive(Deserialize)]
pub struct StaticDirParams {
    dir: Option<String>,
}

pub async fn post_deploy(
    Extension(data_dir): Extension<String>,
    Query(params): Query<StaticDirParams>,
    res: BodyStream,
) -> crate::Result<()> {
    // This converts a stream into something that implements AsyncRead, which we can then use to
    // asynchronously write the file to disk
    let mut read =
        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));

    let uuid = uuid::Uuid::new_v4();
    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
    let mut file = tokio::fs::File::create(&file_path).await?;

    tokio::io::copy(&mut read, &mut file).await?;

    // If no dir is provided, we use the default one. Otherwise, use the provided one.
    let static_path = Path::new(&data_dir)
        .join(STATIC_DIR_NAME)
        .join(params.dir.unwrap_or(DEFAULT_STATIC_SITE.to_string()));

    // Make sure the static directory exists
    tokio::fs::create_dir_all(&static_path).await?;

    let fp_clone = file_path.clone();
    // Extract the contents of the tarball synchronously
    tokio::task::spawn_blocking(move || process_archive(&fp_clone, &static_path)).await??;

    // Remove archive file after use
    tokio::fs::remove_file(&file_path).await?;

    Ok(())
}

fn process_archive(archive_path: &Path, static_dir: &Path) -> io::Result<()> {
    let file = std::fs::File::open(archive_path)?;
    let tar = GzDecoder::new(file);
    let mut archive = Archive::new(tar);

    let mut paths = HashSet::new();

    let entries = archive.entries()?;
    // Extract each entry into the output directory
    for entry in entries {
        let mut entry = entry?;
        entry.unpack_in(static_dir)?;

        if let Ok(path) = entry.path() {
            paths.insert(path.into_owned());
        }
    }

    // Remove any old files that weren't present in new archive
    let mut items = vec![];

    // Start by populating the vec with the initial files
    let iter = static_dir.read_dir()?;
    iter.filter_map(|r| r.ok())
        .for_each(|e| items.push(e.path()));

    // As long as there are still items in the vec, we keep going
    while !items.is_empty() {
        let item = items.pop().unwrap();
        tracing::debug!("{:?}", item);

        if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
            if item.is_dir() {
                std::fs::remove_dir_all(item)?;
            } else {
                std::fs::remove_file(item)?;
            }
        } else if let Ok(iter) = item.read_dir() {
            iter.filter_map(|r| r.ok())
                .for_each(|e| items.push(e.path()));
        }
    }

    Ok(())
}

src/api/mod.rs (deleted)

@@ -1,7 +0,0 @@
use axum::{routing::post, Router};

mod deploy;

pub fn router() -> Router {
    Router::new().route("/deploy", post(deploy::post_deploy))
}

src/error.rs

@@ -25,6 +25,8 @@ impl Error for ServerError {}
 impl IntoResponse for ServerError {
     fn into_response(self) -> Response {
         tracing::error!("{}", self);

+        match self {
+            ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
+            ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
src/main.rs

@@ -1,28 +1,32 @@
-mod api;
 mod error;
-mod matrix;
-mod metrics;
+// mod metrics;
+mod server;

 pub use error::Result;

+use tokio::net::TcpListener;
-use std::{future::ready, net::SocketAddr};
+use std::{net::SocketAddr, path::PathBuf};

-use axum::{
-    extract::Extension,
-    middleware,
-    response::Redirect,
-    routing::{any, get},
-    Router,
-};
-use tower_http::{
-    services::ServeDir, trace::TraceLayer, validate_request::ValidateRequestHeaderLayer,
-};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

 /// Name of the directory where static sites are stored inside the data directory
 const STATIC_DIR_NAME: &str = "static";
-/// Name of the subdir of STATIC_DIR_NAME where the default (fallback) site is located
-const DEFAULT_STATIC_SITE: &str = "default";
+const STATIC_ROOT_NAME: &str = "_root";
+
+const REDIRECTS: [(&str, &str); 6] = [
+    ("/github", "https://github.com/ChewingBever"),
+    ("/gitea", "https://git.rustybever.be/Chewing_Bever"),
+    ("/gitlab", "https://gitlab.com/Chewing_Bever"),
+    ("/codeberg", "https://codeberg.org/Chewing_Bever"),
+    ("/matrix", "https://matrix.to/#/@jef:rustybever.be"),
+    ("/aur", "https://aur.archlinux.org/account/Chewing_Bever"),
+];
+
+#[derive(Clone)]
+pub struct Context {
+    static_dir: PathBuf,
+    tmp_dir: PathBuf,
+}

 #[tokio::main]
 async fn main() {
@@ -36,66 +40,22 @@ async fn main() {
     // Get required variables from env vars
     let api_key = std::env::var("API_KEY").expect("No API_KEY was provided.");
-    let data_dir = std::env::var("DATA_DIR").expect("No DATA_DIR was provided.");
-    let static_dir = format!("{}/{}", data_dir, STATIC_DIR_NAME);
+    let data_dir = PathBuf::from(std::env::var("DATA_DIR").expect("No DATA_DIR was provided."));
+    let static_dir = data_dir.join(STATIC_DIR_NAME);

-    std::fs::create_dir_all(&static_dir);
+    std::fs::create_dir_all(&static_dir).unwrap();
+
+    let state = Context {
+        static_dir,
+        tmp_dir: std::env::temp_dir(),
+    };

-    // Initialize metrics
-    let recorder_handle = metrics::setup_metrics_recorder();
-
-    let mut app = Router::new()
-        // Handle Matrix .well-known files
-        .nest("/", matrix::router())
-        // Routes under /api path
-        .nest(
-            "/api",
-            api::router().layer(ValidateRequestHeaderLayer::bearer(&api_key)),
-        )
-        .route("/metrics", get(move || ready(recorder_handle.render())));
-
-    // Each static site gets mounted explicitly so that the default site can be used as fallback
-    // Each entry is of the form (route, static dir name)
-    let sites = [
-        ("/docs/vieter", "docs-vieter"),
-        ("/api-docs/vieter", "api-docs-vieter"),
-        ("/man/vieter", "man-vieter"),
-    ];
-
-    for (path, dir) in sites {
-        let full_path = format!("{}/{}", static_dir, dir);
-        app = app.nest_service(path, ServeDir::new(full_path));
-    }
-
-    // Define some redirects
-    let redirects = [
-        ("/github", "https://github.com/ChewingBever"),
-        ("/gitea", "https://git.rustybever.be/Chewing_Bever"),
-        ("/gitlab", "https://gitlab.com/Chewing_Bever"),
-        ("/codeberg", "https://codeberg.org/Chewing_Bever"),
-        ("/matrix", "https://matrix.to/#/@jef:rustybever.be"),
-        ("/aur", "https://aur.archlinux.org/account/Chewing_Bever"),
-    ];
-
-    for (path, url) in redirects {
-        app = app.route(path, any(|| async { Redirect::permanent(url) }))
-    }
-
-    app = app
-        // The fallback option is to serve the actual static files
-        .fallback_service(ServeDir::new(format!(
-            "{}/{}",
-            static_dir, DEFAULT_STATIC_SITE
-        )))
-        .layer(middleware::from_fn(metrics::track_metrics))
-        .layer(Extension(data_dir))
-        .layer(TraceLayer::new_for_http());
+    // let recorder_handle = metrics::setup_metrics_recorder();
+
+    let app = server::app(state, &api_key, &REDIRECTS);

     let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
+    let listener = TcpListener::bind(addr).await.unwrap();
     tracing::debug!("listening on {}", addr);
-    axum::Server::bind(&addr)
-        .serve(app.into_make_service())
-        .await
-        .unwrap();
+    axum::serve(listener, app).await.unwrap();
 }
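
Because server::app (next file) returns a plain Router, the new wiring can be smoke-tested without binding a socket. A sketch using tower::ServiceExt::oneshot, assuming tower is available as a dev-dependency (it is not listed in the Cargo.toml above) and that the test module lives inside this crate:

#[cfg(test)]
mod tests {
    use axum::{
        body::Body,
        http::{Request, StatusCode},
    };
    use tower::ServiceExt; // provides `oneshot`

    #[tokio::test]
    async fn redirects_respond_with_308() {
        let ctx = crate::Context {
            static_dir: std::env::temp_dir(),
            tmp_dir: std::env::temp_dir(),
        };
        let app = crate::server::app(ctx, "test-key", &crate::REDIRECTS);

        // Drive the router directly with a single request; no TCP involved
        let res = app
            .oneshot(Request::builder().uri("/github").body(Body::empty()).unwrap())
            .await
            .unwrap();

        // Redirect::permanent produces 308 Permanent Redirect
        assert_eq!(res.status(), StatusCode::PERMANENT_REDIRECT);
    }
}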

src/server/mod.rs (new file, 138 lines)

@@ -0,0 +1,138 @@
mod matrix;

use axum::{
    body::Body,
    extract::{self, State},
    response::Redirect,
    routing::{any, post},
    Router,
};
use flate2::read::GzDecoder;
use futures_util::TryStreamExt;
use tar::Archive;
use tokio::fs::File;
use tokio_util::io::StreamReader;
use tower_http::{
    compression::CompressionLayer, services::ServeDir, trace::TraceLayer,
    validate_request::ValidateRequestHeaderLayer,
};

use std::{
    io,
    path::{Path, PathBuf},
};

use crate::{error::Result, STATIC_ROOT_NAME};

pub fn app(
    ctx: crate::Context,
    api_key: &str,
    redirects: &[(&'static str, &'static str)],
) -> Router {
    // We first try to route the request according to the contents of the root directory. If the
    // file doesn't exist, then we look for it in the other directories.
    let serve_dir = ServeDir::new(ctx.static_dir.join(STATIC_ROOT_NAME))
        .append_index_html_on_directories(true)
        .fallback(ServeDir::new(ctx.static_dir.clone()).append_index_html_on_directories(true));

    let mut app = Router::new()
        .route_service("/", serve_dir.clone())
        .route(
            "/{*path}",
            post(post_static_archive)
                .delete(delete_dir)
                .route_layer(ValidateRequestHeaderLayer::bearer(api_key))
                .get_service(serve_dir),
        )
        .with_state(ctx.clone())
        .merge(matrix::router());

    for (path, url) in redirects.iter() {
        app = app.route(path, any(|| async { Redirect::permanent(url) }))
    }

    app.layer(CompressionLayer::new().gzip(true).br(true))
        .layer(TraceLayer::new_for_http())
}

pub async fn post_static_archive(
    State(ctx): State<crate::Context>,
    extract::Path(path): extract::Path<String>,
    body: Body,
) -> Result<()> {
    // Copy tarball data to file for parsing
    let stream = body.into_data_stream();
    let mut reader = StreamReader::new(stream.map_err(io::Error::other));

    let uuid = uuid::Uuid::new_v4();
    let ar_path = ctx.tmp_dir.join(uuid.to_string());

    let mut f = File::create(&ar_path).await?;
    tokio::io::copy(&mut reader, &mut f).await?;

    // Root is stored in its own specific directory, as otherwise it would wipe all other uploaded
    // directories every time it's updated
    let dest_dir = if path.is_empty() {
        String::from(crate::STATIC_ROOT_NAME)
    } else {
        path
    };
    let dest_dir = ctx.static_dir.join(dest_dir);

    tokio::task::spawn_blocking(move || process_archive(&ar_path, &dest_dir))
        .await
        .unwrap()?;

    Ok(())
}

fn process_archive(ar_path: &Path, dest_dir: &Path) -> io::Result<()> {
    let f = std::fs::File::open(ar_path)?;
    let tar = GzDecoder::new(f);
    let mut ar = Archive::new(tar);

    // trim possible trailing slash from path
    let dest_dir = PathBuf::from(dest_dir.to_string_lossy().trim_end_matches('/'));

    // extract extension and append '.new' to form new extension
    let ext = dest_dir
        .extension()
        .map(|ext| ext.to_string_lossy().to_string())
        .unwrap_or(String::from(""));
    let new_dir = dest_dir.with_extension(format!("{ext}.new"));

    // Directory might be left behind by previous failed upload
    if new_dir.try_exists()? {
        std::fs::remove_dir_all(&new_dir)?;
    }

    // Unpack archive into new directory
    std::fs::create_dir_all(&new_dir)?;
    ar.unpack(&new_dir)?;

    // Replace original directory with new one
    if dest_dir.try_exists()? {
        std::fs::remove_dir_all(&dest_dir)?;
    }
    std::fs::rename(new_dir, dest_dir)?;
    std::fs::remove_file(ar_path)?;

    Ok(())
}

pub async fn delete_dir(
    State(ctx): State<crate::Context>,
    extract::Path(path): extract::Path<String>,
) -> Result<()> {
    let dest_dir = if path.is_empty() {
        String::from(crate::STATIC_ROOT_NAME)
    } else {
        path
    };
    let dest_dir = ctx.static_dir.join(dest_dir);

    tokio::fs::remove_dir_all(dest_dir).await?;

    Ok(())
}
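
With this router in place, deploying a site amounts to POSTing a gzipped tarball to the path it should be served under, authenticated with the bearer API key; a DELETE to the same path removes the deployed directory again. A client-side sketch using the tar and flate2 crates plus reqwest (the reqwest dependency, the URL, the local ./public directory, and the key are illustrative assumptions; the server only cares about the request shape):

use flate2::{write::GzEncoder, Compression};
use tar::Builder;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Pack ./public into an in-memory .tar.gz
    let mut ar = Builder::new(GzEncoder::new(Vec::new(), Compression::default()));
    ar.append_dir_all(".", "./public")?;
    let tarball = ar.into_inner()?.finish()?;

    // POST it to the path the site should be hosted under; the bearer token
    // must match the server's API_KEY. URL and key are placeholders.
    let res = reqwest::Client::new()
        .post("https://example.com/docs/mysite")
        .bearer_auth("my-api-key")
        .body(tarball)
        .send()
        .await?;

    assert!(res.status().is_success());
    Ok(())
}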