Compare commits

...

5 Commits

Author       SHA1        Message                                                                       Date
Jef Roosens  af9902f9f4  Merge branch 'dev' of git.rustybever.be:Chewing_Bever/site-backend into dev   2022-04-02 21:03:41 +02:00
Jef Roosens  869c629242  Merge branch 'dev' of git.rustybever.be:Chewing_Bever/site-backend into dev   2022-04-02 21:03:06 +02:00
Jef Roosens  26cd2e1bea  Added CI lint job                                                             2022-04-02 21:02:15 +02:00
Jef Roosens  f77be877db  Added rustfmt config & ran formatter                                          2022-04-02 21:00:38 +02:00
Jef Roosens  8664453ef7  Split code into modules                                                       2022-04-02 20:56:51 +02:00
7 changed files with 214 additions and 112 deletions

@@ -1,5 +1,5 @@
-platform: linux/amd64
-branches: main
+platform: 'linux/amd64'
+branches: 'main'
 
 pipeline:
   release:

@@ -0,0 +1,7 @@
+platform: 'linux/amd64'
+
+pipeline:
+  lint:
+    image: 'rustlang/rust:nightly'
+    commands:
+      - cargo fmt -- --check

rustfmt.toml 100644 (+68)

@@ -0,0 +1,68 @@
+unstable_features = true
+binop_separator = "Front"
+blank_lines_lower_bound = 0
+blank_lines_upper_bound = 1
+# Trying something new
+brace_style = "AlwaysNextLine"
+color = "Auto"
+combine_control_expr = false
+comment_width = 80
+condense_wildcard_suffixes = false
+control_brace_style = "AlwaysSameLine"
+disable_all_formatting = false
+edition = "2018"
+emit_mode = "Files"
+empty_item_single_line = true
+enum_discrim_align_threshold = 0
+error_on_line_overflow = false
+error_on_unformatted = false
+fn_args_layout = "Tall"
+fn_single_line = false
+force_explicit_abi = true
+force_multiline_blocks = false
+format_code_in_doc_comments = false
+format_macro_bodies = true
+format_macro_matchers = false
+format_strings = false
+group_imports = "StdExternalCrate"
+hard_tabs = false
+hide_parse_errors = false
+ignore = []
+imports_granularity = "Crate"
+imports_indent = "Block"
+imports_layout = "Mixed"
+indent_style = "Block"
+inline_attribute_width = 0
+license_template_path = ""
+make_backup = false
+match_arm_blocks = true
+match_arm_leading_pipes = "Never"
+match_block_trailing_comma = true
+max_width = 100
+merge_derives = true
+newline_style = "Auto"
+normalize_comments = false
+normalize_doc_attributes = false
+overflow_delimited_expr = false
+remove_nested_parens = true
+reorder_impl_items = false
+reorder_imports = true
+reorder_modules = true
+report_fixme = "Always"
+report_todo = "Always"
+skip_children = false
+space_after_colon = true
+space_before_colon = false
+spaces_around_ranges = false
+struct_field_align_threshold = 0
+struct_lit_single_line = true
+tab_spaces = 4
+trailing_comma = "Vertical"
+trailing_semicolon = true
+type_punctuation_density = "Wide"
+use_field_init_shorthand = false
+use_small_heuristics = "Default"
+use_try_shorthand = false
+version = "One"
+where_single_line = false
+wrap_comments = false
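
Several of these options are visible in the rest of this diff: brace_style = "AlwaysNextLine" is what puts the opening brace of items such as async fn main() on its own line, while control_brace_style = "AlwaysSameLine" keeps if/for/match braces inline. As a quick, hypothetical illustration (not a file from this repository) of how rustfmt should lay code out under this configuration:

// Item braces move to the next line (brace_style = "AlwaysNextLine").
fn sum_nonzero(values: &[i32]) -> i32
{
    let mut sum = 0;

    // Control-flow braces stay on the same line
    // (control_brace_style = "AlwaysSameLine").
    for v in values {
        match v {
            0 => continue,
            n => {
                sum += n;
            }, // Block arms keep their comma (match_block_trailing_comma = true).
        }
    }

    sum
}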

src/api/deploy.rs 100644 (+96)

@@ -0,0 +1,96 @@
+use std::{collections::HashSet, io::ErrorKind, path::Path};
+
+use axum::{
+    extract::{BodyStream, Extension},
+    http::StatusCode,
+    response::IntoResponse,
+};
+use flate2::read::GzDecoder;
+use futures_util::TryStreamExt;
+use tar::Archive;
+use tokio_util::io::StreamReader;
+
+use crate::STATIC_DIR_NAME;
+
+pub async fn post_deploy(
+    Extension(data_dir): Extension<String>,
+    res: BodyStream,
+) -> impl IntoResponse
+{
+    // This converts a stream into something that implements AsyncRead, which we can then use to
+    // asynchronously write the file to disk
+    let mut read =
+        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
+    let uuid = uuid::Uuid::new_v4();
+    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
+    let mut file = tokio::fs::File::create(&file_path).await.unwrap();
+    tokio::io::copy(&mut read, &mut file).await;
+
+    // Extract the contents of the tarball synchronously
+    match tokio::task::spawn_blocking(move || {
+        let file = match std::fs::File::open(file_path) {
+            Ok(v) => v,
+            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
+        };
+        let tar = GzDecoder::new(file);
+        let mut archive = Archive::new(tar);
+        let mut paths = HashSet::new();
+
+        let entries = match archive.entries() {
+            Ok(e) => e,
+            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
+        };
+
+        // Extract each entry into the output directory
+        let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
+
+        for entry_res in entries {
+            if let Ok(mut entry) = entry_res {
+                if let Err(_) = entry.unpack_in(&static_dir) {
+                    return StatusCode::INTERNAL_SERVER_ERROR;
+                }
+
+                if let Ok(path) = entry.path() {
+                    paths.insert(path.into_owned());
+                }
+            } else {
+                return StatusCode::INTERNAL_SERVER_ERROR;
+            }
+        }
+
+        // Remove any old files that weren't present in new archive
+        let mut items = vec![];
+
+        // Start by populating the vec with the initial files
+        let iter = match static_dir.read_dir() {
+            Ok(v) => v,
+            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
+        };
+        iter.filter_map(|r| r.ok())
+            .for_each(|e| items.push(e.path()));
+
+        // As long as there are still items in the vec, we keep going
+        while items.len() > 0 {
+            let item = items.pop().unwrap();
+            tracing::debug!("{:?}", item);
+
+            if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
+                if item.is_dir() {
+                    std::fs::remove_dir_all(item);
+                } else {
+                    std::fs::remove_file(item);
+                }
+            } else if let Ok(iter) = item.read_dir() {
+                iter.filter_map(|r| r.ok())
+                    .for_each(|e| items.push(e.path()));
+            }
+        }
+
+        StatusCode::OK
+    })
+    .await
+    {
+        Ok(s) => s,
+        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
+    }
+}
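
The handler above expects a gzipped tarball as the raw request body, POSTed to the /api/deploy route that the application's main router (further below in this diff) guards with RequireAuthorizationLayer::bearer. A minimal client sketch, assuming reqwest with the "blocking" feature as a dependency; the address, file name and key are placeholders, since the real bind address and API key are not shown in this diff:

use std::{fs::File, io::Read};

fn main() -> Result<(), Box<dyn std::error::Error>>
{
    // Read a gzipped tarball (e.g. produced by "tar czf site.tar.gz ...") into memory.
    let mut body = Vec::new();
    File::open("site.tar.gz")?.read_to_end(&mut body)?;

    // Send it as the raw request body; the bearer token must match the server's API key.
    let res = reqwest::blocking::Client::new()
        .post("http://localhost:3000/api/deploy")
        .bearer_auth("<api key>")
        .body(body)
        .send()?;

    println!("deploy returned {}", res.status());

    Ok(())
}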

src/api/mod.rs 100644 (+8)

@@ -0,0 +1,8 @@
+use axum::{routing::post, Router};
+
+mod deploy;
+
+pub fn router() -> Router
+{
+    Router::new().route("/deploy", post(deploy::post_deploy))
+}

@@ -1,31 +1,21 @@
-use axum::{
-    extract::{BodyStream, Extension},
-    http::StatusCode,
-    response::{IntoResponse, Json},
-    routing::{get, get_service, post},
-    Router,
-};
-use flate2::read::GzDecoder;
-use futures_util::TryStreamExt;
-use serde_json::{json, Value};
-use std::collections::HashSet;
-use std::io::ErrorKind;
-use std::net::SocketAddr;
-use std::path::Path;
-use tar::Archive;
-use tokio_util::io::StreamReader;
+use std::net::SocketAddr;
+
+use axum::{extract::Extension, http::StatusCode, routing::get_service, Router};
 use tower_http::{auth::RequireAuthorizationLayer, services::ServeDir, trace::TraceLayer};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 
+mod api;
+mod matrix;
+
 const STATIC_DIR_NAME: &str = "static";
 
 #[tokio::main]
-async fn main() {
+async fn main()
+{
     // Enable tracing
     tracing_subscriber::registry()
         .with(tracing_subscriber::EnvFilter::new(
-            std::env::var("RUST_LOG")
-                .unwrap_or_else(|_| "site_backend=debug,tower_http=debug".into()),
+            std::env::var("RUST_LOG").unwrap_or_else(|_| "site=debug,tower_http=debug".into()),
         ))
         .with(tracing_subscriber::fmt::layer())
        .init();
@@ -38,11 +28,12 @@ async fn main() {
     std::fs::create_dir_all(&static_dir);
 
     let app = Router::new()
-        .route("/.well-known/matrix/server", get(get_matrix_server))
-        .route("/.well-known/matrix/client", get(get_matrix_client))
-        .route(
-            "/api/deploy",
-            post(post_deploy).layer(RequireAuthorizationLayer::bearer(&api_key)),
+        // Handle Matrix .well-known files
+        .nest("/", matrix::router())
+        // Routes under /api path
+        .nest(
+            "/api",
+            api::router().layer(RequireAuthorizationLayer::bearer(&api_key)),
         )
         // The fallback option is to serve the actual static files
         .fallback(get_service(ServeDir::new(static_dir)).handle_error(
@@ -63,90 +54,3 @@ async fn main() {
         .await
         .unwrap();
 }
-
-async fn get_matrix_server() -> impl IntoResponse {
-    Json(json!({"m.server": "matrix.rustybever.be:443"}))
-}
-
-async fn get_matrix_client() -> impl IntoResponse {
-    Json(json!({"m.homeserver": {"base_url": "https://matrix.rustybever.be"}}))
-}
-
-async fn post_deploy(Extension(data_dir): Extension<String>, res: BodyStream) -> impl IntoResponse {
-    // This converts a stream into something that implements AsyncRead, which we can then use to
-    // asynchronously write the file to disk
-    let mut read =
-        StreamReader::new(res.map_err(|axum_err| std::io::Error::new(ErrorKind::Other, axum_err)));
-    let uuid = uuid::Uuid::new_v4();
-    let file_path = Path::new(&data_dir).join(uuid.as_hyphenated().to_string());
-    let mut file = tokio::fs::File::create(&file_path).await.unwrap();
-    tokio::io::copy(&mut read, &mut file).await;
-
-    // Extract the contents of the tarball synchronously
-    match tokio::task::spawn_blocking(move || {
-        let file = match std::fs::File::open(file_path) {
-            Ok(v) => v,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-        let tar = GzDecoder::new(file);
-        let mut archive = Archive::new(tar);
-        let mut paths = HashSet::new();
-
-        let entries = match archive.entries() {
-            Ok(e) => e,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-
-        // Extract each entry into the output directory
-        let static_dir = Path::new(&data_dir).join(STATIC_DIR_NAME);
-
-        for entry_res in entries {
-            if let Ok(mut entry) = entry_res {
-                if let Err(_) = entry.unpack_in(&static_dir) {
-                    return StatusCode::INTERNAL_SERVER_ERROR;
-                }
-
-                if let Ok(path) = entry.path() {
-                    paths.insert(path.into_owned());
-                }
-            } else {
-                return StatusCode::INTERNAL_SERVER_ERROR;
-            }
-        }
-
-        // Remove any old files that weren't present in new archive
-        let mut items = vec![];
-
-        // Start by populating the vec with the initial files
-        let iter = match static_dir.read_dir() {
-            Ok(v) => v,
-            Err(_) => return StatusCode::INTERNAL_SERVER_ERROR,
-        };
-        iter.filter_map(|r| r.ok())
-            .for_each(|e| items.push(e.path()));
-
-        // As long as there are still items in the vec, we keep going
-        while items.len() > 0 {
-            let item = items.pop().unwrap();
-            tracing::debug!("{:?}", item);
-
-            if !paths.contains(item.strip_prefix(&static_dir).unwrap()) {
-                if item.is_dir() {
-                    std::fs::remove_dir_all(item);
-                } else {
-                    std::fs::remove_file(item);
-                }
-            } else if let Ok(iter) = item.read_dir() {
-                iter.filter_map(|r| r.ok())
-                    .for_each(|e| items.push(e.path()));
-            }
-        }
-
-        StatusCode::OK
-    })
-    .await
-    {
-        Ok(s) => s,
-        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
-    }
-}

src/matrix.rs 100644 (+19)

@@ -0,0 +1,19 @@
+use axum::{response::IntoResponse, routing::get, Json, Router};
+use serde_json::json;
+
+pub fn router() -> Router
+{
+    Router::new()
+        .route("/.well-known/matrix/server", get(get_matrix_server))
+        .route("/.well-known/matrix/client", get(get_matrix_client))
+}
+
+async fn get_matrix_server() -> impl IntoResponse
+{
+    Json(json!({"m.server": "matrix.rustybever.be:443"}))
+}
+
+async fn get_matrix_client() -> impl IntoResponse
+{
+    Json(json!({"m.homeserver": {"base_url": "https://matrix.rustybever.be"}}))
+}
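
Since matrix::router() needs no extensions or state, it can be exercised on its own. A sketch of a possible unit test for this module, assuming tower is available as a dev-dependency (its ServiceExt::oneshot drives an axum Router without binding a socket):

#[cfg(test)]
mod tests
{
    use axum::{
        body::Body,
        http::{Request, StatusCode},
    };
    use tower::ServiceExt;

    #[tokio::test]
    async fn serves_matrix_server_well_known()
    {
        // Fire a single request through the router and check the response code.
        let res = super::router()
            .oneshot(
                Request::builder()
                    .uri("/.well-known/matrix/server")
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(res.status(), StatusCode::OK);
    }
}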