feat(server): correctly serve repo files
ci/woodpecker/push/lint Pipeline was successful Details
ci/woodpecker/push/clippy Pipeline failed Details
ci/woodpecker/push/build Pipeline was successful Details

repo-db
Jef Roosens 2023-08-02 21:15:12 +02:00
parent 2e5c84a48d
commit af27b06df1
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
5 changed files with 99 additions and 42 deletions

View File

@ -14,3 +14,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Serve packages from any number of repositories & architectures
* Publish packages to and delete packages from repositories using HTTP
  requests
* Packages of architecture "any" are part of every architecture's
database

View File

@ -58,7 +58,7 @@ impl Cli {
// build our application with a single route // build our application with a single route
let app = Router::new() let app = Router::new()
.merge(crate::repo::router(&global)) .merge(crate::repo::router())
.with_state(global) .with_state(global)
.layer(TraceLayer::new_for_http()); .layer(TraceLayer::new_for_http());

View File

@ -10,6 +10,7 @@ pub type Result<T> = std::result::Result<T, ServerError>;
pub enum ServerError { pub enum ServerError {
IO(io::Error), IO(io::Error),
Axum(axum::Error), Axum(axum::Error),
Status(StatusCode),
} }
impl fmt::Display for ServerError { impl fmt::Display for ServerError {
@ -17,6 +18,7 @@ impl fmt::Display for ServerError {
match self { match self {
ServerError::IO(err) => write!(fmt, "{}", err), ServerError::IO(err) => write!(fmt, "{}", err),
ServerError::Axum(err) => write!(fmt, "{}", err), ServerError::Axum(err) => write!(fmt, "{}", err),
ServerError::Status(status) => write!(fmt, "{}", status),
} }
} }
} }
@ -30,6 +32,7 @@ impl IntoResponse for ServerError {
match self { match self {
ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Status(status) => status.into_response(),
} }
} }
} }
@ -51,3 +54,9 @@ impl From<tokio::task::JoinError> for ServerError {
ServerError::IO(err.into()) ServerError::IO(err.into())
} }
} }
impl From<StatusCode> for ServerError {
fn from(status: StatusCode) -> Self {
Self::Status(status)
}
}

View File

@ -5,7 +5,7 @@ use std::fs;
use std::io; use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
const ANY_ARCH: &str = "any"; pub const ANY_ARCH: &str = "any";
/// Overarching abstraction that orchestrates updating the repositories stored on the server /// Overarching abstraction that orchestrates updating the repositories stored on the server
pub struct RepoGroupManager { pub struct RepoGroupManager {
@ -18,7 +18,7 @@ fn parse_pkg_filename(file_name: &str) -> (String, &str, &str, &str) {
let name = name_parts[..name_parts.len() - 3].join("-"); let name = name_parts[..name_parts.len() - 3].join("-");
let version = name_parts[name_parts.len() - 3]; let version = name_parts[name_parts.len() - 3];
let release = name_parts[name_parts.len() - 2]; let release = name_parts[name_parts.len() - 2];
let (arch, _) = name_parts[name_parts.len() - 1].split_once(".").unwrap(); let (arch, _) = name_parts[name_parts.len() - 1].split_once('.').unwrap();
(name, version, release, arch) (name, version, release, arch)
} }
@ -48,17 +48,13 @@ impl RepoGroupManager {
// All architectures should also include the "any" architecture, except for the "any" // All architectures should also include the "any" architecture, except for the "any"
// architecture itself. // architecture itself.
let any_entries_iter = if arch != ANY_ARCH {
let repo_any_dir = self.repo_dir.join(repo).join(ANY_ARCH); let repo_any_dir = self.repo_dir.join(repo).join(ANY_ARCH);
if repo_any_dir.try_exists()? { let any_entries_iter = if arch != ANY_ARCH && repo_any_dir.try_exists()? {
Some(repo_any_dir.read_dir()?) Some(repo_any_dir.read_dir()?)
} else { } else {
None None
} }
} else {
None
}
.into_iter() .into_iter()
.flatten(); .flatten();
@ -115,8 +111,13 @@ impl RepoGroupManager {
self.add_pkg(repo, &pkg)?; self.add_pkg(repo, &pkg)?;
// After successfully adding the package, we move it to the packages directory // After successfully adding the package, we move it to the packages directory
let dest_pkg_path = self.pkg_dir.join(repo).join(pkg.file_name()); let dest_pkg_path = self
fs::create_dir_all(self.pkg_dir.join(repo))?; .pkg_dir
.join(repo)
.join(&pkg.info.arch)
.join(pkg.file_name());
fs::create_dir_all(dest_pkg_path.parent().unwrap())?;
fs::rename(&path, dest_pkg_path) fs::rename(&path, dest_pkg_path)
} }
@ -151,7 +152,7 @@ impl RepoGroupManager {
} }
pub fn remove_repo(&mut self, repo: &str) -> io::Result<bool> { pub fn remove_repo(&mut self, repo: &str) -> io::Result<bool> {
let repo_dir = self.repo_dir.join(&repo); let repo_dir = self.repo_dir.join(repo);
if !repo_dir.exists() { if !repo_dir.exists() {
Ok(false) Ok(false)
@ -172,18 +173,7 @@ impl RepoGroupManager {
} }
fs::remove_dir_all(&repo_dir)?; fs::remove_dir_all(&repo_dir)?;
fs::remove_dir_all(self.pkg_dir.join(sub_path))?;
// Remove every package archive for the architecture
for entry in self.pkg_dir.join(repo).read_dir()? {
let entry = entry?;
let file_name = entry.file_name();
let file_name = file_name.to_string_lossy();
let (_, _, _, pkg_arch) = parse_pkg_filename(&file_name);
if arch == pkg_arch {
fs::remove_file(entry.path())?;
}
}
// Removing the "any" architecture updates all other repositories // Removing the "any" architecture updates all other repositories
if arch == ANY_ARCH { if arch == ANY_ARCH {
@ -226,15 +216,15 @@ impl RepoGroupManager {
fs::remove_dir_all(entry.path())?; fs::remove_dir_all(entry.path())?;
// Also remove the old package archive // Also remove the old package archive
let repo_pkg_dir = self.pkg_dir.join(repo); let repo_arch_pkg_dir = self.pkg_dir.join(repo).join(arch);
repo_pkg_dir.read_dir()?.try_for_each(|res| { repo_arch_pkg_dir.read_dir()?.try_for_each(|res| {
res.and_then(|entry: fs::DirEntry| { res.and_then(|entry: fs::DirEntry| {
let file_name = entry.file_name(); let file_name = entry.file_name();
let file_name = file_name.to_string_lossy(); let file_name = file_name.to_string_lossy();
let (name, _, _, pkg_arch) = parse_pkg_filename(&file_name); let (name, _, _, _) = parse_pkg_filename(&file_name);
if name == pkg_name && arch == pkg_arch { if name == pkg_name {
fs::remove_file(entry.path()) fs::remove_file(entry.path())
} else { } else {
Ok(()) Ok(())

View File

@ -3,30 +3,28 @@ mod package;
pub use manager::RepoGroupManager; pub use manager::RepoGroupManager;
use axum::body::Body;
use axum::extract::{BodyStream, Path, State}; use axum::extract::{BodyStream, Path, State};
use axum::http::Request;
use axum::http::StatusCode; use axum::http::StatusCode;
use axum::routing::{delete, get_service, post}; use axum::response::IntoResponse;
use axum::routing::{delete, post};
use axum::Router; use axum::Router;
use futures::StreamExt; use futures::StreamExt;
use std::sync::Arc; use std::sync::Arc;
use tokio::{fs, io::AsyncWriteExt}; use tokio::{fs, io::AsyncWriteExt};
use tower_http::services::ServeDir; use tower::util::ServiceExt;
use tower_http::services::{ServeDir, ServeFile};
use uuid::Uuid; use uuid::Uuid;
pub fn router(global: &crate::Global) -> Router<crate::Global> { pub fn router() -> Router<crate::Global> {
// Try to serve packages by default, and try the database files instead if not found
let serve_repos = get_service(
ServeDir::new(&global.config.pkg_dir).fallback(ServeDir::new(&global.config.repo_dir)),
);
Router::new() Router::new()
.route("/:repo", post(post_package_archive).delete(delete_repo)) .route("/:repo", post(post_package_archive).delete(delete_repo))
.route("/:repo/:arch", delete(delete_arch_repo)) .route("/:repo/:arch", delete(delete_arch_repo))
.route( .route(
"/:repo/:arch/:filename", "/:repo/:arch/:filename",
delete(delete_package).get(serve_repos.clone()), delete(delete_package).get(get_file),
) )
.fallback(serve_repos)
.with_state(global.clone())
} }
async fn post_package_archive( async fn post_package_archive(
@ -44,10 +42,68 @@ async fn post_package_archive(
} }
let clone = Arc::clone(&global.repo_manager); let clone = Arc::clone(&global.repo_manager);
tokio::task::spawn_blocking(move || clone.write().unwrap().add_pkg_from_path(&repo, &path)) let path_clone = path.clone();
.await??; let res = tokio::task::spawn_blocking(move || {
clone.write().unwrap().add_pkg_from_path(&repo, &path_clone)
})
.await?;
Ok(()) // Remove the downloaded file if the adding failed
if res.is_err() {
let _ = tokio::fs::remove_file(path).await;
}
Ok(res?)
}
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
///
/// Route: GET /:repo/:arch/:filename. Database requests (names ending in ".db" or
/// ".db.tar.gz") are served from the repo metadata dir; anything else is treated as a
/// package archive and served from the package dir.
async fn get_file(
State(global): State<crate::Global>,
Path((repo, arch, mut file_name)): Path<(String, String, String)>,
req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
// Whether this (repo, arch) pair has an explicit repository on disk; if not, we fall
// back to the "any" architecture below.
let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
let repo_exists = tokio::fs::try_exists(&repo_dir).await?;
let res = if file_name.ends_with(".db") || file_name.ends_with(".db.tar.gz") {
// Append tar extension to ensure we find the file
// (the on-disk database file is stored with the full ".db.tar.gz" name).
if file_name.ends_with(".db") {
file_name.push_str(".tar.gz");
};
if repo_exists {
// Serve this architecture's own database archive.
ServeFile::new(repo_dir.join(file_name)).oneshot(req).await
} else {
// No explicit repo for this arch: serve the "any" architecture's database instead.
let path = global
.config
.repo_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
ServeFile::new(path).oneshot(req).await
}
} else {
// Package archive request. Precompute the "any"-arch location of the file, used
// either as a fallback or as the sole source when the arch repo doesn't exist.
let any_file = global
.config
.pkg_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
if repo_exists {
// NOTE(review): ServeDir is rooted at pkg_dir, so this presumably resolves the
// request path /:repo/:arch/:filename to pkg_dir/repo/arch/filename — confirm
// that the full URI path (not a stripped remainder) reaches this service.
ServeDir::new(global.config.pkg_dir)
.fallback(ServeFile::new(any_file))
.oneshot(req)
.await
} else {
ServeFile::new(any_file).oneshot(req).await
}
};
Ok(res)
} }
async fn delete_repo( async fn delete_repo(