feat: added distro routes and manager

concurrent-repos
Jef Roosens 2024-05-30 09:42:28 +02:00
parent 5e1dfd22da
commit 58def483aa
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
11 changed files with 216 additions and 60 deletions

View File

@@ -1,4 +1,4 @@
-use crate::{repo::MetaRepoMgr, Config, Global};
+use crate::{distro::MetaDistroMgr, repo::MetaRepoMgr, Config, Global};
 use axum::{extract::FromRef, Router};
 use clap::Parser;
@@ -42,12 +42,6 @@ pub struct Cli {
     pub log: String,
 }
 
-impl FromRef<Global> for Arc<RwLock<MetaRepoMgr>> {
-    fn from_ref(global: &Global) -> Self {
-        Arc::clone(&global.repo_manager)
-    }
-}
-
 impl Cli {
     pub fn init_tracing(&self) {
         tracing_subscriber::registry()
@@ -81,13 +75,11 @@ impl Cli {
         let config = Config {
             data_dir: self.data_dir.clone(),
         };
-        let repo_manager = MetaRepoMgr::new(&self.data_dir.join("repos"), db.clone());
-        let global = Global {
-            config,
-            repo_manager: Arc::new(RwLock::new(repo_manager)),
-            db,
-        };
+        let mgr = MetaDistroMgr::new(&self.data_dir.join("distros"), db.clone());
+        mgr.bootstrap().await?;
+
+        let global = Global { config, mgr, db };
 
         // build our application with a single route
         let app = Router::new()

View File

@@ -13,6 +13,15 @@ pub struct Model {
 }
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
-pub enum Relation {}
+pub enum Relation {
+    #[sea_orm(has_many = "super::repo::Entity")]
+    Repo,
+}
+
+impl Related<super::repo::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Repo.def()
+    }
+}
 
 impl ActiveModelBehavior for ActiveModel {}
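With the new has_many variant and its Related impl, a distro's repositories can be loaded through SeaORM's related-query API. A minimal sketch, not part of this commit, assuming the entity modules are reachable as crate::db::{distro, repo} and a DbConn named conn is available:

use sea_orm::{DbConn, DbErr, EntityTrait, ModelTrait};

use crate::db::{distro, repo};

// Hypothetical helper: walk the relation added above from a distro to its repos.
async fn repos_of_distro(conn: &DbConn, distro_id: i32) -> Result<Vec<repo::Model>, DbErr> {
    match distro::Entity::find_by_id(distro_id).one(conn).await? {
        // `find_related` resolves through the `Related<repo::Entity>` impl.
        Some(d) => d.find_related(repo::Entity).all(conn).await,
        None => Ok(Vec::new()),
    }
}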

View File

@@ -8,16 +8,31 @@ use serde::{Deserialize, Serialize};
 pub struct Model {
     #[sea_orm(primary_key)]
     pub id: i32,
+    pub distro_id: i32,
     pub name: String,
     pub description: Option<String>,
 }
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
 pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::distro::Entity",
+        from = "Column::DistroId",
+        to = "super::distro::Column::Id",
+        on_update = "NoAction",
+        on_delete = "Cascade"
+    )]
+    Distro,
     #[sea_orm(has_many = "super::package::Entity")]
     Package,
 }
 
+impl Related<super::distro::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Distro.def()
+    }
+}
+
 impl Related<super::package::Entity> for Entity {
     fn to() -> RelationDef {
         Relation::Package.def()

View File

@@ -43,8 +43,16 @@ impl MigrationTrait for Migration {
                             .auto_increment()
                             .primary_key(),
                     )
+                    .col(ColumnDef::new(Repo::DistroId).integer().not_null())
                     .col(ColumnDef::new(Repo::Name).string().not_null().unique_key())
                     .col(ColumnDef::new(Repo::Description).string())
+                    .foreign_key(
+                        ForeignKey::create()
+                            .name("fk-repo-distro_id")
+                            .from(Repo::Table, Repo::DistroId)
+                            .to(Distro::Table, Distro::Id)
+                            .on_delete(ForeignKeyAction::Cascade),
+                    )
                     .to_owned(),
             )
             .await?;
@@ -232,6 +240,7 @@ pub enum Distro {
 pub enum Repo {
     Table,
     Id,
+    DistroId,
     Name,
     Description,
 }
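The ForeignKeyAction::Cascade choice means deleting a distro row also deletes the repos that reference it, provided the backend enforces foreign keys (SQLite only does so with the foreign_keys pragma enabled). A rough illustration, not part of this commit, assuming a DbConn named conn:

use sea_orm::{DbConn, DbErr, EntityTrait};

use crate::db::{distro, repo};

// Hypothetical check: after deleting a distro, the cascade defined above should
// have removed every repo row that pointed at it.
async fn purge_distro(conn: &DbConn, distro_id: i32) -> Result<(), DbErr> {
    distro::Entity::delete_by_id(distro_id).exec(conn).await?;

    let repos = repo::Entity::find().all(conn).await?;
    assert!(repos.iter().all(|r| r.distro_id != distro_id));

    Ok(())
}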

View File

@@ -0,0 +1,47 @@
+use crate::db::*;
+
+use sea_orm::{sea_query::IntoCondition, *};
+
+#[derive(Deserialize)]
+pub struct Filter {
+    name: Option<String>,
+}
+
+impl IntoCondition for Filter {
+    fn into_condition(self) -> Condition {
+        Condition::all().add_option(
+            self.name
+                .map(|name| distro::Column::Name.like(format!("%{}%", name))),
+        )
+    }
+}
+
+pub async fn page(
+    conn: &DbConn,
+    per_page: u64,
+    page: u64,
+    filter: Filter,
+) -> Result<(u64, Vec<distro::Model>)> {
+    let paginator = Distro::find()
+        .filter(filter)
+        .order_by_asc(distro::Column::Id)
+        .paginate(conn, per_page);
+    let repos = paginator.fetch_page(page).await?;
+    let total_pages = paginator.num_pages().await?;
+
+    Ok((total_pages, repos))
+}
+
+pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<distro::Model>> {
+    distro::Entity::find_by_id(id).one(conn).await
+}
+
+pub async fn insert(conn: &DbConn, name: &str, description: Option<&str>) -> Result<distro::Model> {
+    let model = distro::ActiveModel {
+        id: NotSet,
+        name: Set(String::from(name)),
+        description: Set(description.map(String::from)),
+    };
+
+    model.insert(conn).await
+}
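Since Filter derives Deserialize, it is meant to be filled from request query parameters and passed straight to the pagination helper. A hypothetical handler sketch (not part of this commit; the page size 25 and page 0 are placeholder values):

use axum::extract::{Query, State};

use crate::db::query::distro::{self, Filter};

// Hypothetical call site: `?name=arch` deserializes into `Filter`, which the
// helper turns into a LIKE condition before paginating.
async fn list_distros(
    State(global): State<crate::Global>,
    Query(filter): Query<Filter>,
) -> crate::Result<String> {
    let (total_pages, distros) = distro::page(&global.db, 25, 0, filter).await?;

    Ok(format!("{} distros on page 0 of {}", distros.len(), total_pages))
}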

View File

@@ -1,3 +1,4 @@
+pub mod distro;
 pub mod package;
 pub mod repo;

View File

@@ -43,9 +43,15 @@ pub async fn by_name(conn: &DbConn, name: &str) -> Result<Option<repo::Model>> {
         .await
 }
 
-pub async fn insert(conn: &DbConn, name: &str, description: Option<&str>) -> Result<repo::Model> {
+pub async fn insert(
+    conn: &DbConn,
+    distro_id: i32,
+    name: &str,
+    description: Option<&str>,
+) -> Result<repo::Model> {
     let model = repo::ActiveModel {
         id: NotSet,
+        distro_id: Set(distro_id),
         name: Set(String::from(name)),
         description: Set(description.map(String::from)),
     };

View File

@@ -0,0 +1,69 @@
+use crate::{db, MetaRepoMgr};
+
+use std::{
+    collections::HashMap,
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+
+use sea_orm::{DbConn, EntityTrait};
+use tokio::sync::Mutex;
+
+#[derive(Clone)]
+pub struct MetaDistroMgr {
+    distro_dir: PathBuf,
+    conn: DbConn,
+    distros: Arc<Mutex<HashMap<String, Arc<MetaRepoMgr>>>>,
+}
+
+impl MetaDistroMgr {
+    pub fn new<P: AsRef<Path>>(distro_dir: P, conn: DbConn) -> Self {
+        Self {
+            distro_dir: distro_dir.as_ref().to_path_buf(),
+            conn,
+            distros: Arc::new(Mutex::new(HashMap::new())),
+        }
+    }
+
+    /// Populate the manager with the currently known distros from the database.
+    pub async fn bootstrap(&self) -> crate::Result<()> {
+        let mut map = self.distros.lock().await;
+
+        let distros = db::Distro::find().all(&self.conn).await?;
+
+        for distro in distros {
+            let mgr = MetaRepoMgr::new(
+                self.distro_dir.join(&distro.name),
+                distro.id,
+                self.conn.clone(),
+            );
+            map.insert(distro.name, Arc::new(mgr));
+        }
+
+        Ok(())
+    }
+
+    pub async fn get_mgr(&self, distro: &str) -> Option<Arc<MetaRepoMgr>> {
+        let map = self.distros.lock().await;
+
+        map.get(distro).map(|mgr| Arc::clone(mgr))
+    }
+
+    pub async fn get_or_create_mgr(&self, distro: &str) -> crate::Result<Arc<MetaRepoMgr>> {
+        let mut map = self.distros.lock().await;
+
+        if let Some(mgr) = map.get(distro) {
+            Ok(Arc::clone(mgr))
+        } else {
+            let distro = db::query::distro::insert(&self.conn, distro, None).await?;
+            let mgr = Arc::new(MetaRepoMgr::new(
+                self.distro_dir.join(&distro.name),
+                distro.id,
+                self.conn.clone(),
+            ));
+            map.insert(distro.name, Arc::clone(&mgr));
+
+            Ok(mgr)
+        }
+    }
+}
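Outside of the HTTP handlers the manager can be driven directly. A minimal lifecycle sketch, not part of this commit, with a hypothetical data directory and an already-established DbConn:

use sea_orm::DbConn;

use crate::distro::MetaDistroMgr;

// Hypothetical flow mirroring cli.rs and the API handlers: build the manager,
// load the distros already in the database, then lazily create per-distro
// repo managers on first use.
async fn example(conn: DbConn) -> crate::Result<()> {
    let mgr = MetaDistroMgr::new("./data/distros", conn);
    mgr.bootstrap().await?;

    // Inserts an "arch" distro row (and its MetaRepoMgr) if it does not exist yet.
    let _arch_repos = mgr.get_or_create_mgr("arch").await?;

    // Subsequent lookups hit the in-memory map.
    assert!(mgr.get_mgr("arch").await.is_some());

    Ok(())
}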

View File

@@ -1,6 +1,7 @@
 mod api;
 mod cli;
 pub mod db;
+mod distro;
 mod error;
 mod repo;
@@ -19,7 +20,7 @@ pub struct Config {
 #[derive(Clone)]
 pub struct Global {
     config: Config,
-    repo_manager: Arc<RwLock<MetaRepoMgr>>,
+    mgr: distro::MetaDistroMgr,
     db: sea_orm::DbConn,
 }

View File

@@ -12,13 +12,15 @@ pub const ANY_ARCH: &'static str = "any";
 pub struct MetaRepoMgr {
     repo_dir: PathBuf,
+    distro_id: i32,
     conn: DbConn,
 }
 
 impl MetaRepoMgr {
-    pub fn new<P: AsRef<Path>>(repo_dir: P, conn: DbConn) -> Self {
+    pub fn new<P: AsRef<Path>>(repo_dir: P, distro_id: i32, conn: DbConn) -> Self {
         MetaRepoMgr {
             repo_dir: repo_dir.as_ref().to_path_buf(),
+            distro_id,
             conn,
         }
     }
@@ -237,7 +239,9 @@ impl MetaRepoMgr {
         let repo_id = if let Some(repo_entity) = res {
             repo_entity.id
         } else {
-            db::query::repo::insert(&self.conn, repo, None).await?.id
+            db::query::repo::insert(&self.conn, self.distro_id, repo, None)
+                .await?
+                .id
         };
 
         // If the package already exists in the database, we remove it first

View File

@@ -20,19 +20,19 @@ use tower_http::{services::ServeFile, validate_request::ValidateRequestHeaderLayer};
 pub fn router(api_key: &str) -> Router<crate::Global> {
     Router::new()
         .route(
-            "/:repo",
+            "/:distro/:repo",
             post(post_package_archive)
                 .delete(delete_repo)
                 .route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
         )
         .route(
-            "/:repo/:arch",
+            "/:distro/:repo/:arch",
             delete(delete_arch_repo).route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
         )
         // Routes added after the layer do not get that layer applied, so the GET requests will not
        // be authorized
         .route(
-            "/:repo/:arch/:filename",
+            "/:distro/:repo/:arch/:filename",
             delete(delete_package)
                 .route_layer(ValidateRequestHeaderLayer::bearer(api_key))
                 .get(get_file),
@@ -44,10 +44,15 @@ pub fn router(api_key: &str) -> Router<crate::Global> {
 /// is returned.
 async fn get_file(
     State(global): State<crate::Global>,
-    Path((repo, arch, file_name)): Path<(String, String, String)>,
+    Path((distro, repo, arch, file_name)): Path<(String, String, String, String)>,
     req: Request<Body>,
 ) -> crate::Result<impl IntoResponse> {
-    let repo_dir = global.config.data_dir.join("repos").join(&repo);
+    let repo_dir = global
+        .config
+        .data_dir
+        .join("distros")
+        .join(&distro)
+        .join(&repo);
 
     let file_name =
         if file_name == format!("{}.db", repo) || file_name == format!("{}.db.tar.gz", repo) {
@@ -65,16 +70,12 @@ async fn get_file(
 async fn post_package_archive(
     State(global): State<crate::Global>,
-    Path(repo): Path<String>,
+    Path((distro, repo)): Path<(String, String)>,
     body: Body,
 ) -> crate::Result<()> {
     let mut body = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other));
-    let (name, version, arch) = global
-        .repo_manager
-        .write()
-        .await
-        .add_pkg_from_reader(&mut body, &repo)
-        .await?;
+    let mgr = global.mgr.get_or_create_mgr(&distro).await?;
+    let (name, version, arch) = mgr.add_pkg_from_reader(&mut body, &repo).await?;
 
     tracing::info!(
         "Added '{}-{}' to repository '{}' ({})",
@@ -89,14 +90,18 @@ async fn post_package_archive(
 async fn delete_repo(
     State(global): State<crate::Global>,
-    Path(repo): Path<String>,
+    Path((distro, repo)): Path<(String, String)>,
 ) -> crate::Result<StatusCode> {
-    let repo_removed = global.repo_manager.write().await.remove_repo(&repo).await?;
+    if let Some(mgr) = global.mgr.get_mgr(&distro).await {
+        let repo_removed = mgr.remove_repo(&repo).await?;
 
-    if repo_removed {
-        tracing::info!("Removed repository '{}'", repo);
-        Ok(StatusCode::OK)
+        if repo_removed {
+            tracing::info!("Removed repository '{}'", repo);
+            Ok(StatusCode::OK)
+        } else {
+            Ok(StatusCode::NOT_FOUND)
+        }
     } else {
         Ok(StatusCode::NOT_FOUND)
     }
@@ -104,19 +109,18 @@ async fn delete_repo(
 async fn delete_arch_repo(
     State(global): State<crate::Global>,
-    Path((repo, arch)): Path<(String, String)>,
+    Path((distro, repo, arch)): Path<(String, String, String)>,
 ) -> crate::Result<StatusCode> {
-    let repo_removed = global
-        .repo_manager
-        .write()
-        .await
-        .remove_repo_arch(&repo, &arch)
-        .await?;
+    if let Some(mgr) = global.mgr.get_mgr(&distro).await {
+        let repo_removed = mgr.remove_repo_arch(&repo, &arch).await?;
 
-    if repo_removed {
-        tracing::info!("Removed arch '{}' from repository '{}'", arch, repo);
-        Ok(StatusCode::OK)
+        if repo_removed {
+            tracing::info!("Removed arch '{}' from repository '{}'", arch, repo);
+            Ok(StatusCode::OK)
+        } else {
+            Ok(StatusCode::NOT_FOUND)
+        }
     } else {
         Ok(StatusCode::NOT_FOUND)
     }
@@ -124,24 +128,23 @@ async fn delete_arch_repo(
 async fn delete_package(
     State(global): State<crate::Global>,
-    Path((repo, arch, pkg_name)): Path<(String, String, String)>,
+    Path((distro, repo, arch, pkg_name)): Path<(String, String, String, String)>,
 ) -> crate::Result<StatusCode> {
-    let pkg_removed = global
-        .repo_manager
-        .write()
-        .await
-        .remove_pkg(&repo, &arch, &pkg_name)
-        .await?;
+    if let Some(mgr) = global.mgr.get_mgr(&distro).await {
+        let pkg_removed = mgr.remove_pkg(&repo, &arch, &pkg_name).await?;
 
-    if pkg_removed {
-        tracing::info!(
-            "Removed package '{}' ({}) from repository '{}'",
-            pkg_name,
-            arch,
-            repo
-        );
-        Ok(StatusCode::OK)
+        if pkg_removed {
+            tracing::info!(
+                "Removed package '{}' ({}) from repository '{}'",
+                pkg_name,
+                arch,
+                repo
+            );
+            Ok(StatusCode::OK)
+        } else {
+            Ok(StatusCode::NOT_FOUND)
+        }
     } else {
        Ok(StatusCode::NOT_FOUND)
     }
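With these changes every route carries the distro as its first path segment. A rough client-side sketch of an upload against the new layout; the mount point (server root), the reqwest crate, the "arch"/"extra" names, and the package file name are all hypothetical and not specified by this commit:

use std::fs;

// Hypothetical client call: upload a package archive to repository "extra" of
// distro "arch", authenticating with the bearer token the write routes expect.
async fn upload_package(base_url: &str, api_key: &str) -> Result<(), reqwest::Error> {
    let archive = fs::read("mypkg-1.0.0-1-x86_64.pkg.tar.zst").expect("package file");

    reqwest::Client::new()
        .post(format!("{}/arch/extra", base_url))
        .bearer_auth(api_key)
        .body(archive)
        .send()
        .await?
        .error_for_status()?;

    Ok(())
}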