refactor: restructure database query code

concurrent-repos
Jef Roosens 2024-05-21 09:16:45 +02:00
parent e1642d939b
commit 45f1abade3
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
9 changed files with 243 additions and 354 deletions
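
The diff replaces the RieterDb wrapper struct and its pkg/repo query objects with free async functions under db::query that take a SeaORM connection as their first argument. A minimal before/after sketch of the call-site change, using the get_single_repo handler from this commit (identifiers taken from the diff, error handling elided):

    // Before: queries were methods on query structs stored inside RieterDb
    let repo = global.db.repo.by_id(id).await?;

    // After: queries are free functions that borrow the connection explicitly
    let repo = db::query::repo::by_id(&global.db, id).await?;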

View File

@@ -1,6 +1,6 @@
 mod pagination;
-use sea_orm::{sea_query::IntoCondition, *};
+use sea_orm::{*};
 use axum::extract::{Path, Query, State};
 use axum::routing::get;
@@ -49,10 +49,7 @@ async fn get_single_repo(
     State(global): State<crate::Global>,
     Path(id): Path<i32>,
 ) -> crate::Result<Json<db::repo::Model>> {
-    let repo = global
-        .db
-        .repo
-        .by_id(id)
+    let repo = db::query::repo::by_id(&global.db, id)
         .await?
         .ok_or(axum::http::StatusCode::NOT_FOUND)?;
@@ -64,15 +61,13 @@ async fn get_packages(
     Query(pagination): Query<pagination::Query>,
     Query(filter): Query<db::query::package::Filter>,
 ) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
-    let (total_pages, pkgs) = global
-        .db
-        .pkg
-        .page(
-            pagination.per_page.unwrap_or(25),
-            pagination.page.unwrap_or(1) - 1,
-            filter,
-        )
-        .await?;
+    let (total_pages, pkgs) = db::query::package::page(
+        &global.db,
+        pagination.per_page.unwrap_or(25),
+        pagination.page.unwrap_or(1) - 1,
+        filter,
+    )
+    .await?;
     Ok(Json(pagination.res(total_pages, pkgs)))
 }
@@ -81,10 +76,7 @@ async fn get_single_package(
     State(global): State<crate::Global>,
     Path(id): Path<i32>,
 ) -> crate::Result<Json<crate::db::FullPackage>> {
-    let entry = global
-        .db
-        .pkg
-        .full(id)
+    let entry = db::query::package::full(&global.db, id)
         .await?
         .ok_or(axum::http::StatusCode::NOT_FOUND)?;

View File

@@ -10,6 +10,7 @@ use std::sync::{Arc, RwLock};
 use tower_http::trace::TraceLayer;
 use tracing::debug;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
+use sea_orm_migration::MigratorTrait;
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
@@ -75,10 +76,8 @@ impl Cli {
         debug!("Connecting to database with URL {}", db_url);
-        let db = crate::db::RieterDb::connect(db_url).await?;
-        // let db = crate::db::init("postgres://rieter:rieter@localhost:5432/rieter")
-        //     .await
-        //     .unwrap();
+        let db = sea_orm::Database::connect(db_url).await?;
+        crate::db::Migrator::up(&db, None).await?;
         let config = Config {
             data_dir: self.data_dir.clone(),
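
With RieterDb::connect removed, opening the connection and running migrations are now two explicit steps in the CLI setup. A minimal sketch of that startup sequence, assuming the pub use migrator::Migrator re-export added in the db module later in this commit (error handling via ? as in the original):

    use sea_orm_migration::MigratorTrait;

    // Open a plain SeaORM connection, then apply any pending migrations to it.
    let db = sea_orm::Database::connect(db_url).await?;
    crate::db::Migrator::up(&db, None).await?;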

View File

@@ -1,61 +0,0 @@
-use super::RieterDb;
-use sea_orm::{DbBackend, DbErr, ExecResult, QueryResult, Statement};
-use std::{future::Future, pin::Pin};
-// Allows RieterDb objects to be passed to ORM functions
-impl sea_orm::ConnectionTrait for RieterDb {
-    fn get_database_backend(&self) -> DbBackend {
-        self.conn.get_database_backend()
-    }
-    fn execute<'life0, 'async_trait>(
-        &'life0 self,
-        stmt: Statement,
-    ) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
-    where
-        Self: 'async_trait,
-        'life0: 'async_trait,
-    {
-        self.conn.execute(stmt)
-    }
-    fn execute_unprepared<'life0, 'life1, 'async_trait>(
-        &'life0 self,
-        sql: &'life1 str,
-    ) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
-    where
-        Self: 'async_trait,
-        'life0: 'async_trait,
-        'life1: 'async_trait,
-    {
-        self.conn.execute_unprepared(sql)
-    }
-    fn query_one<'life0, 'async_trait>(
-        &'life0 self,
-        stmt: Statement,
-    ) -> Pin<
-        Box<
-            dyn Future<Output = std::result::Result<Option<QueryResult>, DbErr>>
-                + Send
-                + 'async_trait,
-        >,
-    >
-    where
-        Self: 'async_trait,
-        'life0: 'async_trait,
-    {
-        self.conn.query_one(stmt)
-    }
-    fn query_all<'life0, 'async_trait>(
-        &'life0 self,
-        stmt: Statement,
-    ) -> Pin<
-        Box<
-            dyn Future<Output = std::result::Result<Vec<QueryResult>, DbErr>> + Send + 'async_trait,
-        >,
-    >
-    where
-        Self: 'async_trait,
-        'life0: 'async_trait,
-    {
-        self.conn.query_all(stmt)
-    }
-}
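
The hand-written ConnectionTrait forwarding impl above is dropped because the new query functions receive SeaORM's own connection type, which already implements ConnectionTrait. A small illustrative sketch (the list_repos helper is hypothetical, not part of the commit) of passing a &DbConn straight into entity queries:

    use sea_orm::{DbConn, EntityTrait};

    // A &DbConn can be handed to query methods such as .one(conn), .all(conn)
    // or .exec(conn) directly, so no wrapper impl is needed.
    async fn list_repos(conn: &DbConn) -> Result<Vec<crate::db::repo::Model>, sea_orm::DbErr> {
        crate::db::Repo::find().all(conn).await
    }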

View File

@@ -1,14 +1,14 @@
-mod conn;
 pub mod entities;
 mod migrator;
 pub mod query;
-use sea_orm::{ConnectOptions, Database, DatabaseConnection, DeriveActiveEnum, EnumIter};
-use sea_orm_migration::MigratorTrait;
+use sea_orm::{DeriveActiveEnum, EnumIter};
 use serde::{Deserialize, Serialize};
 pub use entities::{prelude::*, *};
-use migrator::Migrator;
+pub use migrator::Migrator;
 type Result<T> = std::result::Result<T, sea_orm::DbErr>;
@@ -41,24 +41,3 @@ pub struct FullPackage {
     related: Vec<(PackageRelatedEnum, String)>,
     files: Vec<String>,
 }
-#[derive(Clone, Debug)]
-pub struct RieterDb {
-    conn: DatabaseConnection,
-    pub pkg: query::PackageQuery,
-    pub repo: query::RepoQuery,
-}
-impl RieterDb {
-    pub async fn connect<C: Into<ConnectOptions>>(opt: C) -> Result<Self> {
-        let db = Database::connect(opt).await?;
-        Migrator::up(&db, None).await?;
-        Ok(Self {
-            conn: db.clone(),
-            pkg: query::PackageQuery::new(db.clone()),
-            repo: query::RepoQuery::new(db.clone()),
-        })
-    }
-}

View File

@@ -1,7 +1,4 @@
 pub mod package;
 pub mod repo;
-pub use package::PackageQuery;
-pub use repo::RepoQuery;
 type Result<T> = std::result::Result<T, sea_orm::DbErr>;

View File

@@ -3,11 +3,6 @@ use serde::Deserialize;
 use crate::db::*;
-#[derive(Clone, Debug)]
-pub struct PackageQuery {
-    conn: DatabaseConnection,
-}
 #[derive(Deserialize)]
 pub struct Filter {
     repo: Option<i32>,
@@ -27,189 +22,183 @@ impl IntoCondition for Filter {
     }
 }
-impl PackageQuery {
-    pub fn new(conn: DatabaseConnection) -> Self {
-        Self { conn }
-    }
-    pub async fn page(
-        &self,
-        per_page: u64,
-        page: u64,
-        filter: Filter,
-    ) -> super::Result<(u64, Vec<package::Model>)> {
-        let paginator = Package::find()
-            .filter(filter)
-            .order_by_asc(package::Column::Id)
-            .paginate(&self.conn, per_page);
-        let packages = paginator.fetch_page(page).await?;
-        let total_pages = paginator.num_pages().await?;
-        Ok((total_pages, packages))
-    }
-    pub async fn by_id(&self, id: i32) -> Result<Option<package::Model>> {
-        package::Entity::find_by_id(id).one(&self.conn).await
-    }
-    pub async fn by_fields(
-        &self,
-        repo_id: i32,
-        name: &str,
-        version: Option<&str>,
-        arch: &str,
-    ) -> Result<Option<package::Model>> {
-        let mut query = Package::find()
-            .filter(package::Column::RepoId.eq(repo_id))
-            .filter(package::Column::Name.eq(name))
-            .filter(package::Column::Arch.eq(arch));
-        if let Some(version) = version {
-            query = query.filter(package::Column::Version.eq(version));
-        }
-        query.one(&self.conn).await
-    }
-    pub async fn delete_with_arch(&self, repo_id: i32, arch: &str) -> Result<DeleteResult> {
-        Package::delete_many()
-            .filter(package::Column::RepoId.eq(repo_id))
-            .filter(package::Column::Arch.eq(arch))
-            .exec(&self.conn)
-            .await
-    }
-    pub async fn insert(&self, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
-        let info = pkg.info;
-        let model = package::ActiveModel {
-            id: NotSet,
-            repo_id: Set(repo_id),
-            base: Set(info.base),
-            name: Set(info.name),
-            version: Set(info.version),
-            arch: Set(info.arch),
-            size: Set(info.size),
-            c_size: Set(info.csize),
-            description: Set(info.description),
-            url: Set(info.url),
-            build_date: Set(info.build_date.to_string()),
-            packager: Set(info.packager),
-            pgp_sig: Set(info.pgpsig),
-            pgp_sig_size: Set(info.pgpsigsize),
-            sha256_sum: Set(info.sha256sum),
-        };
-        let pkg_entry = model.insert(&self.conn).await?;
-        // Insert all the related tables
-        PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
-            package_id: Set(pkg_entry.id),
-            name: Set(s.to_string()),
-        }))
-        .on_empty_do_nothing()
-        .exec(&self.conn)
-        .await?;
-        PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
-            package_id: Set(pkg_entry.id),
-            name: Set(s.to_string()),
-        }))
-        .on_empty_do_nothing()
-        .exec(&self.conn)
-        .await?;
-        let related = info
-            .conflicts
-            .iter()
-            .map(|s| (PackageRelatedEnum::Conflicts, s))
-            .chain(
-                info.replaces
-                    .iter()
-                    .map(|s| (PackageRelatedEnum::Replaces, s)),
-            )
-            .chain(
-                info.provides
-                    .iter()
-                    .map(|s| (PackageRelatedEnum::Provides, s)),
-            )
-            .chain(info.depends.iter().map(|s| (PackageRelatedEnum::Depend, s)))
-            .chain(
-                info.makedepends
-                    .iter()
-                    .map(|s| (PackageRelatedEnum::Depend, s)),
-            )
-            .chain(
-                info.checkdepends
-                    .iter()
-                    .map(|s| (PackageRelatedEnum::Checkdepend, s)),
-            )
-            .chain(
-                info.optdepends
-                    .iter()
-                    .map(|s| (PackageRelatedEnum::Optdepend, s)),
-            );
-        PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
-            package_id: Set(pkg_entry.id),
-            r#type: Set(t),
-            name: Set(s.to_string()),
-        }))
-        .on_empty_do_nothing()
-        .exec(&self.conn)
-        .await?;
-        PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
-            package_id: Set(pkg_entry.id),
-            path: Set(s.display().to_string()),
-        }))
-        .on_empty_do_nothing()
-        .exec(&self.conn)
-        .await?;
-        Ok(())
-    }
-    pub async fn full(&self, id: i32) -> Result<Option<FullPackage>> {
-        if let Some(entry) = self.by_id(id).await? {
-            let licenses = entry
-                .find_related(PackageLicense)
-                .all(&self.conn)
-                .await?
-                .into_iter()
-                .map(|e| e.name)
-                .collect();
-            let groups = entry
-                .find_related(PackageGroup)
-                .all(&self.conn)
-                .await?
-                .into_iter()
-                .map(|e| e.name)
-                .collect();
-            let related = entry
-                .find_related(PackageRelated)
-                .all(&self.conn)
-                .await?
-                .into_iter()
-                .map(|e| (e.r#type, e.name))
-                .collect();
-            let files = entry
-                .find_related(PackageFile)
-                .all(&self.conn)
-                .await?
-                .into_iter()
-                .map(|e| e.path)
-                .collect();
-            Ok(Some(FullPackage {
-                entry,
-                licenses,
-                groups,
-                related,
-                files,
-            }))
-        } else {
-            Ok(None)
-        }
-    }
-}
+pub async fn page(
+    conn: &DbConn,
+    per_page: u64,
+    page: u64,
+    filter: Filter,
+) -> super::Result<(u64, Vec<package::Model>)> {
+    let paginator = Package::find()
+        .filter(filter)
+        .order_by_asc(package::Column::Id)
+        .paginate(conn, per_page);
+    let packages = paginator.fetch_page(page).await?;
+    let total_pages = paginator.num_pages().await?;
+    Ok((total_pages, packages))
+}
+pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<package::Model>> {
+    package::Entity::find_by_id(id).one(conn).await
+}
+pub async fn by_fields(
+    conn: &DbConn,
+    repo_id: i32,
+    name: &str,
+    version: Option<&str>,
+    arch: &str,
+) -> Result<Option<package::Model>> {
+    let mut query = Package::find()
+        .filter(package::Column::RepoId.eq(repo_id))
+        .filter(package::Column::Name.eq(name))
+        .filter(package::Column::Arch.eq(arch));
+    if let Some(version) = version {
+        query = query.filter(package::Column::Version.eq(version));
+    }
+    query.one(conn).await
+}
+pub async fn delete_with_arch(conn: &DbConn, repo_id: i32, arch: &str) -> Result<DeleteResult> {
+    Package::delete_many()
+        .filter(package::Column::RepoId.eq(repo_id))
+        .filter(package::Column::Arch.eq(arch))
+        .exec(conn)
+        .await
+}
+pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
+    let info = pkg.info;
+    let model = package::ActiveModel {
+        id: NotSet,
+        repo_id: Set(repo_id),
+        base: Set(info.base),
+        name: Set(info.name),
+        version: Set(info.version),
+        arch: Set(info.arch),
+        size: Set(info.size),
+        c_size: Set(info.csize),
+        description: Set(info.description),
+        url: Set(info.url),
+        build_date: Set(info.build_date.to_string()),
+        packager: Set(info.packager),
+        pgp_sig: Set(info.pgpsig),
+        pgp_sig_size: Set(info.pgpsigsize),
+        sha256_sum: Set(info.sha256sum),
+    };
+    let pkg_entry = model.insert(conn).await?;
+    // Insert all the related tables
+    PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
+        package_id: Set(pkg_entry.id),
+        name: Set(s.to_string()),
+    }))
+    .on_empty_do_nothing()
+    .exec(conn)
+    .await?;
+    PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
+        package_id: Set(pkg_entry.id),
+        name: Set(s.to_string()),
+    }))
+    .on_empty_do_nothing()
+    .exec(conn)
+    .await?;
+    let related = info
+        .conflicts
+        .iter()
+        .map(|s| (PackageRelatedEnum::Conflicts, s))
+        .chain(
+            info.replaces
+                .iter()
+                .map(|s| (PackageRelatedEnum::Replaces, s)),
+        )
+        .chain(
+            info.provides
+                .iter()
+                .map(|s| (PackageRelatedEnum::Provides, s)),
+        )
+        .chain(info.depends.iter().map(|s| (PackageRelatedEnum::Depend, s)))
+        .chain(
+            info.makedepends
+                .iter()
+                .map(|s| (PackageRelatedEnum::Depend, s)),
+        )
+        .chain(
+            info.checkdepends
+                .iter()
+                .map(|s| (PackageRelatedEnum::Checkdepend, s)),
+        )
+        .chain(
+            info.optdepends
+                .iter()
+                .map(|s| (PackageRelatedEnum::Optdepend, s)),
+        );
+    PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
+        package_id: Set(pkg_entry.id),
+        r#type: Set(t),
+        name: Set(s.to_string()),
+    }))
+    .on_empty_do_nothing()
+    .exec(conn)
+    .await?;
+    PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
+        package_id: Set(pkg_entry.id),
+        path: Set(s.display().to_string()),
+    }))
+    .on_empty_do_nothing()
+    .exec(conn)
+    .await?;
+    Ok(())
+}
+pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
+    if let Some(entry) = by_id(conn, id).await? {
+        let licenses = entry
+            .find_related(PackageLicense)
+            .all(conn)
+            .await?
+            .into_iter()
+            .map(|e| e.name)
+            .collect();
+        let groups = entry
+            .find_related(PackageGroup)
+            .all(conn)
+            .await?
+            .into_iter()
+            .map(|e| e.name)
+            .collect();
+        let related = entry
+            .find_related(PackageRelated)
+            .all(conn)
+            .await?
+            .into_iter()
+            .map(|e| (e.r#type, e.name))
+            .collect();
+        let files = entry
+            .find_related(PackageFile)
+            .all(conn)
+            .await?
+            .into_iter()
+            .map(|e| e.path)
+            .collect();
+        Ok(Some(FullPackage {
+            entry,
+            licenses,
+            groups,
+            related,
+            files,
+        }))
+    } else {
+        Ok(None)
+    }
+}
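
With the impl block gone, the query module keeps no state and every function borrows the connection per call. A short usage sketch of the new page function (the list_first_page wrapper is hypothetical; Filter values come from query-string deserialization as in the route handler earlier in this commit):

    use sea_orm::DbConn;

    // Fetch the first page of up to 25 packages matching a filter.
    // page() returns (total_pages, models); pages are zero-indexed, which is
    // why the HTTP handler passes pagination.page.unwrap_or(1) - 1.
    async fn list_first_page(
        conn: &DbConn,
        filter: crate::db::query::package::Filter,
    ) -> Result<(u64, Vec<crate::db::package::Model>), sea_orm::DbErr> {
        crate::db::query::package::page(conn, 25, 0, filter).await
    }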

View File

@@ -2,11 +2,6 @@ use sea_orm::{sea_query::IntoCondition, *};
 use crate::db::*;
-#[derive(Clone, Debug)]
-pub struct RepoQuery {
-    conn: DatabaseConnection,
-}
 #[derive(Deserialize)]
 pub struct Filter {
     name: Option<String>,
@@ -18,43 +13,37 @@ impl IntoCondition for Filter {
     }
 }
-impl RepoQuery {
-    pub fn new(conn: DatabaseConnection) -> Self {
-        Self { conn }
-    }
-    pub async fn page(&self, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
-        let paginator = Repo::find()
-            .order_by_asc(repo::Column::Id)
-            .paginate(&self.conn, per_page);
-        let repos = paginator.fetch_page(page).await?;
-        let total_pages = paginator.num_pages().await?;
-        Ok((total_pages, repos))
-    }
-    pub async fn by_id(&self, id: i32) -> Result<Option<repo::Model>> {
-        repo::Entity::find_by_id(id).one(&self.conn).await
-    }
-    pub async fn by_name(&self, name: &str) -> Result<Option<repo::Model>> {
-        Repo::find()
-            .filter(repo::Column::Name.eq(name))
-            .one(&self.conn)
-            .await
-    }
-    pub async fn insert(
-        &self,
-        name: &str,
-        description: Option<&str>,
-    ) -> Result<InsertResult<repo::ActiveModel>> {
-        let model = repo::ActiveModel {
-            id: NotSet,
-            name: Set(String::from(name)),
-            description: Set(description.map(String::from)),
-        };
-        Repo::insert(model).exec(&self.conn).await
-    }
-}
+pub async fn page(conn: &DbConn, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
+    let paginator = Repo::find()
+        .order_by_asc(repo::Column::Id)
+        .paginate(conn, per_page);
+    let repos = paginator.fetch_page(page).await?;
+    let total_pages = paginator.num_pages().await?;
+    Ok((total_pages, repos))
+}
+pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<repo::Model>> {
+    repo::Entity::find_by_id(id).one(conn).await
+}
+pub async fn by_name(conn: &DbConn, name: &str) -> Result<Option<repo::Model>> {
+    Repo::find()
+        .filter(repo::Column::Name.eq(name))
+        .one(conn)
+        .await
+}
+pub async fn insert(
+    conn: &DbConn,
+    name: &str,
+    description: Option<&str>,
+) -> Result<InsertResult<repo::ActiveModel>> {
+    let model = repo::ActiveModel {
+        id: NotSet,
+        name: Set(String::from(name)),
+        description: Set(description.map(String::from)),
+    };
+    Repo::insert(model).exec(conn).await
+}

View File

@@ -22,7 +22,7 @@ pub struct Config {
 pub struct Global {
     config: Config,
     repo_manager: Arc<RwLock<RepoGroupManager>>,
-    db: db::RieterDb,
+    db: sea_orm::DbConn,
 }
 #[tokio::main]

View File

@@ -5,7 +5,7 @@ pub use manager::RepoGroupManager;
 use std::path::PathBuf;
-use axum::body::{Body, BodyDataStream};
+use axum::body::{Body};
 use axum::extract::{Path, State};
 use axum::http::Request;
 use axum::http::StatusCode;
@@ -21,6 +21,8 @@ use tower_http::services::{ServeDir, ServeFile};
 use tower_http::validate_request::ValidateRequestHeaderLayer;
 use uuid::Uuid;
+use crate::db;
 const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
 pub fn router(api_key: &str) -> Router<crate::Global> {
@@ -128,26 +130,31 @@ async fn post_package_archive(
     tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
     // Query the repo for its ID, or create it if it does not already exist
-    let res = global.db.repo.by_name(&repo).await?;
+    let res = db::query::repo::by_name(&global.db, &repo).await?;
     let repo_id = if let Some(repo_entity) = res {
         repo_entity.id
     } else {
-        global.db.repo.insert(&repo, None).await?.last_insert_id
+        db::query::repo::insert(&global.db, &repo, None)
+            .await?
+            .last_insert_id
     };
     // If the package already exists in the database, we remove it first
-    let res = global
-        .db
-        .pkg
-        .by_fields(repo_id, &pkg.info.name, None, &pkg.info.arch)
-        .await?;
+    let res = db::query::package::by_fields(
+        &global.db,
+        repo_id,
+        &pkg.info.name,
+        None,
+        &pkg.info.arch,
+    )
+    .await?;
     if let Some(entry) = res {
         entry.delete(&global.db).await?;
     }
-    global.db.pkg.insert(repo_id, pkg).await?;
+    db::query::package::insert(&global.db, repo_id, pkg).await?;
     Ok(())
 }
@@ -172,7 +179,7 @@ async fn delete_repo(
     .await??;
     if repo_removed {
-        let res = global.db.repo.by_name(&repo).await?;
+        let res = db::query::repo::by_name(&global.db, &repo).await?;
         if let Some(repo_entry) = res {
             repo_entry.delete(&global.db).await?;
@@ -203,10 +210,10 @@ async fn delete_arch_repo(
     .await??;
     if repo_removed {
-        let res = global.db.repo.by_name(&repo).await?;
+        let res = db::query::repo::by_name(&global.db, &repo).await?;
         if let Some(repo_entry) = res {
-            global.db.pkg.delete_with_arch(repo_entry.id, &arch).await?;
+            db::query::package::delete_with_arch(&global.db, repo_entry.id, &arch).await?;
         }
         tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
@@ -229,19 +236,17 @@ async fn delete_package(
     .await??;
     if let Some((name, version, release, arch)) = res {
-        let res = global.db.repo.by_name(&repo).await?;
+        let res = db::query::repo::by_name(&global.db, &repo).await?;
         if let Some(repo_entry) = res {
-            let res = global
-                .db
-                .pkg
-                .by_fields(
-                    repo_entry.id,
-                    &name,
-                    Some(&format!("{}-{}", version, release)),
-                    &arch,
-                )
-                .await?;
+            let res = db::query::package::by_fields(
+                &global.db,
+                repo_entry.id,
+                &name,
+                Some(&format!("{}-{}", version, release)),
+                &arch,
+            )
+            .await?;
             if let Some(entry) = res {
                 entry.delete(&global.db).await?;