refactor: restructure database query code

concurrent-repos
Jef Roosens 2024-05-21 09:16:45 +02:00
parent e1642d939b
commit 45f1abade3
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
9 changed files with 243 additions and 354 deletions
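Summary: the RieterDb wrapper and its pkg/repo query objects are replaced by free functions under db::query that borrow a plain sea_orm::DbConn. The call-site change, with both lines lifted from the first file's hunks below:

// Before: queries were methods on query structs stored inside the RieterDb wrapper.
let repo = global.db.repo.by_id(id).await?;

// After: queries are free functions that borrow the shared DbConn directly.
let repo = db::query::repo::by_id(&global.db, id).await?;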

View File

@@ -1,6 +1,6 @@
mod pagination;
use sea_orm::{sea_query::IntoCondition, *};
use sea_orm::{*};
use axum::extract::{Path, Query, State};
use axum::routing::get;
@@ -49,10 +49,7 @@ async fn get_single_repo(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<db::repo::Model>> {
let repo = global
.db
.repo
.by_id(id)
let repo = db::query::repo::by_id(&global.db, id)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;
@@ -64,15 +61,13 @@ async fn get_packages(
Query(pagination): Query<pagination::Query>,
Query(filter): Query<db::query::package::Filter>,
) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
let (total_pages, pkgs) = global
.db
.pkg
.page(
pagination.per_page.unwrap_or(25),
pagination.page.unwrap_or(1) - 1,
filter,
)
.await?;
let (total_pages, pkgs) = db::query::package::page(
&global.db,
pagination.per_page.unwrap_or(25),
pagination.page.unwrap_or(1) - 1,
filter,
)
.await?;
Ok(Json(pagination.res(total_pages, pkgs)))
}
@@ -81,10 +76,7 @@ async fn get_single_package(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<crate::db::FullPackage>> {
let entry = global
.db
.pkg
.full(id)
let entry = db::query::package::full(&global.db, id)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;

View File

@@ -10,6 +10,7 @@ use std::sync::{Arc, RwLock};
use tower_http::trace::TraceLayer;
use tracing::debug;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
use sea_orm_migration::MigratorTrait;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
@@ -75,10 +76,8 @@ impl Cli {
debug!("Connecting to database with URL {}", db_url);
let db = crate::db::RieterDb::connect(db_url).await?;
// let db = crate::db::init("postgres://rieter:rieter@localhost:5432/rieter")
// .await
// .unwrap();
let db = sea_orm::Database::connect(db_url).await?;
crate::db::Migrator::up(&db, None).await?;
let config = Config {
data_dir: self.data_dir.clone(),

View File

@@ -1,61 +0,0 @@
use super::RieterDb;
use sea_orm::{DbBackend, DbErr, ExecResult, QueryResult, Statement};
use std::{future::Future, pin::Pin};
// Allows RieterDb objects to be passed to ORM functions
impl sea_orm::ConnectionTrait for RieterDb {
fn get_database_backend(&self) -> DbBackend {
self.conn.get_database_backend()
}
fn execute<'life0, 'async_trait>(
&'life0 self,
stmt: Statement,
) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
{
self.conn.execute(stmt)
}
fn execute_unprepared<'life0, 'life1, 'async_trait>(
&'life0 self,
sql: &'life1 str,
) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
{
self.conn.execute_unprepared(sql)
}
fn query_one<'life0, 'async_trait>(
&'life0 self,
stmt: Statement,
) -> Pin<
Box<
dyn Future<Output = std::result::Result<Option<QueryResult>, DbErr>>
+ Send
+ 'async_trait,
>,
>
where
Self: 'async_trait,
'life0: 'async_trait,
{
self.conn.query_one(stmt)
}
fn query_all<'life0, 'async_trait>(
&'life0 self,
stmt: Statement,
) -> Pin<
Box<
dyn Future<Output = std::result::Result<Vec<QueryResult>, DbErr>> + Send + 'async_trait,
>,
>
where
Self: 'async_trait,
'life0: 'async_trait,
{
self.conn.query_all(stmt)
}
}
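The forwarding impl deleted above existed only so a RieterDb could be handed to SeaORM APIs expecting a ConnectionTrait. With the wrapper gone it is no longer needed: DbConn implements the trait itself, so code generic over ConnectionTrait keeps working without any adapter. A minimal sketch, with a made-up helper name:

use sea_orm::{ConnectionTrait, DbErr};

// Hypothetical helper: generic over ConnectionTrait, so it accepts a &DbConn
// (or a transaction) directly, with no wrapper type in between.
async fn ping_backend<C: ConnectionTrait>(conn: &C) -> Result<(), DbErr> {
    conn.execute_unprepared("SELECT 1").await?;
    Ok(())
}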

View File

@@ -1,14 +1,14 @@
mod conn;
pub mod entities;
mod migrator;
pub mod query;
use sea_orm::{ConnectOptions, Database, DatabaseConnection, DeriveActiveEnum, EnumIter};
use sea_orm_migration::MigratorTrait;
use sea_orm::{DeriveActiveEnum, EnumIter};
use serde::{Deserialize, Serialize};
pub use entities::{prelude::*, *};
use migrator::Migrator;
pub use migrator::Migrator;
type Result<T> = std::result::Result<T, sea_orm::DbErr>;
@@ -41,24 +41,3 @@ pub struct FullPackage {
related: Vec<(PackageRelatedEnum, String)>,
files: Vec<String>,
}
#[derive(Clone, Debug)]
pub struct RieterDb {
conn: DatabaseConnection,
pub pkg: query::PackageQuery,
pub repo: query::RepoQuery,
}
impl RieterDb {
pub async fn connect<C: Into<ConnectOptions>>(opt: C) -> Result<Self> {
let db = Database::connect(opt).await?;
Migrator::up(&db, None).await?;
Ok(Self {
conn: db.clone(),
pkg: query::PackageQuery::new(db.clone()),
repo: query::RepoQuery::new(db.clone()),
})
}
}
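With RieterDb::connect removed, connecting and running migrations become the caller's job, which is also why Migrator is now re-exported. A sketch of that setup sequence, assuming a helper function for illustration:

use sea_orm::{DbConn, DbErr};
use sea_orm_migration::MigratorTrait;

// Hypothetical setup helper: connect first, then apply pending migrations. The
// resulting DbConn is what ends up in Global and gets passed to db::query.
async fn setup_db(db_url: &str) -> Result<DbConn, DbErr> {
    let db = sea_orm::Database::connect(db_url).await?;
    crate::db::Migrator::up(&db, None).await?;
    Ok(db)
}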

View File

@@ -1,7 +1,4 @@
pub mod package;
pub mod repo;
pub use package::PackageQuery;
pub use repo::RepoQuery;
type Result<T> = std::result::Result<T, sea_orm::DbErr>;

View File

@@ -3,11 +3,6 @@ use serde::Deserialize;
use crate::db::*;
#[derive(Clone, Debug)]
pub struct PackageQuery {
conn: DatabaseConnection,
}
#[derive(Deserialize)]
pub struct Filter {
repo: Option<i32>,
@@ -27,189 +22,183 @@ impl IntoCondition for Filter {
}
}
impl PackageQuery {
pub fn new(conn: DatabaseConnection) -> Self {
Self { conn }
pub async fn page(
conn: &DbConn,
per_page: u64,
page: u64,
filter: Filter,
) -> super::Result<(u64, Vec<package::Model>)> {
let paginator = Package::find()
.filter(filter)
.order_by_asc(package::Column::Id)
.paginate(conn, per_page);
let packages = paginator.fetch_page(page).await?;
let total_pages = paginator.num_pages().await?;
Ok((total_pages, packages))
}
pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<package::Model>> {
package::Entity::find_by_id(id).one(conn).await
}
pub async fn by_fields(
conn: &DbConn,
repo_id: i32,
name: &str,
version: Option<&str>,
arch: &str,
) -> Result<Option<package::Model>> {
let mut query = Package::find()
.filter(package::Column::RepoId.eq(repo_id))
.filter(package::Column::Name.eq(name))
.filter(package::Column::Arch.eq(arch));
if let Some(version) = version {
query = query.filter(package::Column::Version.eq(version));
}
pub async fn page(
&self,
per_page: u64,
page: u64,
filter: Filter,
) -> super::Result<(u64, Vec<package::Model>)> {
let paginator = Package::find()
.filter(filter)
.order_by_asc(package::Column::Id)
.paginate(&self.conn, per_page);
let packages = paginator.fetch_page(page).await?;
let total_pages = paginator.num_pages().await?;
query.one(conn).await
}
Ok((total_pages, packages))
}
pub async fn delete_with_arch(conn: &DbConn, repo_id: i32, arch: &str) -> Result<DeleteResult> {
Package::delete_many()
.filter(package::Column::RepoId.eq(repo_id))
.filter(package::Column::Arch.eq(arch))
.exec(conn)
.await
}
pub async fn by_id(&self, id: i32) -> Result<Option<package::Model>> {
package::Entity::find_by_id(id).one(&self.conn).await
}
pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
let info = pkg.info;
pub async fn by_fields(
&self,
repo_id: i32,
name: &str,
version: Option<&str>,
arch: &str,
) -> Result<Option<package::Model>> {
let mut query = Package::find()
.filter(package::Column::RepoId.eq(repo_id))
.filter(package::Column::Name.eq(name))
.filter(package::Column::Arch.eq(arch));
let model = package::ActiveModel {
id: NotSet,
repo_id: Set(repo_id),
base: Set(info.base),
name: Set(info.name),
version: Set(info.version),
arch: Set(info.arch),
size: Set(info.size),
c_size: Set(info.csize),
description: Set(info.description),
url: Set(info.url),
build_date: Set(info.build_date.to_string()),
packager: Set(info.packager),
pgp_sig: Set(info.pgpsig),
pgp_sig_size: Set(info.pgpsigsize),
sha256_sum: Set(info.sha256sum),
};
if let Some(version) = version {
query = query.filter(package::Column::Version.eq(version));
}
let pkg_entry = model.insert(conn).await?;
query.one(&self.conn).await
}
// Insert all the related tables
PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
package_id: Set(pkg_entry.id),
name: Set(s.to_string()),
}))
.on_empty_do_nothing()
.exec(conn)
.await?;
pub async fn delete_with_arch(&self, repo_id: i32, arch: &str) -> Result<DeleteResult> {
Package::delete_many()
.filter(package::Column::RepoId.eq(repo_id))
.filter(package::Column::Arch.eq(arch))
.exec(&self.conn)
.await
}
PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
package_id: Set(pkg_entry.id),
name: Set(s.to_string()),
}))
.on_empty_do_nothing()
.exec(conn)
.await?;
pub async fn insert(&self, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
let info = pkg.info;
let related = info
.conflicts
.iter()
.map(|s| (PackageRelatedEnum::Conflicts, s))
.chain(
info.replaces
.iter()
.map(|s| (PackageRelatedEnum::Replaces, s)),
)
.chain(
info.provides
.iter()
.map(|s| (PackageRelatedEnum::Provides, s)),
)
.chain(info.depends.iter().map(|s| (PackageRelatedEnum::Depend, s)))
.chain(
info.makedepends
.iter()
.map(|s| (PackageRelatedEnum::Depend, s)),
)
.chain(
info.checkdepends
.iter()
.map(|s| (PackageRelatedEnum::Checkdepend, s)),
)
.chain(
info.optdepends
.iter()
.map(|s| (PackageRelatedEnum::Optdepend, s)),
);
let model = package::ActiveModel {
id: NotSet,
repo_id: Set(repo_id),
base: Set(info.base),
name: Set(info.name),
version: Set(info.version),
arch: Set(info.arch),
size: Set(info.size),
c_size: Set(info.csize),
description: Set(info.description),
url: Set(info.url),
build_date: Set(info.build_date.to_string()),
packager: Set(info.packager),
pgp_sig: Set(info.pgpsig),
pgp_sig_size: Set(info.pgpsigsize),
sha256_sum: Set(info.sha256sum),
};
PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
package_id: Set(pkg_entry.id),
r#type: Set(t),
name: Set(s.to_string()),
}))
.on_empty_do_nothing()
.exec(conn)
.await?;
let pkg_entry = model.insert(&self.conn).await?;
PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
package_id: Set(pkg_entry.id),
path: Set(s.display().to_string()),
}))
.on_empty_do_nothing()
.exec(conn)
.await?;
// Insert all the related tables
PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
package_id: Set(pkg_entry.id),
name: Set(s.to_string()),
Ok(())
}
pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
if let Some(entry) = by_id(conn, id).await? {
let licenses = entry
.find_related(PackageLicense)
.all(conn)
.await?
.into_iter()
.map(|e| e.name)
.collect();
let groups = entry
.find_related(PackageGroup)
.all(conn)
.await?
.into_iter()
.map(|e| e.name)
.collect();
let related = entry
.find_related(PackageRelated)
.all(conn)
.await?
.into_iter()
.map(|e| (e.r#type, e.name))
.collect();
let files = entry
.find_related(PackageFile)
.all(conn)
.await?
.into_iter()
.map(|e| e.path)
.collect();
Ok(Some(FullPackage {
entry,
licenses,
groups,
related,
files,
}))
.on_empty_do_nothing()
.exec(&self.conn)
.await?;
PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
package_id: Set(pkg_entry.id),
name: Set(s.to_string()),
}))
.on_empty_do_nothing()
.exec(&self.conn)
.await?;
let related = info
.conflicts
.iter()
.map(|s| (PackageRelatedEnum::Conflicts, s))
.chain(
info.replaces
.iter()
.map(|s| (PackageRelatedEnum::Replaces, s)),
)
.chain(
info.provides
.iter()
.map(|s| (PackageRelatedEnum::Provides, s)),
)
.chain(info.depends.iter().map(|s| (PackageRelatedEnum::Depend, s)))
.chain(
info.makedepends
.iter()
.map(|s| (PackageRelatedEnum::Depend, s)),
)
.chain(
info.checkdepends
.iter()
.map(|s| (PackageRelatedEnum::Checkdepend, s)),
)
.chain(
info.optdepends
.iter()
.map(|s| (PackageRelatedEnum::Optdepend, s)),
);
PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
package_id: Set(pkg_entry.id),
r#type: Set(t),
name: Set(s.to_string()),
}))
.on_empty_do_nothing()
.exec(&self.conn)
.await?;
PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
package_id: Set(pkg_entry.id),
path: Set(s.display().to_string()),
}))
.on_empty_do_nothing()
.exec(&self.conn)
.await?;
Ok(())
}
pub async fn full(&self, id: i32) -> Result<Option<FullPackage>> {
if let Some(entry) = self.by_id(id).await? {
let licenses = entry
.find_related(PackageLicense)
.all(&self.conn)
.await?
.into_iter()
.map(|e| e.name)
.collect();
let groups = entry
.find_related(PackageGroup)
.all(&self.conn)
.await?
.into_iter()
.map(|e| e.name)
.collect();
let related = entry
.find_related(PackageRelated)
.all(&self.conn)
.await?
.into_iter()
.map(|e| (e.r#type, e.name))
.collect();
let files = entry
.find_related(PackageFile)
.all(&self.conn)
.await?
.into_iter()
.map(|e| e.path)
.collect();
Ok(Some(FullPackage {
entry,
licenses,
groups,
related,
files,
}))
} else {
Ok(None)
}
} else {
Ok(None)
}
}
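The package queries are now stateless functions keyed on the connection argument. A usage sketch; the function name, repo id and package name are placeholders, not part of the diff:

// Hypothetical call site for the new free functions.
async fn show_package(conn: &sea_orm::DbConn, repo_id: i32) -> Result<(), sea_orm::DbErr> {
    // Look up a package by repo id, name and architecture; the version is optional.
    if let Some(pkg) =
        crate::db::query::package::by_fields(conn, repo_id, "some-package", None, "x86_64").await?
    {
        // Load the same row together with its licenses, groups, relations and files.
        let _full = crate::db::query::package::full(conn, pkg.id).await?;
    }
    Ok(())
}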

View File

@@ -2,11 +2,6 @@ use sea_orm::{sea_query::IntoCondition, *};
use crate::db::*;
#[derive(Clone, Debug)]
pub struct RepoQuery {
conn: DatabaseConnection,
}
#[derive(Deserialize)]
pub struct Filter {
name: Option<String>,
@@ -18,43 +13,37 @@ impl IntoCondition for Filter {
}
}
impl RepoQuery {
pub fn new(conn: DatabaseConnection) -> Self {
Self { conn }
}
pub async fn page(conn: &DbConn, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
let paginator = Repo::find()
.order_by_asc(repo::Column::Id)
.paginate(conn, per_page);
let repos = paginator.fetch_page(page).await?;
let total_pages = paginator.num_pages().await?;
pub async fn page(&self, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
let paginator = Repo::find()
.order_by_asc(repo::Column::Id)
.paginate(&self.conn, per_page);
let repos = paginator.fetch_page(page).await?;
let total_pages = paginator.num_pages().await?;
Ok((total_pages, repos))
}
pub async fn by_id(&self, id: i32) -> Result<Option<repo::Model>> {
repo::Entity::find_by_id(id).one(&self.conn).await
}
pub async fn by_name(&self, name: &str) -> Result<Option<repo::Model>> {
Repo::find()
.filter(repo::Column::Name.eq(name))
.one(&self.conn)
.await
}
pub async fn insert(
&self,
name: &str,
description: Option<&str>,
) -> Result<InsertResult<repo::ActiveModel>> {
let model = repo::ActiveModel {
id: NotSet,
name: Set(String::from(name)),
description: Set(description.map(String::from)),
};
Repo::insert(model).exec(&self.conn).await
}
Ok((total_pages, repos))
}
pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<repo::Model>> {
repo::Entity::find_by_id(id).one(conn).await
}
pub async fn by_name(conn: &DbConn, name: &str) -> Result<Option<repo::Model>> {
Repo::find()
.filter(repo::Column::Name.eq(name))
.one(conn)
.await
}
pub async fn insert(
conn: &DbConn,
name: &str,
description: Option<&str>,
) -> Result<InsertResult<repo::ActiveModel>> {
let model = repo::ActiveModel {
id: NotSet,
name: Set(String::from(name)),
description: Set(description.map(String::from)),
};
Repo::insert(model).exec(conn).await
}
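The repo helpers follow the same shape; insert returns SeaORM's InsertResult, which is how the upload handler further down obtains last_insert_id. A sketch of that find-or-create flow, with a made-up function name:

// Hypothetical helper: reuse the repo row if it exists, otherwise create it.
async fn repo_id_or_create(conn: &sea_orm::DbConn, name: &str) -> Result<i32, sea_orm::DbErr> {
    Ok(match crate::db::query::repo::by_name(conn, name).await? {
        Some(entry) => entry.id,
        None => crate::db::query::repo::insert(conn, name, None).await?.last_insert_id,
    })
}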

View File

@@ -22,7 +22,7 @@ pub struct Config {
pub struct Global {
config: Config,
repo_manager: Arc<RwLock<RepoGroupManager>>,
db: db::RieterDb,
db: sea_orm::DbConn,
}
#[tokio::main]

View File

@@ -5,7 +5,7 @@ pub use manager::RepoGroupManager;
use std::path::PathBuf;
use axum::body::{Body, BodyDataStream};
use axum::body::{Body};
use axum::extract::{Path, State};
use axum::http::Request;
use axum::http::StatusCode;
@@ -21,6 +21,8 @@ use tower_http::services::{ServeDir, ServeFile};
use tower_http::validate_request::ValidateRequestHeaderLayer;
use uuid::Uuid;
use crate::db;
const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
pub fn router(api_key: &str) -> Router<crate::Global> {
@@ -128,26 +130,31 @@ async fn post_package_archive(
tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
// Query the repo for its ID, or create it if it does not already exist
let res = global.db.repo.by_name(&repo).await?;
let res = db::query::repo::by_name(&global.db, &repo).await?;
let repo_id = if let Some(repo_entity) = res {
repo_entity.id
} else {
global.db.repo.insert(&repo, None).await?.last_insert_id
db::query::repo::insert(&global.db, &repo, None)
.await?
.last_insert_id
};
// If the package already exists in the database, we remove it first
let res = global
.db
.pkg
.by_fields(repo_id, &pkg.info.name, None, &pkg.info.arch)
.await?;
let res = db::query::package::by_fields(
&global.db,
repo_id,
&pkg.info.name,
None,
&pkg.info.arch,
)
.await?;
if let Some(entry) = res {
entry.delete(&global.db).await?;
}
global.db.pkg.insert(repo_id, pkg).await?;
db::query::package::insert(&global.db, repo_id, pkg).await?;
Ok(())
}
@@ -172,7 +179,7 @@ async fn delete_repo(
.await??;
if repo_removed {
let res = global.db.repo.by_name(&repo).await?;
let res = db::query::repo::by_name(&global.db, &repo).await?;
if let Some(repo_entry) = res {
repo_entry.delete(&global.db).await?;
@@ -203,10 +210,10 @@ async fn delete_arch_repo(
.await??;
if repo_removed {
let res = global.db.repo.by_name(&repo).await?;
let res = db::query::repo::by_name(&global.db, &repo).await?;
if let Some(repo_entry) = res {
global.db.pkg.delete_with_arch(repo_entry.id, &arch).await?;
db::query::package::delete_with_arch(&global.db, repo_entry.id, &arch).await?;
}
tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
@@ -229,19 +236,17 @@ async fn delete_package(
.await??;
if let Some((name, version, release, arch)) = res {
let res = global.db.repo.by_name(&repo).await?;
let res = db::query::repo::by_name(&global.db, &repo).await?;
if let Some(repo_entry) = res {
let res = global
.db
.pkg
.by_fields(
repo_entry.id,
&name,
Some(&format!("{}-{}", version, release)),
&arch,
)
.await?;
let res = db::query::package::by_fields(
&global.db,
repo_entry.id,
&name,
Some(&format!("{}-{}", version, release)),
&arch,
)
.await?;
if let Some(entry) = res {
entry.delete(&global.db).await?;