Compare commits
No commits in common. "45f1abade39390332056e41c24ff1c2f2e4cf958" and "e684cfb84ebf889e0968f45aa913b9f3efcbf99e" have entirely different histories.
45f1abade3...e684cfb84e
@@ -1,7 +1,5 @@
 mod pagination;
 
-use sea_orm::{*};
-
 use axum::extract::{Path, Query, State};
 use axum::routing::get;
 use axum::Json;
@@ -9,7 +7,7 @@ use axum::Router;
 
 use pagination::PaginatedResponse;
 
-use crate::db::{self, *};
+use crate::db;
 
 pub fn router() -> Router<crate::Global> {
     Router::new()
@@ -22,34 +20,26 @@ pub fn router() -> Router<crate::Global> {
 async fn get_repos(
     State(global): State<crate::Global>,
     Query(pagination): Query<pagination::Query>,
-    Query(filter): Query<db::query::repo::Filter>,
 ) -> crate::Result<Json<PaginatedResponse<db::repo::Model>>> {
-    let page = pagination.page.unwrap_or(1) - 1;
-    let per_page = pagination.per_page.unwrap_or(25);
-
-    let paginator = Repo::find()
-        .filter(filter)
-        .order_by_asc(package::Column::Id)
-        .paginate(&global.db, pagination.per_page.unwrap_or(25));
-    let items = paginator
-        .fetch_page(pagination.page.unwrap_or(1) - 1)
+    let (total_pages, repos) = global
+        .db
+        .repo
+        .page(
+            pagination.per_page.unwrap_or(25),
+            pagination.page.unwrap_or(1) - 1,
+        )
         .await?;
-    let total_pages = paginator.num_pages().await?;
-
-    Ok(Json(PaginatedResponse {
-        page,
-        per_page,
-        total_pages,
-        count: items.len(),
-        items,
-    }))
+    Ok(Json(pagination.res(total_pages, repos)))
 }
 
 async fn get_single_repo(
     State(global): State<crate::Global>,
     Path(id): Path<i32>,
 ) -> crate::Result<Json<db::repo::Model>> {
-    let repo = db::query::repo::by_id(&global.db, id)
+    let repo = global
+        .db
+        .repo
+        .by_id(id)
         .await?
         .ok_or(axum::http::StatusCode::NOT_FOUND)?;
 
@@ -59,13 +49,13 @@ async fn get_single_repo(
 async fn get_packages(
     State(global): State<crate::Global>,
     Query(pagination): Query<pagination::Query>,
-    Query(filter): Query<db::query::package::Filter>,
 ) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
-    let (total_pages, pkgs) = db::query::package::page(
-        &global.db,
+    let (total_pages, pkgs) = global
+        .db
+        .pkg
+        .page(
             pagination.per_page.unwrap_or(25),
             pagination.page.unwrap_or(1) - 1,
-            filter,
         )
         .await?;
 
@@ -76,7 +66,10 @@ async fn get_single_package(
     State(global): State<crate::Global>,
     Path(id): Path<i32>,
 ) -> crate::Result<Json<crate::db::FullPackage>> {
-    let entry = db::query::package::full(&global.db, id)
+    let entry = global
+        .db
+        .pkg
+        .full(id)
         .await?
         .ok_or(axum::http::StatusCode::NOT_FOUND)?;
 
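In the handlers above, the user-facing `page` query parameter is 1-based while sea_orm's paginator is 0-based, hence the `pagination.page.unwrap_or(1) - 1` conversions. A minimal, self-contained sketch of that mapping; the `page_index` helper is invented for illustration and is not part of this diff:

// Maps a 1-based, user-facing page number onto the 0-based index that
// sea_orm's Paginator::fetch_page expects. saturating_sub is used here to
// avoid underflow on page=0; the handlers in the diff use a plain `- 1`.
fn page_index(user_page: Option<u64>) -> u64 {
    user_page.unwrap_or(1).saturating_sub(1)
}

fn main() {
    assert_eq!(page_index(None), 0);
    assert_eq!(page_index(Some(1)), 0);
    assert_eq!(page_index(Some(3)), 2);
}
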
@@ -1,6 +1,6 @@
 use serde::{Deserialize, Serialize};
 
-pub const DEFAULT_PAGE: u64 = 1;
+pub const DEFAULT_PAGE: u64 = 0;
 pub const DEFAULT_PER_PAGE: u64 = 25;
 
 #[derive(Deserialize)]
@@ -10,7 +10,6 @@ use std::sync::{Arc, RwLock};
 use tower_http::trace::TraceLayer;
 use tracing::debug;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
-use sea_orm_migration::MigratorTrait;
 
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
@@ -76,8 +75,10 @@ impl Cli {
 
         debug!("Connecting to database with URL {}", db_url);
 
-        let db = sea_orm::Database::connect(db_url).await?;
-        crate::db::Migrator::up(&db, None).await?;
+        let db = crate::db::RieterDb::connect(db_url).await?;
+        // let db = crate::db::init("postgres://rieter:rieter@localhost:5432/rieter")
+        // .await
+        // .unwrap();
 
         let config = Config {
             data_dir: self.data_dir.clone(),
@@ -0,0 +1,61 @@
+use super::RieterDb;
+use sea_orm::{DbBackend, DbErr, ExecResult, QueryResult, Statement};
+use std::{future::Future, pin::Pin};
+
+// Allows RieterDb objects to be passed to ORM functions
+impl sea_orm::ConnectionTrait for RieterDb {
+    fn get_database_backend(&self) -> DbBackend {
+        self.conn.get_database_backend()
+    }
+    fn execute<'life0, 'async_trait>(
+        &'life0 self,
+        stmt: Statement,
+    ) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
+    where
+        Self: 'async_trait,
+        'life0: 'async_trait,
+    {
+        self.conn.execute(stmt)
+    }
+    fn execute_unprepared<'life0, 'life1, 'async_trait>(
+        &'life0 self,
+        sql: &'life1 str,
+    ) -> Pin<Box<dyn Future<Output = std::result::Result<ExecResult, DbErr>> + Send + 'async_trait>>
+    where
+        Self: 'async_trait,
+        'life0: 'async_trait,
+        'life1: 'async_trait,
+    {
+        self.conn.execute_unprepared(sql)
+    }
+    fn query_one<'life0, 'async_trait>(
+        &'life0 self,
+        stmt: Statement,
+    ) -> Pin<
+        Box<
+            dyn Future<Output = std::result::Result<Option<QueryResult>, DbErr>>
+                + Send
+                + 'async_trait,
+        >,
+    >
+    where
+        Self: 'async_trait,
+        'life0: 'async_trait,
+    {
+        self.conn.query_one(stmt)
+    }
+    fn query_all<'life0, 'async_trait>(
+        &'life0 self,
+        stmt: Statement,
+    ) -> Pin<
+        Box<
+            dyn Future<Output = std::result::Result<Vec<QueryResult>, DbErr>> + Send + 'async_trait,
+        >,
+    >
+    where
+        Self: 'async_trait,
+        'life0: 'async_trait,
+    {
+        self.conn.query_all(stmt)
+    }
+}
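The impl above only forwards each `ConnectionTrait` method to the wrapped `DatabaseConnection`, which is what lets a `RieterDb` be handed to sea_orm calls that are generic over a connection (for example the unchanged `entry.delete(&global.db)` calls later in this diff). A small, self-contained sketch of that property, assuming only the sea_orm crate; the `fetch_first` helper is invented for illustration and is not part of the diff:

use sea_orm::{ConnectionTrait, DbErr, EntityTrait};

// Generic over the connection type: anything implementing ConnectionTrait is
// accepted, so a plain DatabaseConnection and a delegating wrapper such as
// RieterDb can be used interchangeably here.
async fn fetch_first<E, C>(db: &C) -> Result<Option<E::Model>, DbErr>
where
    E: EntityTrait,
    C: ConnectionTrait,
{
    E::find().one(db).await
}
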
@@ -1,20 +1,19 @@
+mod conn;
 pub mod entities;
 mod migrator;
-pub mod query;
+mod query;
 
-use sea_orm::{DeriveActiveEnum, EnumIter};
-
+use sea_orm::{ConnectOptions, Database, DatabaseConnection, DeriveActiveEnum, EnumIter};
+use sea_orm_migration::MigratorTrait;
 use serde::{Deserialize, Serialize};
 
 pub use entities::{prelude::*, *};
-pub use migrator::Migrator;
+use migrator::Migrator;
 
-
 type Result<T> = std::result::Result<T, sea_orm::DbErr>;
 
 #[derive(EnumIter, DeriveActiveEnum, Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
 #[sea_orm(rs_type = "i32", db_type = "Integer")]
-#[serde(rename_all = "lowercase")]
 pub enum PackageRelatedEnum {
     #[sea_orm(num_value = 0)]
     Conflicts,
@@ -41,3 +40,24 @@ pub struct FullPackage {
     related: Vec<(PackageRelatedEnum, String)>,
     files: Vec<String>,
 }
+
+#[derive(Clone, Debug)]
+pub struct RieterDb {
+    conn: DatabaseConnection,
+    pub pkg: query::PackageQuery,
+    pub repo: query::RepoQuery,
+}
+
+impl RieterDb {
+    pub async fn connect<C: Into<ConnectOptions>>(opt: C) -> Result<Self> {
+        let db = Database::connect(opt).await?;
+
+        Migrator::up(&db, None).await?;
+
+        Ok(Self {
+            conn: db.clone(),
+            pkg: query::PackageQuery::new(db.clone()),
+            repo: query::RepoQuery::new(db.clone()),
+        })
+    }
+}
@@ -1,4 +1,7 @@
-pub mod package;
-pub mod repo;
+mod package;
+mod repo;
 
+pub use package::PackageQuery;
+pub use repo::RepoQuery;
+
 type Result<T> = std::result::Result<T, sea_orm::DbErr>;
@@ -1,54 +1,42 @@
-use sea_orm::{sea_query::IntoCondition, *};
-use serde::Deserialize;
+use sea_orm::*;
 
 use crate::db::*;
 
-#[derive(Deserialize)]
-pub struct Filter {
-    repo: Option<i32>,
-    arch: Option<String>,
-    name: Option<String>,
+#[derive(Clone, Debug)]
+pub struct PackageQuery {
+    conn: DatabaseConnection,
 }
 
-impl IntoCondition for Filter {
-    fn into_condition(self) -> Condition {
-        Condition::all()
-            .add_option(self.repo.map(|repo| package::Column::RepoId.eq(repo)))
-            .add_option(self.arch.map(|arch| package::Column::Arch.eq(arch)))
-            .add_option(
-                self.name
-                    .map(|name| package::Column::Name.like(format!("%{}%", name))),
-            )
+impl PackageQuery {
+    pub fn new(conn: DatabaseConnection) -> Self {
+        Self { conn }
     }
-}
 
     pub async fn page(
-    conn: &DbConn,
+        &self,
         per_page: u64,
         page: u64,
-    filter: Filter,
-) -> super::Result<(u64, Vec<package::Model>)> {
+    ) -> super::Result<(u64, Vec<package::Model>)> {
         let paginator = Package::find()
-        .filter(filter)
             .order_by_asc(package::Column::Id)
-        .paginate(conn, per_page);
+            .paginate(&self.conn, per_page);
         let packages = paginator.fetch_page(page).await?;
         let total_pages = paginator.num_pages().await?;
 
         Ok((total_pages, packages))
     }
 
-pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<package::Model>> {
-    package::Entity::find_by_id(id).one(conn).await
+    pub async fn by_id(&self, id: i32) -> Result<Option<package::Model>> {
+        package::Entity::find_by_id(id).one(&self.conn).await
     }
 
     pub async fn by_fields(
-    conn: &DbConn,
+        &self,
         repo_id: i32,
         name: &str,
         version: Option<&str>,
         arch: &str,
     ) -> Result<Option<package::Model>> {
         let mut query = Package::find()
             .filter(package::Column::RepoId.eq(repo_id))
             .filter(package::Column::Name.eq(name))
@@ -58,18 +46,18 @@ pub async fn by_fields(
             query = query.filter(package::Column::Version.eq(version));
         }
 
-    query.one(conn).await
+        query.one(&self.conn).await
     }
 
-pub async fn delete_with_arch(conn: &DbConn, repo_id: i32, arch: &str) -> Result<DeleteResult> {
+    pub async fn delete_with_arch(&self, repo_id: i32, arch: &str) -> Result<DeleteResult> {
         Package::delete_many()
             .filter(package::Column::RepoId.eq(repo_id))
             .filter(package::Column::Arch.eq(arch))
-        .exec(conn)
+            .exec(&self.conn)
             .await
     }
 
-pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
+    pub async fn insert(&self, repo_id: i32, pkg: crate::repo::package::Package) -> Result<()> {
         let info = pkg.info;
 
         let model = package::ActiveModel {
@@ -90,7 +78,7 @@ pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Pack
             sha256_sum: Set(info.sha256sum),
         };
 
-    let pkg_entry = model.insert(conn).await?;
+        let pkg_entry = model.insert(&self.conn).await?;
 
         // Insert all the related tables
         PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
@@ -98,7 +86,7 @@ pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Pack
             name: Set(s.to_string()),
         }))
         .on_empty_do_nothing()
-    .exec(conn)
+        .exec(&self.conn)
         .await?;
 
         PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
@@ -106,7 +94,7 @@ pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Pack
             name: Set(s.to_string()),
         }))
         .on_empty_do_nothing()
-    .exec(conn)
+        .exec(&self.conn)
         .await?;
 
         let related = info
@@ -144,48 +132,45 @@ pub async fn insert(conn: &DbConn, repo_id: i32, pkg: crate::repo::package::Pack
             package_id: Set(pkg_entry.id),
             r#type: Set(t),
             name: Set(s.to_string()),
-    }))
-    .on_empty_do_nothing()
-    .exec(conn)
-    .await?;
+        }));
 
         PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
             package_id: Set(pkg_entry.id),
             path: Set(s.display().to_string()),
         }))
         .on_empty_do_nothing()
-    .exec(conn)
+        .exec(&self.conn)
         .await?;
 
         Ok(())
     }
 
-pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
-    if let Some(entry) = by_id(conn, id).await? {
+    pub async fn full(&self, id: i32) -> Result<Option<FullPackage>> {
+        if let Some(entry) = self.by_id(id).await? {
             let licenses = entry
                 .find_related(PackageLicense)
-            .all(conn)
+                .all(&self.conn)
                 .await?
                 .into_iter()
                 .map(|e| e.name)
                 .collect();
             let groups = entry
                 .find_related(PackageGroup)
-            .all(conn)
+                .all(&self.conn)
                 .await?
                 .into_iter()
                 .map(|e| e.name)
                 .collect();
             let related = entry
                 .find_related(PackageRelated)
-            .all(conn)
+                .all(&self.conn)
                 .await?
                 .into_iter()
                 .map(|e| (e.r#type, e.name))
                 .collect();
             let files = entry
                 .find_related(PackageFile)
-            .all(conn)
+                .all(&self.conn)
                 .await?
                 .into_iter()
                 .map(|e| e.path)
@@ -201,4 +186,5 @@ pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
         } else {
             Ok(None)
         }
+    }
 }
@@ -1,49 +1,49 @@
-use sea_orm::{sea_query::IntoCondition, *};
+use sea_orm::*;
 
 use crate::db::*;
 
-#[derive(Deserialize)]
-pub struct Filter {
-    name: Option<String>,
+#[derive(Clone, Debug)]
+pub struct RepoQuery {
+    conn: DatabaseConnection,
 }
 
-impl IntoCondition for Filter {
-    fn into_condition(self) -> Condition {
-        Condition::all().add_option(self.name.map(|name| package::Column::Name.like(name)))
+impl RepoQuery {
+    pub fn new(conn: DatabaseConnection) -> Self {
+        Self { conn }
     }
-}
 
-pub async fn page(conn: &DbConn, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
+    pub async fn page(&self, per_page: u64, page: u64) -> Result<(u64, Vec<repo::Model>)> {
         let paginator = Repo::find()
             .order_by_asc(repo::Column::Id)
-        .paginate(conn, per_page);
+            .paginate(&self.conn, per_page);
         let repos = paginator.fetch_page(page).await?;
         let total_pages = paginator.num_pages().await?;
 
         Ok((total_pages, repos))
     }
 
-pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<repo::Model>> {
-    repo::Entity::find_by_id(id).one(conn).await
+    pub async fn by_id(&self, id: i32) -> Result<Option<repo::Model>> {
+        repo::Entity::find_by_id(id).one(&self.conn).await
     }
 
-pub async fn by_name(conn: &DbConn, name: &str) -> Result<Option<repo::Model>> {
+    pub async fn by_name(&self, name: &str) -> Result<Option<repo::Model>> {
         Repo::find()
             .filter(repo::Column::Name.eq(name))
-        .one(conn)
+            .one(&self.conn)
             .await
     }
 
     pub async fn insert(
-    conn: &DbConn,
+        &self,
         name: &str,
         description: Option<&str>,
     ) -> Result<InsertResult<repo::ActiveModel>> {
         let model = repo::ActiveModel {
             id: NotSet,
             name: Set(String::from(name)),
             description: Set(description.map(String::from)),
         };
 
-    Repo::insert(model).exec(conn).await
+        Repo::insert(model).exec(&self.conn).await
+    }
 }
@@ -22,7 +22,7 @@ pub struct Config {
 pub struct Global {
     config: Config,
     repo_manager: Arc<RwLock<RepoGroupManager>>,
-    db: sea_orm::DbConn,
+    db: db::RieterDb,
 }
 
 #[tokio::main]
@@ -5,7 +5,7 @@ pub use manager::RepoGroupManager;
 
 use std::path::PathBuf;
 
-use axum::body::{Body};
+use axum::body::{Body, BodyDataStream};
 use axum::extract::{Path, State};
 use axum::http::Request;
 use axum::http::StatusCode;
@@ -21,8 +21,6 @@ use tower_http::services::{ServeDir, ServeFile};
 use tower_http::validate_request::ValidateRequestHeaderLayer;
 use uuid::Uuid;
 
-use crate::db;
-
 const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
 
 pub fn router(api_key: &str) -> Router<crate::Global> {
@@ -130,31 +128,26 @@ async fn post_package_archive(
     tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
 
     // Query the repo for its ID, or create it if it does not already exist
-    let res = db::query::repo::by_name(&global.db, &repo).await?;
+    let res = global.db.repo.by_name(&repo).await?;
 
     let repo_id = if let Some(repo_entity) = res {
         repo_entity.id
     } else {
-        db::query::repo::insert(&global.db, &repo, None)
-            .await?
-            .last_insert_id
+        global.db.repo.insert(&repo, None).await?.last_insert_id
     };
 
     // If the package already exists in the database, we remove it first
-    let res = db::query::package::by_fields(
-        &global.db,
-        repo_id,
-        &pkg.info.name,
-        None,
-        &pkg.info.arch,
-    )
-    .await?;
+    let res = global
+        .db
+        .pkg
+        .by_fields(repo_id, &pkg.info.name, None, &pkg.info.arch)
+        .await?;
 
     if let Some(entry) = res {
         entry.delete(&global.db).await?;
     }
 
-    db::query::package::insert(&global.db, repo_id, pkg).await?;
+    global.db.pkg.insert(repo_id, pkg).await?;
 
     Ok(())
 }
@@ -179,7 +172,7 @@ async fn delete_repo(
     .await??;
 
     if repo_removed {
-        let res = db::query::repo::by_name(&global.db, &repo).await?;
+        let res = global.db.repo.by_name(&repo).await?;
 
         if let Some(repo_entry) = res {
             repo_entry.delete(&global.db).await?;
@@ -210,10 +203,10 @@ async fn delete_arch_repo(
     .await??;
 
     if repo_removed {
-        let res = db::query::repo::by_name(&global.db, &repo).await?;
+        let res = global.db.repo.by_name(&repo).await?;
 
         if let Some(repo_entry) = res {
-            db::query::package::delete_with_arch(&global.db, repo_entry.id, &arch).await?;
+            global.db.pkg.delete_with_arch(repo_entry.id, &arch).await?;
         }
 
         tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
 
@@ -236,11 +229,13 @@ async fn delete_package(
     .await??;
 
     if let Some((name, version, release, arch)) = res {
-        let res = db::query::repo::by_name(&global.db, &repo).await?;
+        let res = global.db.repo.by_name(&repo).await?;
 
         if let Some(repo_entry) = res {
-            let res = db::query::package::by_fields(
-                &global.db,
+            let res = global
+                .db
+                .pkg
+                .by_fields(
                     repo_entry.id,
                     &name,
                     Some(&format!("{}-{}", version, release)),