refactor: move database entities into separate crate
ci/woodpecker/push/build Pipeline was successful Details
ci/woodpecker/push/lint Pipeline was successful Details

feat/mirror-api
Jef Roosens 2024-07-16 20:38:43 +02:00
parent 4225ce3471
commit f761e3b36d
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
26 changed files with 191 additions and 492 deletions

17
Cargo.lock generated
View File

@ -782,6 +782,14 @@ dependencies = [
"serde",
]
[[package]]
name = "entity"
version = "0.1.0"
dependencies = [
"sea-orm",
"serde",
]
[[package]]
name = "equivalent"
version = "1.0.1"
@ -2074,6 +2082,7 @@ dependencies = [
"axum",
"chrono",
"clap",
"entity",
"figment",
"futures",
"http-body-util",
@ -2431,18 +2440,18 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
[[package]]
name = "serde"
version = "1.0.203"
version = "1.0.204"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.203"
version = "1.0.204"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
dependencies = [
"proc-macro2",
"quote",

View File

@ -4,7 +4,7 @@ members = [
'server',
'libarchive',
'libarchive3-sys'
]
, "entity"]
[profile.release]
lto = "fat"

View File

@ -0,0 +1,8 @@
[package]
name = "entity"
version = "0.1.0"
edition = "2021"
[dependencies]
sea-orm = "0.12.15"
serde = { version = "1.0.204", features = ["derive"] }

View File

@ -3,8 +3,6 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::db::PackageState;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "package")]
pub struct Model {
@ -25,7 +23,7 @@ pub struct Model {
pub pgp_sig_size: Option<i64>,
pub sha256_sum: String,
pub compression: String,
pub state: PackageState,
pub state: crate::PackageState,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]

View File

@ -3,15 +3,13 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::db::PackageRelatedEnum;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "package_related")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub package_id: i32,
#[sea_orm(primary_key, auto_increment = false)]
pub r#type: PackageRelatedEnum,
pub r#type: crate::PackageRelatedEnum,
#[sea_orm(primary_key, auto_increment = false)]
pub name: String,
}

37
entity/src/lib.rs 100644
View File

@ -0,0 +1,37 @@
pub mod entity;
pub use entity::prelude::*;
pub use entity::*;
use sea_orm::{DeriveActiveEnum, EnumIter};
use serde::{Deserialize, Serialize};
/// The kind of relation a `package_related` row expresses between a package
/// and a named target, mirroring the relation fields of an Arch package's
/// `.PKGINFO` (conflicts, provides, depends, ...).
///
/// Persisted as an `Integer` column via the explicit `num_value` mapping and
/// serialized as lowercase strings over the API.
///
/// Derives `Copy`: the enum is fieldless and is passed by value at several
/// call sites (e.g. query filters), so copying is free and avoids clones.
#[derive(EnumIter, DeriveActiveEnum, Serialize, Deserialize, Copy, PartialEq, Eq, Clone, Debug)]
#[sea_orm(rs_type = "i32", db_type = "Integer")]
#[serde(rename_all = "lowercase")]
pub enum PackageRelatedEnum {
    // NOTE: the num_value assignments are part of the stored schema —
    // never renumber existing variants, only append new ones.
    #[sea_orm(num_value = 0)]
    Conflicts,
    #[sea_orm(num_value = 1)]
    Replaces,
    #[sea_orm(num_value = 2)]
    Provides,
    #[sea_orm(num_value = 3)]
    Depend,
    #[sea_orm(num_value = 4)]
    Makedepend,
    #[sea_orm(num_value = 5)]
    Checkdepend,
    #[sea_orm(num_value = 6)]
    Optdepend,
}
/// Lifecycle state of a package row, stored as an `Integer` column via the
/// explicit `num_value` mapping.
///
/// Packages move from `PendingCommit` to `Committed` once published, and are
/// marked `PendingDeletion` before being removed by a sync/cleanup pass.
///
/// Derives `Copy`: the enum is fieldless and is used by value in query
/// expressions (e.g. `Expr::value(...)`, `.eq(...)`), so copying is free.
#[derive(EnumIter, DeriveActiveEnum, Deserialize, Serialize, Copy, PartialEq, Eq, Clone, Debug)]
#[sea_orm(rs_type = "i32", db_type = "Integer")]
pub enum PackageState {
    // NOTE: num_value assignments are part of the stored schema — do not
    // renumber existing variants.
    #[sea_orm(num_value = 0)]
    PendingCommit,
    #[sea_orm(num_value = 1)]
    Committed,
    #[sea_orm(num_value = 2)]
    PendingDeletion,
}

View File

@ -26,6 +26,7 @@ tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
uuid = { version = "1.4.0", features = ["v4"] }
migration = { path = "../migration" }
entity = { path = "../entity" }
[dependencies.sea-orm]
version = "0.12.1"

View File

@ -1,302 +0,0 @@
use sea_orm_migration::prelude::*;
/// The initial migration creating all repository-related tables.
pub struct Migration;

impl MigrationName for Migration {
    /// Unique identifier the migrator uses to record whether this migration
    /// has already been applied.
    fn name(&self) -> &str {
        "m_20230730_000001_create_repo_tables"
    }
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create the initial schema: `distro` -> `repo` -> `package`, plus the
    /// per-package detail tables (`package_license`, `package_group`,
    /// `package_related`, `package_file`). Every foreign key cascades on
    /// delete, so removing a parent row cleans up its children.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // "distro": a named distribution; names are unique.
        manager
            .create_table(
                Table::create()
                    .table(Distro::Table)
                    .col(
                        ColumnDef::new(Distro::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Distro::Name)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(Distro::Description).string())
                    .to_owned(),
            )
            .await?;

        // "repo": a repository belonging to exactly one distro.
        manager
            .create_table(
                Table::create()
                    .table(Repo::Table)
                    .col(
                        ColumnDef::new(Repo::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Repo::DistroId).integer().not_null())
                    .col(ColumnDef::new(Repo::Name).string().not_null().unique_key())
                    .col(ColumnDef::new(Repo::Description).string())
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-repo-distro_id")
                            .from(Repo::Table, Repo::DistroId)
                            .to(Distro::Table, Distro::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // "package": one row per uploaded package, with its metadata.
        // `State` holds an integer-backed enum (see PackageState).
        manager
            .create_table(
                Table::create()
                    .table(Package::Table)
                    .col(
                        ColumnDef::new(Package::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Package::RepoId).integer().not_null())
                    .col(ColumnDef::new(Package::Base).string_len(255).not_null())
                    .col(ColumnDef::new(Package::Name).string_len(255).not_null())
                    .col(ColumnDef::new(Package::Version).string_len(255).not_null())
                    .col(ColumnDef::new(Package::Arch).string_len(255).not_null())
                    .col(ColumnDef::new(Package::Size).big_integer().not_null())
                    .col(ColumnDef::new(Package::CSize).big_integer().not_null())
                    .col(ColumnDef::new(Package::Description).string())
                    .col(ColumnDef::new(Package::Url).string_len(255))
                    .col(ColumnDef::new(Package::BuildDate).date_time().not_null())
                    .col(ColumnDef::new(Package::Packager).string_len(255))
                    .col(ColumnDef::new(Package::PgpSig).string_len(255))
                    .col(ColumnDef::new(Package::PgpSigSize).big_integer())
                    // A SHA-256 hex digest is always exactly 64 characters.
                    .col(ColumnDef::new(Package::Sha256Sum).char_len(64).not_null())
                    .col(
                        ColumnDef::new(Package::Compression)
                            .string_len(16)
                            .not_null(),
                    )
                    .col(ColumnDef::new(Package::State).integer().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-package-repo_id")
                            .from(Package::Table, Package::RepoId)
                            .to(Repo::Table, Repo::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // "package_license": licenses per package; composite primary key
        // (package_id, name).
        manager
            .create_table(
                Table::create()
                    .table(PackageLicense::Table)
                    .col(
                        ColumnDef::new(PackageLicense::PackageId)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(PackageLicense::Name)
                            .string_len(255)
                            .not_null(),
                    )
                    .primary_key(
                        Index::create()
                            .col(PackageLicense::PackageId)
                            .col(PackageLicense::Name),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-package_license-package_id")
                            .from(PackageLicense::Table, PackageLicense::PackageId)
                            .to(Package::Table, Package::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // "package_group": groups per package; composite primary key
        // (package_id, name).
        manager
            .create_table(
                Table::create()
                    .table(PackageGroup::Table)
                    .col(ColumnDef::new(PackageGroup::PackageId).integer().not_null())
                    .col(
                        ColumnDef::new(PackageGroup::Name)
                            .string_len(255)
                            .not_null(),
                    )
                    .primary_key(
                        Index::create()
                            .col(PackageGroup::PackageId)
                            .col(PackageGroup::Name),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-package_group-package_id")
                            .from(PackageGroup::Table, PackageGroup::PackageId)
                            .to(Package::Table, Package::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // "package_related": conflicts/provides/depends/... entries. The
        // relation kind (`Type`) is part of the composite primary key so a
        // package can relate to the same name in multiple ways.
        manager
            .create_table(
                Table::create()
                    .table(PackageRelated::Table)
                    .col(
                        ColumnDef::new(PackageRelated::PackageId)
                            .integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(PackageRelated::Type).integer().not_null())
                    .col(
                        ColumnDef::new(PackageRelated::Name)
                            .string_len(255)
                            .not_null(),
                    )
                    .primary_key(
                        Index::create()
                            .col(PackageRelated::PackageId)
                            .col(PackageRelated::Type)
                            .col(PackageRelated::Name),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-package_depends-package_id")
                            .from(PackageRelated::Table, PackageRelated::PackageId)
                            .to(Package::Table, Package::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        // "package_file": file paths contained in a package; composite
        // primary key (package_id, path).
        manager
            .create_table(
                Table::create()
                    .table(PackageFile::Table)
                    .col(ColumnDef::new(PackageFile::PackageId).integer().not_null())
                    .col(ColumnDef::new(PackageFile::Path).string_len(255).not_null())
                    .primary_key(
                        Index::create()
                            .col(PackageFile::PackageId)
                            .col(PackageFile::Path),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-package_file-package_id")
                            .from(PackageFile::Table, PackageFile::PackageId)
                            .to(Package::Table, Package::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    /// Roll back the migration by dropping every table created in `up`,
    /// children before parents so foreign-key constraints are respected.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(PackageLicense::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(PackageGroup::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(PackageRelated::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(PackageFile::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Package::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Repo::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Distro::Table).to_owned())
            .await
    }
}
/// sea-query identifiers for the `distro` table and its columns.
#[derive(Iden)]
pub enum Distro {
    Table,
    Id,
    Name,
    Description,
}
/// sea-query identifiers for the `repo` table and its columns.
#[derive(Iden)]
pub enum Repo {
    Table,
    Id,
    DistroId,
    Name,
    Description,
}
/// sea-query identifiers for the `package` table and its columns.
#[derive(Iden)]
pub enum Package {
    Table,
    Id,
    RepoId,
    Name,
    Base,
    Version,
    Description,
    Size,
    CSize,
    Url,
    Arch,
    BuildDate,
    Packager,
    PgpSig,
    PgpSigSize,
    Sha256Sum,
    Compression,
    State,
}
/// sea-query identifiers for the `package_license` table and its columns.
#[derive(Iden)]
pub enum PackageLicense {
    Table,
    PackageId,
    Name,
}
/// sea-query identifiers for the `package_group` table and its columns.
#[derive(Iden)]
pub enum PackageGroup {
    Table,
    PackageId,
    Name,
}
/// sea-query identifiers for the `package_related` table and its columns.
#[derive(Iden)]
pub enum PackageRelated {
    Table,
    PackageId,
    Type,
    Name,
}
/// sea-query identifiers for the `package_file` table and its columns.
#[derive(Iden)]
pub enum PackageFile {
    Table,
    PackageId,
    Path,
}

View File

@ -1,12 +0,0 @@
use sea_orm_migration::prelude::*;
/// Aggregates every migration in this crate for the sea-orm migration runner.
pub struct Migrator;

mod m20230730_000001_create_repo_tables;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    /// Return all migrations in the order they must be applied.
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20230730_000001_create_repo_tables::Migration)]
    }
}

View File

@ -1,53 +1,19 @@
pub mod entities;
pub mod query;
use crate::config::DbConfig;
pub use entities::{prelude::*, *};
use sea_orm::{ConnectionTrait, Database, DbConn, DeriveActiveEnum, EnumIter};
use serde::{Deserialize, Serialize};
use sea_orm::{ConnectionTrait, Database, DbConn};
use serde::Serialize;
type Result<T> = std::result::Result<T, sea_orm::DbErr>;
#[derive(EnumIter, DeriveActiveEnum, Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
#[sea_orm(rs_type = "i32", db_type = "Integer")]
#[serde(rename_all = "lowercase")]
pub enum PackageRelatedEnum {
#[sea_orm(num_value = 0)]
Conflicts,
#[sea_orm(num_value = 1)]
Replaces,
#[sea_orm(num_value = 2)]
Provides,
#[sea_orm(num_value = 3)]
Depend,
#[sea_orm(num_value = 4)]
Makedepend,
#[sea_orm(num_value = 5)]
Checkdepend,
#[sea_orm(num_value = 6)]
Optdepend,
}
#[derive(EnumIter, DeriveActiveEnum, Deserialize, Serialize, PartialEq, Eq, Clone, Debug)]
#[sea_orm(rs_type = "i32", db_type = "Integer")]
pub enum PackageState {
#[sea_orm(num_value = 0)]
PendingCommit,
#[sea_orm(num_value = 1)]
Committed,
#[sea_orm(num_value = 2)]
PendingDeletion,
}
#[derive(Serialize)]
pub struct FullPackage {
#[serde(flatten)]
entry: package::Model,
entry: entity::package::Model,
licenses: Vec<String>,
groups: Vec<String>,
related: Vec<(PackageRelatedEnum, String)>,
related: Vec<(entity::PackageRelatedEnum, String)>,
files: Vec<String>,
}

View File

@ -1,6 +1,11 @@
use crate::db::*;
use crate::db::Result;
use sea_orm::{sea_query::IntoCondition, *};
use entity::{distro, prelude::Distro};
use sea_orm::{
sea_query::IntoCondition, ActiveModelTrait, ColumnTrait, Condition, DbConn, EntityTrait,
NotSet, PaginatorTrait, QueryFilter, QueryOrder, Set,
};
use serde::Deserialize;
#[derive(Deserialize)]
pub struct Filter {
@ -21,7 +26,7 @@ pub async fn page(
per_page: u64,
page: u64,
filter: Filter,
) -> Result<Vec<distro::Model>> {
) -> Result<Vec<entity::distro::Model>> {
let paginator = Distro::find()
.filter(filter)
.order_by_asc(distro::Column::Id)

View File

@ -1,7 +1,16 @@
use crate::db::{self, *};
use crate::db::{FullPackage, Result};
use sea_orm::{sea_query::IntoCondition, *};
use sea_query::{Alias, Expr, Query, SelectStatement};
use entity::{
package, package_file, package_group, package_license, package_related,
prelude::{Package, PackageFile, PackageGroup, PackageLicense, PackageRelated},
PackageRelatedEnum, PackageState,
};
use sea_orm::{
sea_query::IntoCondition, ActiveModelTrait, ColumnTrait, Condition, ConnectionTrait, DbConn,
DeleteResult, EntityTrait, FromQueryResult, Iterable, ModelTrait, NotSet, PaginatorTrait,
QueryFilter, QuerySelect, SelectModel, SelectorRaw, Set, TransactionTrait,
};
use sea_query::{Alias, Expr, JoinType, Order, Query, SelectStatement};
use serde::Deserialize;
/// How many fields may be inserted at once into the database.
@ -31,22 +40,22 @@ pub async fn page(
) -> crate::Result<Vec<package::Model>> {
let p2 = Alias::new("p2");
let query = Query::select()
.columns(db::package::Column::iter().map(|c| (db::package::Entity, c)))
.from(db::package::Entity)
.columns(package::Column::iter().map(|c| (package::Entity, c)))
.from(package::Entity)
.join_subquery(
JoinType::InnerJoin,
max_pkg_ids_query(true),
p2.clone(),
Expr::col((db::package::Entity, db::package::Column::Id))
Expr::col((package::Entity, package::Column::Id))
.eq(Expr::col((p2.clone(), Alias::new("max_id")))),
)
.cond_where(filter)
.order_by((db::package::Entity, db::package::Column::Id), Order::Asc)
.order_by((package::Entity, package::Column::Id), Order::Asc)
.to_owned();
let builder = conn.get_database_backend();
let sql = builder.build(&query);
Ok(db::Package::find()
Ok(Package::find()
.from_raw_sql(sql)
.paginate(conn, per_page)
.fetch_page(page)
@ -213,7 +222,7 @@ pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
.into_tuple()
.all(conn)
.await?;
let related: Vec<(db::PackageRelatedEnum, String)> = entry
let related: Vec<(PackageRelatedEnum, String)> = entry
.find_related(PackageRelated)
.select_only()
.columns([package_related::Column::Type, package_related::Column::Name])
@ -248,22 +257,22 @@ pub struct PkgToRemove {
fn max_pkg_ids_query(committed: bool) -> SelectStatement {
let mut query = Query::select()
.from(db::package::Entity)
.from(package::Entity)
.columns([
db::package::Column::RepoId,
db::package::Column::Arch,
db::package::Column::Name,
package::Column::RepoId,
package::Column::Arch,
package::Column::Name,
])
.expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
.expr_as(package::Column::Id.max(), Alias::new("max_id"))
.group_by_columns([
db::package::Column::RepoId,
db::package::Column::Arch,
db::package::Column::Name,
package::Column::RepoId,
package::Column::Arch,
package::Column::Name,
])
.to_owned();
if committed {
query.cond_where(db::package::Column::State.eq(db::PackageState::Committed));
query.cond_where(package::Column::State.eq(PackageState::Committed));
}
query
@ -278,47 +287,44 @@ pub fn pkgs_to_sync(
) -> SelectorRaw<SelectModel<package::Model>> {
let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
let query = Query::select()
.columns(db::package::Column::iter().map(|c| (p1.clone(), c)))
.from_as(db::package::Entity, p1.clone())
.columns(package::Column::iter().map(|c| (p1.clone(), c)))
.from_as(package::Entity, p1.clone())
.join_subquery(
JoinType::InnerJoin,
max_pkg_ids_query(false),
p2.clone(),
Expr::col((p1.clone(), db::package::Column::Id))
Expr::col((p1.clone(), package::Column::Id))
.eq(Expr::col((p2.clone(), Alias::new("max_id")))),
)
.cond_where(
Condition::all()
.add(Expr::col((p1.clone(), db::package::Column::RepoId)).eq(repo))
.add(Expr::col((p1.clone(), package::Column::RepoId)).eq(repo))
.add(Expr::col((p1.clone(), package::Column::Arch)).is_in([arch, crate::ANY_ARCH]))
.add(
Expr::col((p1.clone(), db::package::Column::Arch))
.is_in([arch, crate::ANY_ARCH]),
)
.add(
Expr::col((p1.clone(), db::package::Column::State))
.ne(db::PackageState::PendingDeletion),
Expr::col((p1.clone(), package::Column::State))
.ne(PackageState::PendingDeletion),
),
)
.to_owned();
let builder = conn.get_database_backend();
let sql = builder.build(&query);
db::Package::find().from_raw_sql(sql)
Package::find().from_raw_sql(sql)
}
fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
let mut query = Query::select()
.from_as(db::package::Entity, p1.clone())
.from_as(package::Entity, p1.clone())
.to_owned();
if include_repo {
query.columns([
(p1.clone(), db::package::Column::RepoId),
(p1.clone(), db::package::Column::Id),
(p1.clone(), package::Column::RepoId),
(p1.clone(), package::Column::Id),
]);
} else {
query.column((p1.clone(), db::package::Column::Id));
query.column((p1.clone(), package::Column::Id));
}
// We left join on the max pkgs query because a repository that has all its packages set to
@ -331,27 +337,27 @@ fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
p2.clone(),
Condition::all()
.add(
Expr::col((p1.clone(), db::package::Column::RepoId))
.eq(Expr::col((p2.clone(), db::package::Column::RepoId))),
Expr::col((p1.clone(), package::Column::RepoId))
.eq(Expr::col((p2.clone(), package::Column::RepoId))),
)
.add(
Expr::col((p1.clone(), db::package::Column::Arch))
.eq(Expr::col((p2.clone(), db::package::Column::Arch))),
Expr::col((p1.clone(), package::Column::Arch))
.eq(Expr::col((p2.clone(), package::Column::Arch))),
)
.add(
Expr::col((p1.clone(), db::package::Column::Name))
.eq(Expr::col((p2.clone(), db::package::Column::Name))),
Expr::col((p1.clone(), package::Column::Name))
.eq(Expr::col((p2.clone(), package::Column::Name))),
),
)
.cond_where(
Condition::any()
.add(
Expr::col((p1.clone(), db::package::Column::Id))
Expr::col((p1.clone(), package::Column::Id))
.lt(Expr::col((p2.clone(), Alias::new("max_id")))),
)
.add(
Expr::col((p1.clone(), db::package::Column::State))
.eq(db::PackageState::PendingDeletion),
Expr::col((p1.clone(), package::Column::State))
.eq(PackageState::PendingDeletion),
),
);
@ -367,9 +373,9 @@ pub fn stale_pkgs(conn: &DbConn) -> SelectorRaw<SelectModel<PkgToRemove>> {
}
pub async fn delete_stale_pkgs(conn: &DbConn, max_id: i32) -> crate::Result<()> {
Ok(db::Package::delete_many()
.filter(db::package::Column::Id.lte(max_id))
.filter(db::package::Column::Id.in_subquery(stale_pkgs_query(false)))
Ok(Package::delete_many()
.filter(package::Column::Id.lte(max_id))
.filter(package::Column::Id.in_subquery(stale_pkgs_query(false)))
.exec(conn)
.await
.map(|_| ())?)

View File

@ -1,6 +1,11 @@
use crate::db::*;
use crate::db::Result;
use sea_orm::{sea_query::IntoCondition, *};
use entity::{prelude::Repo, repo};
use sea_orm::{
sea_query::IntoCondition, ActiveModelTrait, ColumnTrait, Condition, DbConn, EntityTrait,
NotSet, PaginatorTrait, QueryFilter, QueryOrder, Set,
};
use serde::Deserialize;
#[derive(Deserialize)]
pub struct Filter {

View File

@ -99,10 +99,10 @@ impl Actor {
if let Some(_guard) = repos.get(&repo).map(|n| n.1.lock()) {
let archs: Vec<String> = self.rt.block_on(
db::Package::find()
.filter(db::package::Column::RepoId.eq(repo))
entity::Package::find()
.filter(entity::package::Column::RepoId.eq(repo))
.select_only()
.column(db::package::Column::Arch)
.column(entity::package::Column::Arch)
.distinct()
.into_tuple()
.all(&self.state.conn),
@ -171,12 +171,12 @@ impl Actor {
// Update the state for the newly committed packages
self.rt.block_on(
db::Package::update_many()
entity::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::Committed),
entity::package::Column::State,
Expr::value(entity::PackageState::Committed),
)
.filter(db::package::Column::Id.is_in(committed_ids))
.filter(entity::package::Column::Id.is_in(committed_ids))
.exec(&self.state.conn),
)?;

View File

@ -1,4 +1,3 @@
use crate::db;
use std::{
io::Write,
path::{Path, PathBuf},
@ -69,7 +68,7 @@ impl RepoArchivesWriter {
Ok(ar.append_path(&mut ar_entry, src_path)?)
}
pub fn append_pkg(&mut self, pkg: &db::package::Model) -> crate::Result<()> {
pub fn append_pkg(&mut self, pkg: &entity::package::Model) -> crate::Result<()> {
self.write_desc(&self.tmp_paths[0], pkg)?;
self.write_files(&self.tmp_paths[1], pkg)?;
@ -85,7 +84,11 @@ impl RepoArchivesWriter {
}
/// Generate a "files" archive entry for the package in the given path
fn write_files(&self, path: impl AsRef<Path>, pkg: &db::package::Model) -> crate::Result<()> {
fn write_files(
&self,
path: impl AsRef<Path>,
pkg: &entity::package::Model,
) -> crate::Result<()> {
let mut f = std::io::BufWriter::new(std::fs::File::create(path)?);
writeln!(f, "%FILES%")?;
@ -93,7 +96,7 @@ impl RepoArchivesWriter {
let (tx, mut rx) = mpsc::channel(1);
let conn = self.conn.clone();
let query = pkg.find_related(db::PackageFile);
let query = pkg.find_related(entity::prelude::PackageFile);
self.rt.spawn(async move {
match query.stream(&conn).await {
@ -121,7 +124,11 @@ impl RepoArchivesWriter {
Ok(())
}
fn write_desc(&self, path: impl AsRef<Path>, pkg: &db::package::Model) -> crate::Result<()> {
fn write_desc(
&self,
path: impl AsRef<Path>,
pkg: &entity::package::Model,
) -> crate::Result<()> {
let mut f = std::io::BufWriter::new(std::fs::File::create(path)?);
let filename = format!(
@ -147,9 +154,9 @@ impl RepoArchivesWriter {
}
let groups: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageGroup)
pkg.find_related(entity::prelude::PackageGroup)
.select_only()
.column(db::package_group::Column::Name)
.column(entity::package_group::Column::Name)
.into_tuple()
.all(&self.conn),
)?;
@ -165,9 +172,9 @@ impl RepoArchivesWriter {
}
let licenses: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageLicense)
pkg.find_related(entity::prelude::PackageLicense)
.select_only()
.column(db::package_license::Column::Name)
.column(entity::package_license::Column::Name)
.into_tuple()
.all(&self.conn),
)?;
@ -186,21 +193,21 @@ impl RepoArchivesWriter {
}
let related = [
("REPLACES", db::PackageRelatedEnum::Replaces),
("CONFLICTS", db::PackageRelatedEnum::Conflicts),
("PROVIDES", db::PackageRelatedEnum::Provides),
("DEPENDS", db::PackageRelatedEnum::Depend),
("OPTDEPENDS", db::PackageRelatedEnum::Optdepend),
("MAKEDEPENDS", db::PackageRelatedEnum::Makedepend),
("CHECKDEPENDS", db::PackageRelatedEnum::Checkdepend),
("REPLACES", entity::PackageRelatedEnum::Replaces),
("CONFLICTS", entity::PackageRelatedEnum::Conflicts),
("PROVIDES", entity::PackageRelatedEnum::Provides),
("DEPENDS", entity::PackageRelatedEnum::Depend),
("OPTDEPENDS", entity::PackageRelatedEnum::Optdepend),
("MAKEDEPENDS", entity::PackageRelatedEnum::Makedepend),
("CHECKDEPENDS", entity::PackageRelatedEnum::Checkdepend),
];
for (key, attr) in related.into_iter() {
let items: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageRelated)
.filter(db::package_related::Column::Type.eq(attr))
pkg.find_related(entity::prelude::PackageRelated)
.filter(entity::package_related::Column::Type.eq(attr))
.select_only()
.column(db::package_related::Column::Name)
.column(entity::package_related::Column::Name)
.into_tuple()
.all(&self.conn),
)?;

View File

@ -1,5 +1,4 @@
use super::{Command, SharedState};
use crate::db;
use std::{
path::PathBuf,
@ -34,10 +33,10 @@ impl Handle {
pub async fn get_or_create_repo(&self, distro: &str, repo: &str) -> crate::Result<i32> {
let mut repos = self.state.repos.write().await;
let distro_id: Option<i32> = db::Distro::find()
.filter(db::distro::Column::Name.eq(distro))
let distro_id: Option<i32> = entity::Distro::find()
.filter(entity::distro::Column::Name.eq(distro))
.select_only()
.column(db::distro::Column::Id)
.column(entity::distro::Column::Id)
.into_tuple()
.one(&self.state.conn)
.await?;
@ -45,7 +44,7 @@ impl Handle {
let distro_id = if let Some(id) = distro_id {
id
} else {
let new_distro = db::distro::ActiveModel {
let new_distro = entity::distro::ActiveModel {
id: NotSet,
name: Set(distro.to_string()),
description: NotSet,
@ -54,11 +53,11 @@ impl Handle {
new_distro.insert(&self.state.conn).await?.id
};
let repo_id: Option<i32> = db::Repo::find()
.filter(db::repo::Column::DistroId.eq(distro_id))
.filter(db::repo::Column::Name.eq(repo))
let repo_id: Option<i32> = entity::Repo::find()
.filter(entity::repo::Column::DistroId.eq(distro_id))
.filter(entity::repo::Column::Name.eq(repo))
.select_only()
.column(db::repo::Column::Id)
.column(entity::repo::Column::Id)
.into_tuple()
.one(&self.state.conn)
.await?;
@ -66,7 +65,7 @@ impl Handle {
let repo_id = if let Some(id) = repo_id {
id
} else {
let new_repo = db::repo::ActiveModel {
let new_repo = entity::repo::ActiveModel {
id: NotSet,
distro_id: Set(distro_id),
name: Set(repo.to_string()),
@ -84,12 +83,12 @@ impl Handle {
}
pub async fn get_repo(&self, distro: &str, repo: &str) -> crate::Result<Option<i32>> {
Ok(db::Repo::find()
.find_also_related(db::Distro)
Ok(entity::Repo::find()
.find_also_related(entity::Distro)
.filter(
Condition::all()
.add(db::repo::Column::Name.eq(repo))
.add(db::distro::Column::Name.eq(distro)),
.add(entity::repo::Column::Name.eq(repo))
.add(entity::distro::Column::Name.eq(distro)),
)
.one(&self.state.conn)
.await
@ -98,7 +97,9 @@ impl Handle {
pub async fn remove_repo(&self, repo: i32) -> crate::Result<()> {
self.state.repos.write().await.remove(&repo);
db::Repo::delete_by_id(repo).exec(&self.state.conn).await?;
entity::Repo::delete_by_id(repo)
.exec(&self.state.conn)
.await?;
let _ = tokio::fs::remove_dir_all(self.state.repos_dir.join(repo.to_string())).await;
Ok(())
@ -108,15 +109,15 @@ impl Handle {
/// packages with the given architecture as "pending deletion", before performing a manual sync
/// & removal of stale packages.
pub async fn remove_repo_arch(&self, repo: i32, arch: &str) -> crate::Result<()> {
db::Package::update_many()
entity::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::PendingDeletion),
entity::package::Column::State,
Expr::value(entity::PackageState::PendingDeletion),
)
.filter(
Condition::all()
.add(db::package::Column::RepoId.eq(repo))
.add(db::package::Column::Arch.eq(arch)),
.add(entity::package::Column::RepoId.eq(repo))
.add(entity::package::Column::Arch.eq(arch)),
)
.exec(&self.state.conn)
.await?;

View File

@ -6,8 +6,6 @@ pub mod package;
pub use actor::Actor;
pub use handle::Handle;
use crate::db;
use std::{
collections::HashMap,
path::{Path, PathBuf},
@ -67,9 +65,9 @@ pub fn start(
let mut repos = HashMap::new();
let repo_ids: Vec<i32> = rt.block_on(
db::Repo::find()
entity::prelude::Repo::find()
.select_only()
.column(db::repo::Column::Id)
.column(entity::repo::Column::Id)
.into_tuple()
.all(&conn),
)?;

View File

@ -1,5 +1,3 @@
use crate::db::entities::package;
use std::{
fmt, fs,
io::{self, BufRead, BufReader, Read},
@ -11,7 +9,6 @@ use libarchive::{
read::{Archive, Builder},
Entry, ReadFilter,
};
use sea_orm::ActiveValue::Set;
#[derive(Debug, Clone)]
pub struct Package {
@ -194,26 +191,3 @@ impl Package {
)
}
}
/// Convert a parsed package archive into a database `ActiveModel`.
///
/// Only the columns derivable from the package's parsed metadata (`pkg.info`)
/// are set; the remaining columns (id, repo_id, state, compression, checksum
/// of related tables, ...) are left at their defaults via
/// `..Default::default()` and must be filled in by the caller before insert.
impl From<Package> for package::ActiveModel {
    fn from(pkg: Package) -> Self {
        let info = pkg.info;

        package::ActiveModel {
            base: Set(info.base),
            name: Set(info.name),
            version: Set(info.version),
            arch: Set(info.arch),
            size: Set(info.size),
            // c_size is the compressed (archive) size; size is installed size.
            c_size: Set(info.csize),
            description: Set(info.description),
            url: Set(info.url),
            build_date: Set(info.build_date),
            packager: Set(info.packager),
            pgp_sig: Set(info.pgpsig),
            pgp_sig_size: Set(info.pgpsigsize),
            sha256_sum: Set(info.sha256sum),
            ..Default::default()
        }
    }
}

View File

@ -21,7 +21,7 @@ async fn get_repos(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
Query(filter): Query<db::query::repo::Filter>,
) -> crate::Result<Json<PaginatedResponse<db::repo::Model>>> {
) -> crate::Result<Json<PaginatedResponse<entity::repo::Model>>> {
let items =
db::query::repo::page(&global.db, pagination.per_page, pagination.page - 1, filter).await?;
@ -31,7 +31,7 @@ async fn get_repos(
async fn get_single_repo(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<db::repo::Model>> {
) -> crate::Result<Json<entity::repo::Model>> {
let repo = db::query::repo::by_id(&global.db, id)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;
@ -43,7 +43,7 @@ async fn get_packages(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
Query(filter): Query<db::query::package::Filter>,
) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
) -> crate::Result<Json<PaginatedResponse<entity::package::Model>>> {
let items =
db::query::package::page(&global.db, pagination.per_page, pagination.page - 1, filter)
.await?;