//! Database queries for the `package` table and its related tables
//! (licenses, groups, related packages, files).
use crate::db::{self, *};

use futures::Stream;
use sea_orm::{sea_query::IntoCondition, *};
use sea_query::{Alias, Asterisk, Expr, Query, SelectStatement};
use serde::Deserialize;
#[derive(Deserialize)]
|
|
pub struct Filter {
|
|
repo: Option<i32>,
|
|
arch: Option<String>,
|
|
name: Option<String>,
|
|
}
|
|
|
|
impl IntoCondition for Filter {
|
|
fn into_condition(self) -> Condition {
|
|
Condition::all()
|
|
.add_option(self.repo.map(|repo| package::Column::RepoId.eq(repo)))
|
|
.add_option(self.arch.map(|arch| package::Column::Arch.eq(arch)))
|
|
.add_option(
|
|
self.name
|
|
.map(|name| package::Column::Name.like(format!("%{}%", name))),
|
|
)
|
|
}
|
|
}
|
|
|
|
pub async fn page(
|
|
conn: &DbConn,
|
|
per_page: u64,
|
|
page: u64,
|
|
filter: Filter,
|
|
) -> super::Result<(u64, Vec<package::Model>)> {
|
|
let paginator = Package::find()
|
|
.filter(filter)
|
|
.order_by_asc(package::Column::Id)
|
|
.paginate(conn, per_page);
|
|
let packages = paginator.fetch_page(page).await?;
|
|
let total_pages = paginator.num_pages().await?;
|
|
|
|
Ok((total_pages, packages))
|
|
}
|
|
|
|
pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<package::Model>> {
|
|
package::Entity::find_by_id(id).one(conn).await
|
|
}
|
|
|
|
pub async fn by_fields(
|
|
conn: &DbConn,
|
|
repo_id: i32,
|
|
arch: &str,
|
|
name: &str,
|
|
version: Option<&str>,
|
|
compression: Option<&str>,
|
|
) -> Result<Option<package::Model>> {
|
|
let cond = Condition::all()
|
|
.add(package::Column::RepoId.eq(repo_id))
|
|
.add(package::Column::Name.eq(name))
|
|
.add(package::Column::Arch.eq(arch))
|
|
.add_option(version.map(|version| package::Column::Version.eq(version)))
|
|
.add_option(compression.map(|compression| package::Column::Compression.eq(compression)));
|
|
|
|
Package::find().filter(cond).one(conn).await
|
|
}
|
|
|
|
pub async fn delete_with_arch(conn: &DbConn, repo_id: i32, arch: &str) -> Result<DeleteResult> {
|
|
Package::delete_many()
|
|
.filter(package::Column::RepoId.eq(repo_id))
|
|
.filter(package::Column::Arch.eq(arch))
|
|
.exec(conn)
|
|
.await
|
|
}
|
|
|
|
/// Insert a package into the database, together with all of its related rows
/// (licenses, groups, related packages and files).
///
/// Everything is inserted inside a single transaction, so either the whole
/// package is stored or nothing is. The new row starts out in the
/// `PendingCommit` state.
///
/// # Panics
///
/// Panics if `pkg.compression` has no file extension (see note below).
pub async fn insert(
    conn: &DbConn,
    repo_id: i32,
    pkg: crate::repo::package::Package,
) -> Result<package::Model> {
    let info = pkg.info;

    // Doing this manually is not the recommended way, but the generic error type of the
    // transaction function didn't play well with my current error handling
    let txn = conn.begin().await?;

    let model = package::ActiveModel {
        // Let the database assign the primary key
        id: NotSet,
        repo_id: Set(repo_id),
        base: Set(info.base),
        name: Set(info.name),
        version: Set(info.version),
        arch: Set(info.arch),
        size: Set(info.size),
        c_size: Set(info.csize),
        description: Set(info.description),
        url: Set(info.url),
        build_date: Set(info.build_date),
        packager: Set(info.packager),
        pgp_sig: Set(info.pgpsig),
        pgp_sig_size: Set(info.pgpsigsize),
        sha256_sum: Set(info.sha256sum),
        // NOTE(review): panics when the compression value has no extension —
        // presumably guaranteed by the upload parser; confirm upstream validation.
        compression: Set(pkg.compression.extension().unwrap().to_string()),
        state: Set(PackageState::PendingCommit),
    };

    let pkg_entry = model.insert(&txn).await?;

    // Insert all the related tables
    PackageLicense::insert_many(info.licenses.iter().map(|s| package_license::ActiveModel {
        package_id: Set(pkg_entry.id),
        name: Set(s.to_string()),
    }))
    // Skip the statement entirely when there are no rows to insert
    .on_empty_do_nothing()
    .exec(&txn)
    .await?;

    PackageGroup::insert_many(info.groups.iter().map(|s| package_group::ActiveModel {
        package_id: Set(pkg_entry.id),
        name: Set(s.to_string()),
    }))
    .on_empty_do_nothing()
    .exec(&txn)
    .await?;

    // Tag every "related" name (conflicts, replaces, provides and the various
    // dependency kinds) with its relation type so they can all be inserted in
    // a single statement below.
    let related = info
        .conflicts
        .iter()
        .map(|s| (PackageRelatedEnum::Conflicts, s))
        .chain(
            info.replaces
                .iter()
                .map(|s| (PackageRelatedEnum::Replaces, s)),
        )
        .chain(
            info.provides
                .iter()
                .map(|s| (PackageRelatedEnum::Provides, s)),
        )
        .chain(info.depends.iter().map(|s| (PackageRelatedEnum::Depend, s)))
        .chain(
            info.makedepends
                .iter()
                .map(|s| (PackageRelatedEnum::Makedepend, s)),
        )
        .chain(
            info.checkdepends
                .iter()
                .map(|s| (PackageRelatedEnum::Checkdepend, s)),
        )
        .chain(
            info.optdepends
                .iter()
                .map(|s| (PackageRelatedEnum::Optdepend, s)),
        );

    PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
        package_id: Set(pkg_entry.id),
        r#type: Set(t),
        name: Set(s.to_string()),
    }))
    .on_empty_do_nothing()
    .exec(&txn)
    .await?;

    PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
        package_id: Set(pkg_entry.id),
        // Store the file path as its display string
        path: Set(s.display().to_string()),
    }))
    .on_empty_do_nothing()
    .exec(&txn)
    .await?;

    txn.commit().await?;

    Ok(pkg_entry)
}
pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
|
|
if let Some(entry) = by_id(conn, id).await? {
|
|
let licenses: Vec<String> = entry
|
|
.find_related(PackageLicense)
|
|
.select_only()
|
|
.column(package_license::Column::Name)
|
|
.into_tuple()
|
|
.all(conn)
|
|
.await?;
|
|
let groups: Vec<String> = entry
|
|
.find_related(PackageGroup)
|
|
.select_only()
|
|
.column(package_group::Column::Name)
|
|
.into_tuple()
|
|
.all(conn)
|
|
.await?;
|
|
let related: Vec<(db::PackageRelatedEnum, String)> = entry
|
|
.find_related(PackageRelated)
|
|
.select_only()
|
|
.columns([package_related::Column::Type, package_related::Column::Name])
|
|
.into_tuple()
|
|
.all(conn)
|
|
.await?;
|
|
let files: Vec<String> = entry
|
|
.find_related(PackageFile)
|
|
.select_only()
|
|
.column(package_file::Column::Path)
|
|
.into_tuple()
|
|
.all(conn)
|
|
.await?;
|
|
|
|
Ok(Some(FullPackage {
|
|
entry,
|
|
licenses,
|
|
groups,
|
|
related,
|
|
files,
|
|
}))
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
}
|
|
|
|
#[derive(FromQueryResult)]
|
|
pub struct PkgToRemove {
|
|
pub repo_id: i32,
|
|
pub id: i32,
|
|
}
|
|
|
|
fn max_pkg_ids_query() -> SelectStatement {
|
|
Query::select()
|
|
.from(db::package::Entity)
|
|
.columns([
|
|
db::package::Column::RepoId,
|
|
db::package::Column::Arch,
|
|
db::package::Column::Name,
|
|
])
|
|
.expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
|
|
.group_by_columns([
|
|
db::package::Column::RepoId,
|
|
db::package::Column::Arch,
|
|
db::package::Column::Name,
|
|
])
|
|
.cond_where(
|
|
Condition::all().add(db::package::Column::State.eq(db::PackageState::Committed)),
|
|
)
|
|
.to_owned()
|
|
}
|
|
|
|
/// Build a raw selector yielding every package that should be included when
/// syncing the given repository for the given architecture.
///
/// For each (repo, arch, name) tuple only the row with the highest id (the
/// newest version) is selected; rows pending deletion are excluded, and
/// `ANY_ARCH` packages are included alongside `arch`-specific ones.
///
/// NOTE(review): unlike `max_pkg_ids_query`, the subquery here does not filter
/// on the "committed" state, so packages still pending commit are synced as
/// well — presumably intentional (the sync is what commits them); confirm
/// against the commit flow.
pub fn pkgs_to_sync(
    conn: &DbConn,
    repo: i32,
    arch: &str,
) -> SelectorRaw<SelectModel<package::Model>> {
    // For each (repo, arch, name) tuple, find the largest (newest) package id.
    let max_id_query = Query::select()
        .columns([
            db::package::Column::RepoId,
            db::package::Column::Arch,
            db::package::Column::Name,
        ])
        .expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
        .from(db::package::Entity)
        .group_by_columns([
            db::package::Column::RepoId,
            db::package::Column::Arch,
            db::package::Column::Name,
        ])
        .to_owned();

    let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
    // Inner-join the package table against the max-id subquery so only the
    // newest row of each package survives, then restrict to the requested
    // repo/arch and drop rows pending deletion.
    let query = Query::select()
        .column((p1.clone(), Asterisk))
        .from_as(db::package::Entity, p1.clone())
        .join_subquery(
            JoinType::InnerJoin,
            max_id_query,
            p2.clone(),
            Expr::col((p1.clone(), db::package::Column::Id))
                .eq(Expr::col((p2.clone(), Alias::new("max_id")))),
        )
        .cond_where(
            Condition::all()
                .add(Expr::col((p1.clone(), db::package::Column::RepoId)).eq(repo))
                .add(
                    Expr::col((p1.clone(), db::package::Column::State))
                        .ne(db::PackageState::PendingDeletion),
                )
                .add(
                    // "any"-architecture packages apply to every concrete arch
                    Expr::col((p1.clone(), db::package::Column::Arch))
                        .is_in([arch, crate::ANY_ARCH]),
                ),
        )
        .to_owned();
    let builder = conn.get_database_backend();
    let sql = builder.build(&query);

    db::Package::find().from_raw_sql(sql)
}
fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
|
|
// In each repository, only one version of a package can exist for any given arch. Because ids
|
|
// are monotonically increasing, we know that the row that represents the actual package
|
|
// currently in the repository is the row with the largest id whose state is "committed". This
|
|
// query finds this id for each (repo, arch, name) tuple.
|
|
let mut max_id_query = Query::select();
|
|
max_id_query
|
|
.from(db::package::Entity)
|
|
.columns([
|
|
db::package::Column::RepoId,
|
|
db::package::Column::Arch,
|
|
db::package::Column::Name,
|
|
])
|
|
.expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
|
|
.group_by_columns([
|
|
db::package::Column::RepoId,
|
|
db::package::Column::Arch,
|
|
db::package::Column::Name,
|
|
])
|
|
.cond_where(
|
|
Condition::all().add(db::package::Column::State.eq(db::PackageState::Committed)),
|
|
);
|
|
|
|
let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
|
|
let mut query = Query::select();
|
|
|
|
// We then perform an inner join between the max id query above and the package table, where we
|
|
// filter on rows whose id is less than their respective package's max id or whose state is set
|
|
// to "pending deletion". This gives us all rows in the database that correspond to packages
|
|
// that are no longer needed, and can thus be removed.
|
|
query.from_as(db::package::Entity, p1.clone());
|
|
|
|
if include_repo {
|
|
query.columns([
|
|
(p1.clone(), db::package::Column::RepoId),
|
|
(p1.clone(), db::package::Column::Id),
|
|
]);
|
|
} else {
|
|
query.column((p1.clone(), db::package::Column::Id));
|
|
}
|
|
|
|
query
|
|
.join_subquery(
|
|
JoinType::InnerJoin,
|
|
max_id_query,
|
|
p2.clone(),
|
|
Condition::all()
|
|
.add(
|
|
Expr::col((p1.clone(), db::package::Column::RepoId))
|
|
.eq(Expr::col((p2.clone(), db::package::Column::RepoId))),
|
|
)
|
|
.add(
|
|
Expr::col((p1.clone(), db::package::Column::Arch))
|
|
.eq(Expr::col((p2.clone(), db::package::Column::Arch))),
|
|
)
|
|
.add(
|
|
Expr::col((p1.clone(), db::package::Column::Name))
|
|
.eq(Expr::col((p2.clone(), db::package::Column::Name))),
|
|
),
|
|
)
|
|
.cond_where(
|
|
Condition::any()
|
|
.add(
|
|
Expr::col((p1.clone(), db::package::Column::Id))
|
|
.lt(Expr::col((p2.clone(), Alias::new("max_id")))),
|
|
)
|
|
.add(
|
|
Expr::col((p1.clone(), db::package::Column::Id))
|
|
.eq(db::PackageState::PendingDeletion),
|
|
),
|
|
)
|
|
.to_owned()
|
|
}
|
|
|
|
pub fn stale_pkgs(conn: &DbConn) -> SelectorRaw<SelectModel<PkgToRemove>> {
|
|
let query = stale_pkgs_query(true);
|
|
let builder = conn.get_database_backend();
|
|
let sql = builder.build(&query);
|
|
|
|
PkgToRemove::find_by_statement(sql)
|
|
}
|
|
|
|
pub async fn delete_stale_pkgs(conn: &DbConn, max_id: i32) -> crate::Result<()> {
|
|
Ok(db::Package::delete_many()
|
|
.filter(db::package::Column::Id.lte(max_id))
|
|
.filter(db::package::Column::Id.in_subquery(stale_pkgs_query(false)))
|
|
.exec(conn)
|
|
.await
|
|
.map(|_| ())?)
|
|
}
|