Compare commits
No commits in common. "27afb3496d6c1c1df4165426bfe2e8fe999b9a66" and "a408c14ab1effdd2647e4eca75632330a006a504" have entirely different histories.
27afb3496d...a408c14ab1
@@ -44,7 +44,7 @@ pub struct Cli {
     #[arg(
         long,
         value_name = "LOG_LEVEL",
-        default_value = "tower_http=debug,rieterd=debug,sea_orm=debug",
+        default_value = "tower_http=debug,rieterd=debug",
        env = "RIETER_LOG"
     )]
     pub log: String,
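
The default above is a tracing directive string, overridable at runtime through the RIETER_LOG environment variable. A minimal sketch of how such a string is typically fed to tracing-subscriber, assuming the env-filter feature is enabled; the helper name below is illustrative and rieterd's actual initialization is not shown in this diff:

use tracing_subscriber::EnvFilter;

// Hypothetical helper: `log` stands in for the parsed `Cli::log` value,
// e.g. "tower_http=debug,rieterd=debug".
fn init_logging(log: &str) {
    let filter = EnvFilter::new(log);
    tracing_subscriber::fmt().with_env_filter(filter).init();
}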
@@ -2,7 +2,7 @@ use crate::db::{self, *};
 
 use futures::Stream;
 use sea_orm::{sea_query::IntoCondition, *};
-use sea_query::{Alias, Asterisk, Expr, Query, SelectStatement};
+use sea_query::{Alias, Expr, Query};
 use serde::Deserialize;
 
 #[derive(Deserialize)]
@@ -218,85 +218,11 @@ pub async fn full(conn: &DbConn, id: i32) -> Result<Option<FullPackage>> {
 
 #[derive(FromQueryResult)]
 pub struct PkgToRemove {
-    pub repo_id: i32,
-    pub id: i32,
+    repo_id: i32,
+    id: i32,
 }
 
-fn max_pkg_ids_query() -> SelectStatement {
-    Query::select()
-        .from(db::package::Entity)
-        .columns([
-            db::package::Column::RepoId,
-            db::package::Column::Arch,
-            db::package::Column::Name,
-        ])
-        .expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
-        .group_by_columns([
-            db::package::Column::RepoId,
-            db::package::Column::Arch,
-            db::package::Column::Name,
-        ])
-        .cond_where(
-            Condition::all().add(db::package::Column::State.eq(db::PackageState::Committed)),
-        )
-        .to_owned()
-}
-
-pub fn pkgs_to_sync(
-    conn: &DbConn,
-    repo: i32,
-    arch: &str,
-) -> SelectorRaw<SelectModel<package::Model>> {
-    let max_id_query = Query::select()
-        .columns([
-            db::package::Column::RepoId,
-            db::package::Column::Arch,
-            db::package::Column::Name,
-        ])
-        .expr_as(db::package::Column::Id.max(), Alias::new("max_id"))
-        .from(db::package::Entity)
-        .group_by_columns([
-            db::package::Column::RepoId,
-            db::package::Column::Arch,
-            db::package::Column::Name,
-        ])
-        .to_owned();
-
-    let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
-    let query = Query::select()
-        .column((p1.clone(), Asterisk))
-        .from_as(db::package::Entity, p1.clone())
-        .join_subquery(
-            JoinType::InnerJoin,
-            max_id_query,
-            p2.clone(),
-            Expr::col((p1.clone(), db::package::Column::Id))
-                .eq(Expr::col((p2.clone(), Alias::new("max_id")))),
-        )
-        .cond_where(
-            Condition::all()
-                .add(Expr::col((p1.clone(), db::package::Column::RepoId)).eq(repo))
-                .add(
-                    Expr::col((p1.clone(), db::package::Column::State))
-                        .ne(db::PackageState::PendingDeletion),
-                )
-                .add(
-                    Expr::col((p1.clone(), db::package::Column::Arch))
-                        .is_in([arch, crate::ANY_ARCH]),
-                ),
-        )
-        .to_owned();
-    let builder = conn.get_database_backend();
-    let sql = builder.build(&query);
-
-    db::Package::find().from_raw_sql(sql)
-}
-
-fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
-    // In each repository, only one version of a package can exist for any given arch. Because ids
-    // are monotonically increasing, we know that the row that represents the actual package
-    // currently in the repository is the row with the largest id whose state is "committed". This
-    // query finds this id for each (repo, arch, name) tuple.
+pub fn to_be_removed_query(conn: &DbConn) -> SelectorRaw<SelectModel<PkgToRemove>> {
     let mut max_id_query = Query::select();
     max_id_query
         .from(db::package::Entity)
@@ -317,23 +243,12 @@ fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
 
     let (p1, p2) = (Alias::new("p1"), Alias::new("p2"));
     let mut query = Query::select();
-    // We then perform an inner join between the max id query above and the package table, where we
-    // filter on rows whose id is less than their respective package's max id or whose state is set
-    // to "pending deletion". This gives us all rows in the database that correspond to packages
-    // that are no longer needed, and can thus be removed.
-    query.from_as(db::package::Entity, p1.clone());
-
-    if include_repo {
-        query.columns([
+    query
+        .from_as(db::package::Entity, p1.clone())
+        .columns([
             (p1.clone(), db::package::Column::RepoId),
             (p1.clone(), db::package::Column::Id),
-        ]);
-    } else {
-        query.column((p1.clone(), db::package::Column::Id));
-    }
-
-    query
+        ])
         .join_subquery(
             JoinType::InnerJoin,
             max_id_query,
@@ -362,23 +277,9 @@ fn stale_pkgs_query(include_repo: bool) -> SelectStatement {
                 Expr::col((p1.clone(), db::package::Column::Id))
                     .eq(db::PackageState::PendingDeletion),
             ),
-        )
-        .to_owned()
-}
-
-pub fn stale_pkgs(conn: &DbConn) -> SelectorRaw<SelectModel<PkgToRemove>> {
-    let query = stale_pkgs_query(true);
+        );
     let builder = conn.get_database_backend();
     let sql = builder.build(&query);
 
     PkgToRemove::find_by_statement(sql)
 }
-
-pub async fn delete_stale_pkgs(conn: &DbConn, max_id: i32) -> crate::Result<()> {
-    Ok(db::Package::delete_many()
-        .filter(db::package::Column::Id.lte(max_id))
-        .filter(db::package::Column::Id.in_subquery(stale_pkgs_query(false)))
-        .exec(conn)
-        .await
-        .map(|_| ())?)
-}
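
The 27afb-side helpers above (max_pkg_ids_query, stale_pkgs_query, stale_pkgs, delete_stale_pkgs) boil down to a "max committed id per (repo, arch, name)" subquery joined back against the package table. A rough SQL approximation, reconstructed from the comments and sea_query calls visible in these hunks; the join keys and state literals are assumptions, since part of the condition falls outside the shown context:

// Approximation only, not the statement either commit generates.
const STALE_PKGS_SQL_APPROX: &str = r#"
SELECT p1.repo_id, p1.id
FROM package AS p1
INNER JOIN (
    -- only committed rows define the package currently in the repo
    SELECT repo_id, arch, name, MAX(id) AS max_id
    FROM package
    WHERE state = 'committed'
    GROUP BY repo_id, arch, name
) AS p2 ON p1.repo_id = p2.repo_id AND p1.arch = p2.arch AND p1.name = p2.name
-- superseded rows, plus rows explicitly marked for removal
WHERE p1.id < p2.max_id OR p1.state = 'pending_deletion'
"#;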
@@ -12,8 +12,6 @@ use repo::DistroMgr;
 use clap::Parser;
 use std::{path::PathBuf, sync::Arc};
 
-pub const ANY_ARCH: &'static str = "any";
-
 #[derive(Clone)]
 pub struct Config {
     data_dir: PathBuf,
@@ -1,5 +1,5 @@
 use super::{archive, package};
-use crate::db::{self, query::package::delete_stale_pkgs};
+use crate::db;
 
 use std::{
     collections::HashMap,
@@ -22,6 +22,8 @@ use tokio::sync::{
 };
 use uuid::Uuid;
 
+pub const ANY_ARCH: &'static str = "any";
+
 struct PkgQueueMsg {
     repo: i32,
     path: PathBuf,
@@ -101,37 +103,16 @@ impl RepoMgr {
     }
 
     /// Clean any remaining old package files from the database and file system
-    pub async fn remove_stale_pkgs(&self) -> crate::Result<()> {
-        let mut pkgs = db::query::package::stale_pkgs(&self.conn)
+    pub async fn clean(&self) -> crate::Result<()> {
+        let mut pkgs = db::query::package::to_be_removed_query(&self.conn)
             .stream(&self.conn)
             .await?;
 
-        // Ids are monotonically increasing, so the max id suffices to know which packages to
-        // remove later
-        let mut max_id = -1;
-        let mut removed_pkgs = 0;
-
         while let Some(pkg) = pkgs.next().await.transpose()? {
-            // Failing to remove the package file isn't the biggest problem
-            let _ = tokio::fs::remove_file(
-                self.repos_dir
-                    .join(pkg.repo_id.to_string())
-                    .join(pkg.id.to_string()),
-            )
-            .await;
-
-            if pkg.id > max_id {
-                max_id = pkg.id;
-            }
-
-            removed_pkgs += 1;
+            // TODO remove package from file system and database
         }
 
-        if removed_pkgs > 0 {
-            db::query::package::delete_stale_pkgs(&self.conn, max_id).await?;
-        }
-
-        tracing::info!("Removed {removed_pkgs} stale package(s)");
+        // TODO log indicating how many packages were cleaned
 
         Ok(())
     }
@@ -145,7 +126,18 @@ impl RepoMgr {
 
         // Query all packages in the repo that have the given architecture or the "any"
         // architecture
-        let mut pkgs = db::query::package::pkgs_to_sync(&self.conn, repo, arch)
+        let mut pkgs = db::Package::find()
+            .filter(db::package::Column::RepoId.eq(repo))
+            .filter(db::package::Column::Arch.is_in([arch, ANY_ARCH]))
+            .filter(
+                db::package::Column::Id.in_subquery(
+                    Query::select()
+                        .expr(db::package::Column::Id.max())
+                        .from(db::package::Entity)
+                        .group_by_columns([db::package::Column::Arch, db::package::Column::Name])
+                        .to_owned(),
+                ),
+            )
             .stream(&self.conn)
             .await?;
 
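
The a408c-side inline query above selects, for each (arch, name) pair, the row with the highest id, restricted to the given repo and to the requested arch or "any". A rough SQL shape of what it assembles, as an approximation only; actual placeholders and quoting depend on the backend and entity definitions, which this hunk does not show:

// Approximate shape, not the statement sea_orm generates verbatim.
const PKGS_TO_SYNC_SQL_APPROX: &str = r#"
SELECT * FROM package
WHERE repo_id = ?
  AND arch IN (?, 'any')
  AND id IN (SELECT MAX(id) FROM package GROUP BY arch, name)
"#;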
@@ -241,7 +233,7 @@ impl RepoMgr {
 
                     // TODO move this so that we only clean if entire queue is empty, not just
                     // queue for specific repo
-                    let _ = self.remove_stale_pkgs().await;
+                    let _ = self.clean().await;
                 }
             }
         }
@@ -254,19 +246,6 @@ impl RepoMgr {
         });
     }
 
-    pub async fn get_repo(&self, distro: &str, repo: &str) -> crate::Result<Option<i32>> {
-        Ok(db::Repo::find()
-            .find_also_related(db::Distro)
-            .filter(
-                Condition::all()
-                    .add(db::repo::Column::Name.eq(repo))
-                    .add(db::distro::Column::Name.eq(distro)),
-            )
-            .one(&self.conn)
-            .await
-            .map(|res| res.map(|(repo, _)| repo.id))?)
-    }
-
     pub async fn get_or_create_repo(&self, distro: &str, repo: &str) -> crate::Result<i32> {
         let mut repos = self.repos.write().await;
 
@@ -344,37 +323,6 @@ impl RepoMgr {
         Ok(())
     }
 
-    pub async fn remove_repo(&self, repo: i32) -> crate::Result<()> {
-        self.repos.write().await.remove(&repo);
-        db::Repo::delete_by_id(repo).exec(&self.conn).await?;
-        let _ = tokio::fs::remove_dir_all(self.repos_dir.join(repo.to_string())).await;
-
-        Ok(())
-    }
-
-    /// Remove all packages in the repository that have a given arch. This method marks all
-    /// packages with the given architecture as "pending deletion", before performing a manual sync
-    /// & removal of stale packages.
-    pub async fn remove_repo_arch(&self, repo: i32, arch: &str) -> crate::Result<()> {
-        db::Package::update_many()
-            .col_expr(
-                db::package::Column::State,
-                Expr::value(db::PackageState::PendingDeletion),
-            )
-            .filter(
-                Condition::all()
-                    .add(db::package::Column::RepoId.eq(repo))
-                    .add(db::package::Column::Arch.eq(arch)),
-            )
-            .exec(&self.conn)
-            .await?;
-
-        self.sync_repo(repo).await?;
-        self.remove_stale_pkgs().await?;
-
-        Ok(())
-    }
-
     pub fn random_file_paths<const C: usize>(&self) -> [PathBuf; C] {
         std::array::from_fn(|_| {
             let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
@@ -49,29 +49,25 @@ async fn get_file(
     Path((distro, repo, arch, file_name)): Path<(String, String, String, String)>,
     req: Request<Body>,
 ) -> crate::Result<impl IntoResponse> {
-    if let Some(repo_id) = global.mgr.get_repo(&distro, &repo).await? {
-        let repo_dir = global
-            .config
-            .data_dir
-            .join("repos")
-            .join(repo_id.to_string());
+    let repo_dir = global
+        .config
+        .data_dir
+        .join("distros")
+        .join(&distro)
+        .join(&repo);
 
     let file_name =
         if file_name == format!("{}.db", repo) || file_name == format!("{}.db.tar.gz", repo) {
             format!("{}.db.tar.gz", arch)
         } else if file_name == format!("{}.files", repo)
             || file_name == format!("{}.files.tar.gz", repo)
         {
             format!("{}.files.tar.gz", arch)
         } else {
             file_name
         };
 
-        let path = repo_dir.join(file_name);
-        Ok(ServeFile::new(path).oneshot(req).await)
-    } else {
-        Err(StatusCode::NOT_FOUND.into())
-    }
+    Ok(ServeFile::new(repo_dir.join(file_name)).oneshot(req).await)
 }
 
 async fn post_package_archive(
|
||||||
|
|
||||||
global.mgr.queue_pkg(repo, tmp_path).await;
|
global.mgr.queue_pkg(repo, tmp_path).await;
|
||||||
|
|
||||||
|
//let (name, version, arch) = mgr.add_pkg_from_path(&mut body, &repo).await?;
|
||||||
|
//
|
||||||
|
//tracing::info!(
|
||||||
|
// "Added '{}-{}' to repository '{}' ({})",
|
||||||
|
// name,
|
||||||
|
// version,
|
||||||
|
// repo,
|
||||||
|
// arch
|
||||||
|
//);
|
||||||
|
|
||||||
|
//tokio::spawn(async move { mgr.sync_repo(&repo).await });
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -95,15 +103,20 @@ async fn delete_repo(
     State(global): State<crate::Global>,
     Path((distro, repo)): Path<(String, String)>,
 ) -> crate::Result<StatusCode> {
-    if let Some(repo) = global.mgr.get_repo(&distro, &repo).await? {
-        global.mgr.remove_repo(repo).await?;
-
-        tracing::info!("Removed repository {repo}");
-
-        Ok(StatusCode::OK)
-    } else {
-        Ok(StatusCode::NOT_FOUND)
-    }
+    Ok(StatusCode::NOT_FOUND)
+    //if let Some(mgr) = global.mgr.get_mgr(&distro).await {
+    //    let repo_removed = mgr.remove_repo(&repo).await?;
+    //
+    //    if repo_removed {
+    //        tracing::info!("Removed repository '{}'", repo);
+    //
+    //        Ok(StatusCode::OK)
+    //    } else {
+    //        Ok(StatusCode::NOT_FOUND)
+    //    }
+    //} else {
+    //    Ok(StatusCode::NOT_FOUND)
+    //}
 }
 
 async fn delete_arch_repo(