Compare commits
No commits in common. "32e27978ec2aa51d0f4feadedd404b22d3d4dcda" and "513a760040513765225ff83f2b008555d61eafa6" have entirely different histories.
32e27978ec...513a760040
@@ -1,4 +1,4 @@
-use crate::repo::MetaRepoMgr;
+use crate::repo::{MetaRepoMgr, RepoGroupManager};
 use crate::{Config, Global};

 use axum::extract::FromRef;

@@ -82,6 +82,7 @@ impl Cli {

         let config = Config {
             data_dir: self.data_dir.clone(),
+            api_key: self.api_key.clone(),
         };
         let repo_manager = MetaRepoMgr::new(&self.data_dir.join("repos"));

@@ -9,10 +9,7 @@ pub struct Filter {

 impl IntoCondition for Filter {
     fn into_condition(self) -> Condition {
-        Condition::all().add_option(
-            self.name
-                .map(|name| repo::Column::Name.like(format!("%{}%", name))),
-        )
+        Condition::all().add_option(self.name.map(|name| package::Column::Name.like(name)))
     }
 }

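For context on the hunk above: implementing sea_orm's IntoCondition lets the whole filter be passed straight to QueryFilter::filter, and Condition::add_option simply adds no clause when the optional field is None. A minimal sketch of that pattern, with an illustrative stand-in entity rather than this crate's actual package module:

// A stand-in for the crate's generated `package` entity (illustrative only).
mod package {
    use sea_orm::entity::prelude::*;

    #[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
    #[sea_orm(table_name = "package")]
    pub struct Model {
        #[sea_orm(primary_key)]
        pub id: i32,
        pub name: String,
    }

    #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
    pub enum Relation {}

    impl ActiveModelBehavior for ActiveModel {}
}

use sea_orm::sea_query::IntoCondition;
use sea_orm::{ColumnTrait, Condition, DatabaseConnection, DbErr, EntityTrait, QueryFilter};

pub struct NameFilter {
    pub name: Option<String>,
}

impl IntoCondition for NameFilter {
    fn into_condition(self) -> Condition {
        // add_option skips the clause entirely when the field is None.
        Condition::all().add_option(self.name.map(|name| package::Column::Name.like(name)))
    }
}

// The IntoCondition impl is what allows passing the filter directly to `.filter(...)`.
pub async fn list(conn: &DatabaseConnection, filter: NameFilter) -> Result<Vec<package::Model>, DbErr> {
    package::Entity::find().filter(filter).all(conn).await
}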
@@ -7,6 +7,7 @@ mod repo;
 use clap::Parser;
 pub use error::{Result, ServerError};
 use repo::MetaRepoMgr;
+use repo::RepoGroupManager;
 use std::path::PathBuf;
 use std::sync::Arc;
 use tokio::sync::RwLock;

@@ -14,6 +15,7 @@ use tokio::sync::RwLock;
 #[derive(Clone)]
 pub struct Config {
     data_dir: PathBuf,
+    api_key: String,
 }

 #[derive(Clone)]

@@ -1,10 +1,17 @@
-use std::io;
+use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
+use tokio::sync::{mpsc, oneshot};

 use libarchive::write::{Builder, FileWriter, WriteEntry};
 use libarchive::{Entry, WriteFilter, WriteFormat};

+enum Message {
+    AppendFilesEntry(oneshot::Sender<io::Result<()>>, String),
+    AppendLine(oneshot::Sender<io::Result<()>>, String),
+    Close(oneshot::Sender<io::Result<()>>),
+}
+
 /// Struct to abstract away the intrinsics of writing entries to an archive file
 pub struct RepoArchiveWriter {
     ar: Arc<Mutex<FileWriter>>,

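The added tokio::sync::{mpsc, oneshot} import and the new Message enum point at a request/response setup: callers queue work for a single writer task and receive the io::Result back over a oneshot channel. A rough, generic sketch of that shape (trimmed to two message variants; the real writer task owns the libarchive FileWriter):

use std::io::{self, Write};
use tokio::sync::{mpsc, oneshot};

enum Message {
    AppendLine(oneshot::Sender<io::Result<()>>, String),
    Close(oneshot::Sender<io::Result<()>>),
}

// Caller side: send a request plus a oneshot sender, then await the reply.
async fn append_line(tx: &mpsc::Sender<Message>, line: String) -> io::Result<()> {
    let (reply_tx, reply_rx) = oneshot::channel();
    tx.send(Message::AppendLine(reply_tx, line))
        .await
        .map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "writer task stopped"))?;
    reply_rx
        .await
        .map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "writer task stopped"))?
}

// Writer side: a single task owns the (blocking) writer and answers each request in order.
async fn writer_task(mut rx: mpsc::Receiver<Message>, mut w: impl Write) {
    while let Some(msg) = rx.recv().await {
        match msg {
            Message::AppendLine(reply, line) => {
                let _ = reply.send(writeln!(w, "{}", line));
            }
            Message::Close(reply) => {
                let _ = reply.send(w.flush());
                break;
            }
        }
    }
}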
@@ -133,51 +133,6 @@ impl MetaRepoMgr {
         }
     }

-    /// Remove all packages from the repository with the given arch.
-    pub async fn remove_repo_arch(&self, conn: &DbConn, repo: &str, arch: &str) -> Result<bool> {
-        let repo = db::query::repo::by_name(conn, repo).await?;
-
-        if let Some(repo) = repo {
-            let mut pkgs = repo
-                .find_related(db::Package)
-                .filter(db::package::Column::Arch.eq(arch))
-                .stream(conn)
-                .await?;
-
-            while let Some(pkg) = pkgs.next().await.transpose()? {
-                let path = self
-                    .repo_dir
-                    .join(&repo.name)
-                    .join(super::package::filename(&pkg));
-                tokio::fs::remove_file(path).await?;
-
-                pkg.delete(conn).await?;
-            }
-
-            tokio::fs::remove_file(
-                self.repo_dir
-                    .join(&repo.name)
-                    .join(format!("{}.db.tar.gz", arch)),
-            )
-            .await?;
-            tokio::fs::remove_file(
-                self.repo_dir
-                    .join(&repo.name)
-                    .join(format!("{}.files.tar.gz", arch)),
-            )
-            .await?;
-
-            // If we removed all "any" packages, we need to resync all databases
-            if arch == ANY_ARCH {
-                self.generate_archives_all(conn, &repo.name).await?;
-            }
-
-            Ok(true)
-        } else {
-            Ok(false)
-        }
-    }
-
     pub async fn remove_pkg(
         &self,
         conn: &DbConn,

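One idiom in the removed method worth spelling out: pkgs.next().await yields an Option<Result<Model, DbErr>>, and transpose() flips it to Result<Option<Model>, DbErr>, so `?` propagates database errors while the `while let Some(..)` loop still ends cleanly when the stream is exhausted. The same shape with a plain iterator:

fn sum_all(items: impl IntoIterator<Item = Result<u32, String>>) -> Result<u32, String> {
    let mut iter = items.into_iter();
    let mut total = 0;
    // Mirrors `while let Some(pkg) = pkgs.next().await.transpose()?` from the removed code.
    while let Some(v) = iter.next().transpose()? {
        total += v;
    }
    Ok(total)
}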
@@ -3,9 +3,13 @@ mod manager;
 mod manager_new;
 pub mod package;

+pub use manager::RepoGroupManager;
 pub use manager_new::MetaRepoMgr;
 use tokio_util::io::StreamReader;

+use std::path::PathBuf;
+
+use crate::db;
 use axum::body::Body;
 use axum::extract::{Path, State};
 use axum::http::Request;

@@ -14,9 +18,17 @@ use axum::response::IntoResponse;
 use axum::routing::{delete, post};
 use axum::Router;
 use futures::TryStreamExt;
+use futures::{Stream, StreamExt};
+use regex::Regex;
+use sea_orm::ModelTrait;
+use std::sync::Arc;
+use tokio::{fs, io::AsyncWriteExt};
 use tower::util::ServiceExt;
-use tower_http::services::ServeFile;
+use tower_http::services::{ServeDir, ServeFile};
 use tower_http::validate_request::ValidateRequestHeaderLayer;
+use uuid::Uuid;
+
+const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];

 pub fn router(api_key: &str) -> Router<crate::Global> {
     Router::new()

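Given the api_key parameter and the ValidateRequestHeaderLayer import, the router is presumably wrapped in tower-http's bearer-token check. A stripped-down sketch, assuming compatible axum/tower-http versions; the route and the omitted application state are placeholders, not the crate's real endpoints:

use axum::{routing::post, Router};
use tower_http::validate_request::ValidateRequestHeaderLayer;

pub fn router(api_key: &str) -> Router {
    Router::new()
        // Placeholder route; the real router registers the publish/delete handlers.
        .route("/:repo", post(|| async { "ok" }))
        // Requests without `Authorization: Bearer <api_key>` are rejected with 401.
        .layer(ValidateRequestHeaderLayer::bearer(api_key))
}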
@@ -108,20 +120,31 @@ async fn delete_arch_repo(
     State(global): State<crate::Global>,
     Path((repo, arch)): Path<(String, String)>,
 ) -> crate::Result<StatusCode> {
-    let repo_removed = global
-        .repo_manager
-        .write()
-        .await
-        .remove_repo_arch(&global.db, &repo, &arch)
-        .await?;
-
-    if repo_removed {
-        tracing::info!("Removed arch '{}' from repository '{}'", arch, repo);
-
-        Ok(StatusCode::OK)
-    } else {
-        Ok(StatusCode::NOT_FOUND)
-    }
+    Ok(StatusCode::NOT_FOUND)
+    //let clone = Arc::clone(&global.repo_manager);
+    //
+    //let arch_clone = arch.clone();
+    //let repo_clone = repo.clone();
+    //let repo_removed = tokio::task::spawn_blocking(move || {
+    //    clone
+    //        .write()
+    //        .unwrap()
+    //        .remove_repo_arch(&repo_clone, &arch_clone)
+    //})
+    //.await??;
+    //
+    //if repo_removed {
+    //    let res = db::query::repo::by_name(&global.db, &repo).await?;
+    //
+    //    if let Some(repo_entry) = res {
+    //        db::query::package::delete_with_arch(&global.db, repo_entry.id, &arch).await?;
+    //    }
+    //    tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
+    //
+    //    Ok(StatusCode::OK)
+    //} else {
+    //    Ok(StatusCode::NOT_FOUND)
+    //}
 }

 async fn delete_package(

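The commented-out body above follows the usual spawn_blocking bridge for driving a synchronous, RwLock-guarded manager from an async handler: the first `?` in `.await??` unwraps the task's JoinError, the second unwraps the manager's own Result. A generic sketch of that shape with a placeholder manager type:

use std::sync::{Arc, RwLock};

// Placeholder for a blocking manager guarded by a std RwLock.
struct Manager;

impl Manager {
    fn remove_repo_arch(&mut self, _repo: &str, _arch: &str) -> std::io::Result<bool> {
        Ok(true)
    }
}

async fn remove(
    mgr: Arc<RwLock<Manager>>,
    repo: String,
    arch: String,
) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> {
    let removed = tokio::task::spawn_blocking(move || {
        mgr.write()
            .unwrap()
            .remove_repo_arch(&repo, &arch)
    })
    .await??; // outer ?: JoinError from the blocking task, inner ?: the manager's io::Result
    Ok(removed)
}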