Compare commits
3 Commits
fd1c2d3647 ... 731ad37a2a
| Author | SHA1 | Date |
|---|---|---|
| | 731ad37a2a | |
| | b85f57b112 | |
| | b8d53f43b6 | |
```diff
@@ -314,6 +314,12 @@ pub trait Entry {
             ffi::archive_entry_set_mode(self.entry_mut(), mode);
         }
     }
+
+    fn set_size(&mut self, size: i64) {
+        unsafe {
+            ffi::archive_entry_set_size(self.entry_mut(), size);
+        }
+    }
 }
 
 #[derive(Debug, PartialEq, Eq)]
```
```diff
@@ -39,22 +39,32 @@ impl FileWriter {
         }
     }
 
-        let mut buf = [0; 8192];
+        let mut buf = [0; 4096];
 
         loop {
             match r.read(&mut buf) {
                 Ok(0) => return Ok(()),
-                Ok(written) => unsafe {
-                    match ffi::archive_write_data(
-                        self.handle_mut(),
-                        buf.as_ptr() as *const _,
-                        written,
-                    ) as i32
-                    {
-                        ffi::ARCHIVE_OK => (),
-                        _ => return Err(ArchiveError::from(self as &dyn Handle).into()),
-                    };
-                },
+                // Write entire buffer
+                Ok(buf_len) => {
+                    let mut written: usize = 0;
+
+                    while written < buf_len {
+                        let res = unsafe {
+                            ffi::archive_write_data(
+                                self.handle_mut(),
+                                &buf[written] as *const u8 as *const _,
+                                buf_len - written,
+                            )
+                        } as isize;
+
+                        // Negative values signal errors
+                        if res < 0 {
+                            return Err(ArchiveError::from(self as &dyn Handle).into());
+                        }
+
+                        written += usize::try_from(res).unwrap();
+                    }
+                }
                 Err(err) => match err.kind() {
                     io::ErrorKind::Interrupted => (),
                     _ => return Err(err.into()),
```
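The new `Ok(buf_len)` arm fixes a subtle bug: `archive_write_data` may perform a short write, and the old code treated any non-error return as "the whole buffer was written". A minimal standalone sketch of the same retry loop, with a hypothetical `c_write` standing in for the FFI call:

```rust
// Hypothetical C-style writer with the same contract as
// `archive_write_data`: returns the number of bytes written,
// or a negative value on error.
fn c_write(buf: &[u8]) -> isize {
    // Pretend the sink accepts at most 3 bytes per call (a short write).
    buf.len().min(3) as isize
}

fn write_all(mut buf: &[u8]) -> Result<(), String> {
    while !buf.is_empty() {
        let res = c_write(buf);
        // Negative values signal errors, exactly like libarchive.
        if res < 0 {
            return Err("write failed".into());
        }
        // A short write is not an error: advance past what was
        // consumed and retry with the remainder.
        buf = &buf[res as usize..];
    }
    Ok(())
}

fn main() {
    write_all(b"hello libarchive").unwrap();
}
```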
```diff
@@ -24,7 +24,7 @@ impl Query {
     pub fn res<T: for<'de> Serialize>(self, items: Vec<T>) -> PaginatedResponse<T> {
         PaginatedResponse {
             page: self.page.unwrap_or(DEFAULT_PAGE),
-            per_page: self.page.unwrap_or(DEFAULT_PER_PAGE),
+            per_page: self.per_page.unwrap_or(DEFAULT_PER_PAGE),
             count: items.len(),
             items,
         }
```
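The `per_page` change fixes a copy-paste bug: the field was populated from `self.page`, which compiles fine because both fields share a type. A self-contained sketch of the fixed logic plus the check that would have caught it; field types and default values here are assumptions:

```rust
const DEFAULT_PAGE: u64 = 0;
const DEFAULT_PER_PAGE: u64 = 25;

struct Query {
    page: Option<u64>,
    per_page: Option<u64>,
}

struct PaginatedResponse<T> {
    page: u64,
    per_page: u64,
    count: usize,
    items: Vec<T>,
}

impl Query {
    fn res<T>(self, items: Vec<T>) -> PaginatedResponse<T> {
        PaginatedResponse {
            page: self.page.unwrap_or(DEFAULT_PAGE),
            // Before the fix this read `self.page`; only a test notices.
            per_page: self.per_page.unwrap_or(DEFAULT_PER_PAGE),
            count: items.len(),
            items,
        }
    }
}

fn main() {
    let res = Query { page: Some(1), per_page: Some(50) }.res::<u32>(vec![]);
    assert_eq!((res.page, res.per_page, res.count), (1, 50, 0));
    assert!(res.items.is_empty());
}
```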
```diff
@@ -65,9 +65,13 @@ impl RepoGroupManager {
             // The desc file needs to be added to both archives
             let path_in_tar = PathBuf::from(entry.file_name()).join("desc");
             let src_path = entry.path().join("desc");
+            let metadata = src_path.metadata()?;
 
             let mut ar_entry = WriteEntry::new();
             ar_entry.set_pathname(&path_in_tar);
+            // These small text files will definitely fit inside an i64
+            ar_entry.set_size(metadata.len().try_into().unwrap());
             ar_entry.set_filetype(libarchive::archive::FileType::RegularFile);
+            ar_entry.set_mode(0o100644);
 
             ar_db.append_path(&mut ar_entry, &src_path)?;
@@ -76,10 +80,14 @@ impl RepoGroupManager {
             // The files file is only required in the files database
             let path_in_tar = PathBuf::from(entry.file_name()).join("files");
             let src_path = entry.path().join("files");
+            let metadata = src_path.metadata()?;
 
             let mut ar_entry = WriteEntry::new();
             ar_entry.set_filetype(libarchive::archive::FileType::RegularFile);
             ar_entry.set_pathname(&path_in_tar);
+            ar_entry.set_mode(0o100644);
+            // These small text files will definitely fit inside an i64
+            ar_entry.set_size(metadata.len().try_into().unwrap());
 
             ar_files.append_path(&mut ar_entry, src_path)?;
         }
```
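Both hunks funnel `Metadata::len`, a `u64`, into the new `set_size`, which takes an `i64`; the inline comment justifies the `unwrap`. For sizes that are not guaranteed small, a fallible variant might look like the following sketch (`entry_size` is a hypothetical helper, not part of the codebase):

```rust
use std::io;

// Convert a u64 file size into the i64 libarchive expects,
// reporting overflow as an io::Error instead of panicking.
fn entry_size(len: u64) -> io::Result<i64> {
    i64::try_from(len)
        .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "file too large for entry"))
}

fn main() {
    assert_eq!(entry_size(1024).unwrap(), 1024);
    assert!(entry_size(u64::MAX).is_err());
}
```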
```diff
@@ -251,4 +259,49 @@ impl RepoGroupManager {
 
         Ok(false)
     }
+
+    /// Wrapper around `remove_pkg` that accepts a path relative to the package directory to a
+    /// package archive.
+    pub fn remove_pkg_from_path<P: AsRef<Path>>(
+        &mut self,
+        path: P,
+        sync: bool,
+    ) -> io::Result<Option<(String, String, String, String)>> {
+        let path = path.as_ref();
+        let components: Vec<_> = path.iter().collect();
+
+        if let [repo, _arch, file_name] = components[..] {
+            let full_path = self.pkg_dir.join(path);
+
+            if full_path.try_exists()? {
+                let file_name = file_name.to_string_lossy();
+                let (name, version, release, arch) = parse_pkg_filename(&file_name);
+
+                let metadata_dir_name = format!("{}-{}-{}", name, version, release);
+
+                // Remove package archive and entry in database
+                fs::remove_file(full_path)?;
+                fs::remove_dir_all(self.repo_dir.join(repo).join(arch).join(metadata_dir_name))?;
+
+                if sync {
+                    if arch == ANY_ARCH {
+                        self.sync_all(&repo.to_string_lossy())?;
+                    } else {
+                        self.sync(&repo.to_string_lossy(), arch)?;
+                    }
+                }
+
+                Ok(Some((
+                    name,
+                    version.to_string(),
+                    release.to_string(),
+                    arch.to_string(),
+                )))
+            } else {
+                Ok(None)
+            }
+        } else {
+            Ok(None)
+        }
+    }
 }
```
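`remove_pkg_from_path` leans on a slice pattern to demand exactly three path components (`repo/arch/file`), returning `None` for any other shape. A self-contained sketch of that destructuring, with a hypothetical `split_pkg_path` helper:

```rust
use std::path::Path;

// Split a relative path into exactly (repo, arch, file_name);
// any other shape yields None, mirroring the Ok(None) branches above.
fn split_pkg_path(path: &Path) -> Option<(String, String, String)> {
    let components: Vec<_> = path.iter().collect();
    if let [repo, arch, file_name] = components[..] {
        Some((
            repo.to_string_lossy().into_owned(),
            arch.to_string_lossy().into_owned(),
            file_name.to_string_lossy().into_owned(),
        ))
    } else {
        None
    }
}

fn main() {
    let p = Path::new("extra/x86_64/foo-1.0-1-x86_64.pkg.tar.zst");
    assert!(split_pkg_path(p).is_some());
    assert!(split_pkg_path(Path::new("too/short")).is_none());
}
```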
```diff
@@ -3,6 +3,8 @@ mod package;
 
 pub use manager::RepoGroupManager;
 
+use std::path::PathBuf;
+
 use crate::db::entities::{package as db_package, repo as db_repo};
 use axum::body::Body;
 use axum::extract::{BodyStream, Path, State};
@@ -12,7 +14,7 @@ use axum::response::IntoResponse;
 use axum::routing::{delete, post};
 use axum::Router;
 use futures::StreamExt;
-use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter};
+use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, ModelTrait, QueryFilter};
 use std::sync::Arc;
 use tokio::{fs, io::AsyncWriteExt};
 use tower::util::ServiceExt;
@@ -20,6 +22,8 @@ use tower_http::services::{ServeDir, ServeFile};
 use tower_http::validate_request::ValidateRequestHeaderLayer;
 use uuid::Uuid;
 
+const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
+
 pub fn router(api_key: &str) -> Router<crate::Global> {
     Router::new()
         .route(
```
```diff
@@ -42,73 +46,6 @@ pub fn router(api_key: &str) -> Router<crate::Global> {
         )
 }
 
-async fn post_package_archive(
-    State(global): State<crate::Global>,
-    Path(repo): Path<String>,
-    mut body: BodyStream,
-) -> crate::Result<()> {
-    // We first stream the uploaded file to disk
-    let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
-    let path = global.config.pkg_dir.join(uuid.to_string());
-    let mut f = fs::File::create(&path).await?;
-
-    while let Some(chunk) = body.next().await {
-        f.write_all(&chunk?).await?;
-    }
-
-    let clone = Arc::clone(&global.repo_manager);
-    let path_clone = path.clone();
-    let repo_clone = repo.clone();
-    let res = tokio::task::spawn_blocking(move || {
-        clone
-            .write()
-            .unwrap()
-            .add_pkg_from_path(&repo_clone, &path_clone)
-    })
-    .await?;
-
-    match res {
-        // Insert the newly added package into the database
-        Ok(pkg) => {
-            tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
-
-            // Query the repo for its ID, or create it if it does not already exist
-            let repo_entity = db_repo::Entity::find()
-                .filter(db_repo::Column::Name.eq(&repo))
-                .one(&global.db)
-                .await?;
-
-            let repo_id = if let Some(repo_entity) = repo_entity {
-                repo_entity.id
-            } else {
-                let model = db_repo::ActiveModel {
-                    name: sea_orm::Set(repo.clone()),
-                    ..Default::default()
-                };
-
-                db_repo::Entity::insert(model)
-                    .exec(&global.db)
-                    .await?
-                    .last_insert_id
-            };
-
-            // Insert the package's data into the database
-            let mut model: db_package::ActiveModel = pkg.into();
-            model.repo_id = sea_orm::Set(repo_id);
-
-            model.insert(&global.db).await?;
-
-            Ok(())
-        }
-        // Remove the uploaded file and return the error
-        Err(err) => {
-            tokio::fs::remove_file(path).await?;
-
-            Err(err.into())
-        }
-    }
-}
-
 /// Serve the package archive files and database archives. If files are requested for an
 /// architecture that does not have any explicit packages, a repository containing only "any" files
 /// is returned.
```
```diff
@@ -120,9 +57,9 @@ async fn get_file(
     let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
     let repo_exists = tokio::fs::try_exists(&repo_dir).await?;
 
-    let res = if file_name.ends_with(".db") || file_name.ends_with(".db.tar.gz") {
+    let res = if DB_FILE_EXTS.iter().any(|ext| file_name.ends_with(ext)) {
         // Append tar extension to ensure we find the file
-        if file_name.ends_with(".db") {
+        if !file_name.ends_with(".tar.gz") {
             file_name.push_str(".tar.gz");
         };
 
```
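The rewritten condition collapses the suffix checks into one `iter().any` over the new `DB_FILE_EXTS` constant, and the tar-extension fallback now covers the `.files` names as well. A quick standalone check of the matching logic:

```rust
const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];

fn is_db_file(file_name: &str) -> bool {
    DB_FILE_EXTS.iter().any(|ext| file_name.ends_with(ext))
}

fn main() {
    assert!(is_db_file("extra.db"));
    assert!(is_db_file("extra.files.tar.gz"));
    // Regular package archives fall through to the ServeDir branch.
    assert!(!is_db_file("foo-1.0-1-x86_64.pkg.tar.zst"));
}
```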
```diff
@@ -159,6 +96,85 @@ async fn get_file(
     Ok(res)
 }
 
+async fn post_package_archive(
+    State(global): State<crate::Global>,
+    Path(repo): Path<String>,
+    mut body: BodyStream,
+) -> crate::Result<()> {
+    // We first stream the uploaded file to disk
+    let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
+    let path = global.config.pkg_dir.join(uuid.to_string());
+    let mut f = fs::File::create(&path).await?;
+
+    while let Some(chunk) = body.next().await {
+        f.write_all(&chunk?).await?;
+    }
+
+    let clone = Arc::clone(&global.repo_manager);
+    let path_clone = path.clone();
+    let repo_clone = repo.clone();
+    let res = tokio::task::spawn_blocking(move || {
+        clone
+            .write()
+            .unwrap()
+            .add_pkg_from_path(&repo_clone, &path_clone)
+    })
+    .await?;
+
+    match res {
+        // Insert the newly added package into the database
+        Ok(pkg) => {
+            tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
+
+            // Query the repo for its ID, or create it if it does not already exist
+            let res = db_repo::Entity::find()
+                .filter(db_repo::Column::Name.eq(&repo))
+                .one(&global.db)
+                .await?;
+
+            let repo_id = if let Some(repo_entity) = res {
+                repo_entity.id
+            } else {
+                let model = db_repo::ActiveModel {
+                    name: sea_orm::Set(repo.clone()),
+                    ..Default::default()
+                };
+
+                db_repo::Entity::insert(model)
+                    .exec(&global.db)
+                    .await?
+                    .last_insert_id
+            };
+
+            // If the package already exists in the database, we remove it first
+            let res = db_package::Entity::find()
+                .filter(db_package::Column::RepoId.eq(repo_id))
+                .filter(db_package::Column::Name.eq(&pkg.info.name))
+                .filter(db_package::Column::Arch.eq(&pkg.info.arch))
+                .one(&global.db)
+                .await?;
+
+            if let Some(entry) = res {
+                entry.delete(&global.db).await?;
+            }
+
+            // Insert the package's data into the database
+            let mut model: db_package::ActiveModel = pkg.into();
+            model.repo_id = sea_orm::Set(repo_id);
+
+            model.insert(&global.db).await?;
+
+            Ok(())
+        }
+        // Remove the uploaded file and return the error
+        Err(err) => {
+            tokio::fs::remove_file(path).await?;
+
+            Err(err.into())
+        }
+    }
+}
+
 async fn delete_repo(
     State(global): State<crate::Global>,
     Path(repo): Path<String>,
```
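Throughout these handlers, the synchronous, `RwLock`-guarded repo manager is funneled through `tokio::task::spawn_blocking` so the blocking libarchive work never stalls the async executor. The shape of that pattern, reduced to a hedged sketch (assumes the `tokio` crate with the `macros` and `rt-multi-thread` features; `Manager` is a stand-in for the real `RepoGroupManager`):

```rust
use std::sync::{Arc, RwLock};

// Stand-in for the blocking RepoGroupManager.
struct Manager;

impl Manager {
    fn add_pkg(&mut self, repo: &str) -> Result<String, String> {
        Ok(format!("added to {repo}"))
    }
}

#[tokio::main]
async fn main() {
    let manager = Arc::new(RwLock::new(Manager));

    // Clone the Arc and any borrowed data before moving into the closure,
    // since spawn_blocking requires a 'static closure.
    let clone = Arc::clone(&manager);
    let repo = String::from("extra");
    let res = tokio::task::spawn_blocking(move || clone.write().unwrap().add_pkg(&repo))
        .await
        .expect("blocking task panicked");

    println!("{res:?}");
}
```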
```diff
@@ -171,6 +187,15 @@ async fn delete_repo(
     .await??;
 
     if repo_removed {
+        let res = db_repo::Entity::find()
+            .filter(db_repo::Column::Name.eq(&repo))
+            .one(&global.db)
+            .await?;
+
+        if let Some(repo_entry) = res {
+            repo_entry.delete(&global.db).await?;
+        }
+
         tracing::info!("Removed repository '{}'", repo);
 
         Ok(StatusCode::OK)
```
```diff
@@ -185,13 +210,31 @@ async fn delete_arch_repo(
 ) -> crate::Result<StatusCode> {
     let clone = Arc::clone(&global.repo_manager);
 
-    let log = format!("Removed architecture '{}' from repository '{}'", arch, repo);
-    let repo_removed =
-        tokio::task::spawn_blocking(move || clone.write().unwrap().remove_repo_arch(&repo, &arch))
-            .await??;
+    let arch_clone = arch.clone();
+    let repo_clone = repo.clone();
+    let repo_removed = tokio::task::spawn_blocking(move || {
+        clone
+            .write()
+            .unwrap()
+            .remove_repo_arch(&repo_clone, &arch_clone)
+    })
+    .await??;
 
     if repo_removed {
-        tracing::info!(log);
+        let res = db_repo::Entity::find()
+            .filter(db_repo::Column::Name.eq(&repo))
+            .one(&global.db)
+            .await?;
+
+        if let Some(repo_entry) = res {
+            // Also remove all packages for that architecture from database
+            db_package::Entity::delete_many()
+                .filter(db_package::Column::RepoId.eq(repo_entry.id))
+                .filter(db_package::Column::Arch.eq(&arch))
+                .exec(&global.db)
+                .await?;
+        }
+        tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
 
         Ok(StatusCode::OK)
     } else {
```
```diff
@@ -203,26 +246,36 @@ async fn delete_package(
     State(global): State<crate::Global>,
     Path((repo, arch, file_name)): Path<(String, String, String)>,
 ) -> crate::Result<StatusCode> {
-    let name_parts = file_name.split('-').collect::<Vec<_>>();
-
-    // Package archive files use the naming scheme pkgname-pkgver-pkgrel-arch, so a valid
-    // name contains at least 4 dash-separated sections
-    if name_parts.len() < 4 {
-        return Ok(StatusCode::NOT_FOUND);
-    }
-
-    let name = name_parts[..name_parts.len() - 3].join("-");
-    let log = format!("Removed '{}' from repository '{}'", file_name, repo);
-
     let clone = Arc::clone(&global.repo_manager);
+    let path = PathBuf::from(&repo).join(arch).join(&file_name);
 
-    let pkg_removed = tokio::task::spawn_blocking(move || {
-        clone.write().unwrap().remove_pkg(&repo, &arch, &name, true)
+    let res = tokio::task::spawn_blocking(move || {
+        clone.write().unwrap().remove_pkg_from_path(path, true)
     })
     .await??;
 
-    if pkg_removed {
-        tracing::info!(log);
+    if let Some((name, version, release, arch)) = res {
+        let res = db_repo::Entity::find()
+            .filter(db_repo::Column::Name.eq(&repo))
+            .one(&global.db)
+            .await?;
+
+        if let Some(repo_entry) = res {
+            // Also remove entry from database
+            let res = db_package::Entity::find()
+                .filter(db_package::Column::RepoId.eq(repo_entry.id))
+                .filter(db_package::Column::Name.eq(name))
+                .filter(db_package::Column::Version.eq(format!("{}-{}", version, release)))
+                .filter(db_package::Column::Arch.eq(arch))
+                .one(&global.db)
+                .await?;
+
+            if let Some(entry) = res {
+                entry.delete(&global.db).await?;
+            }
+        }
+
+        tracing::info!("Removed '{}' from repository '{}'", file_name, repo);
 
         Ok(StatusCode::OK)
     } else {
```
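The filename parsing the old handler did inline, whose naming scheme the removed comment documents (`pkgname-pkgver-pkgrel-arch`, where the name itself may contain dashes), now lives in `parse_pkg_filename`. A hedged sketch of what such a parser can look like; the real helper's signature and error handling may differ:

```rust
// Parse "name-version-release-arch(.ext...)" into its four parts.
// Hypothetical stand-in for the real parse_pkg_filename helper.
fn parse_pkg_filename(file_name: &str) -> Option<(String, &str, &str, &str)> {
    let parts: Vec<&str> = file_name.split('-').collect();
    // A valid name contains at least 4 dash-separated sections.
    if parts.len() < 4 {
        return None;
    }
    // The name is everything except the last three sections.
    let name = parts[..parts.len() - 3].join("-");
    let version = parts[parts.len() - 3];
    let release = parts[parts.len() - 2];
    // The final section still carries the file extension here.
    let arch = parts[parts.len() - 1].split('.').next().unwrap();
    Some((name, version, release, arch))
}

fn main() {
    let (name, ver, rel, arch) =
        parse_pkg_filename("rust-analyzer-2024.01-1-x86_64.pkg.tar.zst").unwrap();
    assert_eq!((name.as_str(), ver, rel, arch), ("rust-analyzer", "2024.01", "1", "x86_64"));
}
```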