feat(server): working archive upload for non-any architecture

Branch: main
Jef Roosens, 2023-07-13 20:24:45 +02:00
parent eb9f6a5cb1, commit c2f35aebac
Signed by: Jef Roosens (GPG Key ID: B75D4F293C7052DB)
6 changed files with 283 additions and 39 deletions

View File

@ -68,7 +68,7 @@ impl ReadFilter {
ReadFilter::Bzip2 => Some(".bz2"),
ReadFilter::Lzma => Some(".lzma"),
ReadFilter::Xz => Some(".xz"),
ReadFilter::Zstd => Some(".zstd"),
ReadFilter::Zstd => Some(".zst"),
_ => None,
}
}
@ -308,6 +308,12 @@ pub trait Entry {
ffi::archive_entry_set_pathname(self.entry_mut(), c_str.as_ptr());
}
}
fn set_mode(&mut self, mode: u32) {
unsafe {
ffi::archive_entry_set_mode(self.entry_mut(), mode);
}
}
}
#[derive(Debug, PartialEq, Eq)]
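As an aside, a minimal sketch of how both changes in this file surface to callers, assuming the ReadFilter::extension() helper shown above and the WriteEntry type this commit makes public:

use libarchive::write::WriteEntry;
use libarchive::{Entry, ReadFilter};

fn sketch() {
    // ".zst" is the suffix makepkg actually produces for zstd-compressed
    // packages (e.g. foo-1.0-1-x86_64.pkg.tar.zst), hence the fix above
    assert_eq!(ReadFilter::Zstd.extension(), Some(".zst"));
    assert_eq!(ReadFilter::Xz.extension(), Some(".xz"));

    // 0o100644 = regular-file type bits (0o100000) plus rw-r--r-- permissions,
    // the mode this commit sets on the generated desc/files entries
    let mut entry = WriteEntry::new();
    entry.set_mode(0o100644);
}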

View File

@ -1,7 +1,7 @@
pub mod archive;
pub mod error;
pub mod read;
- mod write;
+ pub mod write;
pub use archive::{
Entry, ExtractOption, ExtractOptions, Handle, ReadCompression, ReadFilter, ReadFormat,

View File

@ -1,8 +1,8 @@
use super::WriteEntry;
+ use crate::error::ArchiveError;
use crate::Entry;
use crate::Handle;
use libarchive3_sys::ffi;
- use libarchive3_sys::ffi::c_void;
use std::fs;
use std::io;
use std::io::Read;
@ -37,7 +37,10 @@ impl FileWriter {
path: P,
) -> io::Result<()> {
unsafe {
- ffi::archive_write_header(self.handle_mut(), entry.entry_mut());
+ match ffi::archive_write_header(self.handle_mut(), entry.entry_mut()) {
+     ffi::ARCHIVE_OK => (),
+     _ => return Err(ArchiveError::from(self as &dyn Handle).into()),
+ }
}
let mut f = fs::File::open(path)?;
@ -47,7 +50,15 @@ impl FileWriter {
match f.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(written) => unsafe {
- ffi::archive_write_data(self.handle_mut(), buf.as_ptr() as *const _, written);
+ // archive_write_data returns the number of bytes written, or a negative
+ // value on error; comparing it against ARCHIVE_OK (0) would flag every
+ // successful non-empty write as a failure
+ if ffi::archive_write_data(
+     self.handle_mut(),
+     buf.as_ptr() as *const _,
+     written,
+ ) < 0
+ {
+     return Err(ArchiveError::from(self as &dyn Handle).into());
+ }
},
Err(err) => match err.kind() {
io::ErrorKind::Interrupted => (),
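
For context, a minimal sketch of driving this writer end to end, mirroring the Builder/WriteEntry usage from later in this commit; the archive and source paths are placeholders:

use libarchive::write::{Builder, WriteEntry};
use libarchive::{Entry, WriteFilter, WriteFormat};
use std::io;
use std::path::PathBuf;

fn write_one_entry() -> io::Result<()> {
    let mut builder = Builder::new();
    builder.add_filter(WriteFilter::Gzip)?;
    builder.set_format(WriteFormat::PaxRestricted)?;
    let mut writer = builder.open_file(PathBuf::from("db.tar.gz"))?;

    let mut entry = WriteEntry::new();
    entry.set_pathname(&PathBuf::from("desc"));
    entry.set_mode(0o100644);
    // Failures from archive_write_header/archive_write_data now surface
    // here as io::Error instead of being silently ignored
    writer.append_path(&mut entry, PathBuf::from("desc"))?;
    Ok(())
}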

View File

@ -1,3 +1,8 @@
use super::package::Package;
use libarchive::write::{Builder, WriteEntry};
use libarchive::{Entry, WriteFilter, WriteFormat};
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
/// Overarching abstraction that orchestrates updating the repositories stored on the server
@ -13,4 +18,160 @@ impl RepoGroupManager {
pkg_dir: pkg_dir.as_ref().to_path_buf(),
}
}
pub fn sync(&mut self, repo: &str, arch: &str) -> io::Result<()> {
let subrepo_path = self.repo_dir.join(repo).join(arch);
let mut ar_db = Builder::new();
ar_db.add_filter(WriteFilter::Gzip)?;
ar_db.set_format(WriteFormat::PaxRestricted)?;
let mut ar_files = Builder::new();
ar_files.add_filter(WriteFilter::Gzip)?;
ar_files.set_format(WriteFormat::PaxRestricted)?;
let mut ar_db = ar_db.open_file(subrepo_path.join(format!("{}.tar.gz", repo)))?;
let mut ar_files =
ar_files.open_file(subrepo_path.join(format!("{}.files.tar.gz", repo)))?;
for entry in subrepo_path.read_dir()? {
let entry = entry?;
if entry.file_type()?.is_dir() {
// The desc file needs to be added to both archives
let path_in_tar = PathBuf::from(entry.file_name()).join("desc");
let src_path = entry.path().join("desc");
let mut ar_entry = WriteEntry::new();
ar_entry.set_pathname(&path_in_tar);
ar_entry.set_mode(0o100644);
ar_db.append_path(&mut ar_entry, &src_path)?;
ar_files.append_path(&mut ar_entry, src_path)?;
// The files file is only required in the files database
let path_in_tar = PathBuf::from(entry.file_name()).join("files");
let src_path = entry.path().join("files");
let mut ar_entry = WriteEntry::new();
ar_entry.set_pathname(&path_in_tar);
ar_entry.set_mode(0o100644);
ar_files.append_path(&mut ar_entry, src_path)?;
}
}
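// At this point both archives contain, e.g. for foo 1.0-1:
//   {repo}.tar.gz:        foo-1.0-1/desc
//   {repo}.files.tar.gz:  foo-1.0-1/desc and foo-1.0-1/files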
Ok(())
}
pub fn remove_pkg_from_arch_repo(
&mut self,
repo: &str,
arch: &str,
pkg_name: &str,
) -> io::Result<bool> {
let arch_repo_dir = self.repo_dir.join(repo).join(arch);
if !arch_repo_dir.exists() {
return Ok(false);
}
for entry in arch_repo_dir.read_dir()? {
let entry = entry?;
// Make sure we skip the archive files
if !entry.metadata()?.is_dir() {
continue;
}
let file_name = entry.file_name();
let file_name = file_name.to_string_lossy();
// The directory name is "{name}-{pkgver}-{pkgrel}". The last two parts when
// splitting on a dash are the pkgver and pkgrel, so we trim those to recover
// the package name
let name_parts = file_name.split('-').collect::<Vec<_>>();
let name = name_parts[..name_parts.len() - 2].join("-");
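// e.g. "rustup-1.25.2-1" splits into ["rustup", "1.25.2", "1"]; joining
// everything but the last two parts recovers names that contain dashes too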
if name == pkg_name {
fs::remove_dir_all(entry.path())?;
// Also remove the old package archive
let arch_repo_pkg_dir = self.pkg_dir.join(repo).join(arch);
for entry in arch_repo_pkg_dir.read_dir()? {
let entry = entry?;
let file_name = entry.file_name();
let file_name = file_name.to_string_lossy();
// Same trick, but for package files, we also need to trim the arch
let name_parts = file_name.split('-').collect::<Vec<_>>();
let name = name_parts[..name_parts.len() - 3].join("-");
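// e.g. "python-pip-23.1-1-any.pkg.tar.zst" → ["python", "pip", "23.1", "1",
// "any.pkg.tar.zst"]; dropping the last three parts leaves "python-pip"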
if name == pkg_name {
fs::remove_file(entry.path())?;
}
}
return Ok(true);
}
}
Ok(false)
}
pub fn add_pkg_from_path<P: AsRef<Path>>(&mut self, repo: &str, path: P) -> io::Result<()> {
let mut pkg = Package::open(&path)?;
pkg.calculate_checksum()?;
let archs = self.add_pkg_in_repo(repo, &pkg)?;
// We add the package to each architecture it was added to by hard-linking the provided
// package file. This prevents storing a package of type "any" multiple times on disk.
for arch in archs {
let arch_repo_pkg_path = self.pkg_dir.join(repo).join(arch);
let dest_pkg_path = arch_repo_pkg_path.join(pkg.file_name());
fs::create_dir_all(&arch_repo_pkg_path)?;
fs::hard_link(&path, dest_pkg_path)?;
}
fs::remove_file(path)
}
/// Add a package to the given repo, returning the architectures the package was added to.
pub fn add_pkg_in_repo(&mut self, repo: &str, pkg: &Package) -> io::Result<Vec<String>> {
if pkg.info.arch != "any" {
self.add_pkg_in_arch_repo(repo, &pkg.info.arch, pkg)?;
return Ok(vec![String::from(&pkg.info.arch)]);
}
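// Packages with arch "any" must be distributed to every architecture of
// the repo; that part is not implemented yet, hence "non-any" in the
// commit title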
todo!()
}
pub fn add_pkg_in_arch_repo(
&mut self,
repo: &str,
arch: &str,
pkg: &Package,
) -> io::Result<()> {
let pkg_dir = self
.repo_dir
.join(repo)
.join(arch)
.join(format!("{}-{}", pkg.info.name, pkg.info.version));
// We first remove the previous version of the package, if present
self.remove_pkg_from_arch_repo(repo, arch, &pkg.info.name)?;
fs::create_dir_all(&pkg_dir)?;
let mut desc_file = fs::File::create(pkg_dir.join("desc"))?;
pkg.write_desc(&mut desc_file)?;
let mut files_file = fs::File::create(pkg_dir.join("files"))?;
pkg.write_files(&mut files_file)?;
self.sync(repo, arch)
}
}
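A hypothetical end-to-end sketch of the manager, using only the methods defined above; the repo name and package path are placeholders:

use std::io;

fn upload_flow(mgr: &mut RepoGroupManager) -> io::Result<()> {
    // Parses the archive, writes its desc/files database entries, hard-links
    // the package into the per-arch pool and regenerates the db archives
    mgr.add_pkg_from_path("extra", "/tmp/foo-1.0-1-x86_64.pkg.tar.zst")?;

    // Removing it again also deletes the pooled package file
    assert!(mgr.remove_pkg_from_arch_repo("extra", "x86_64", "foo")?);
    Ok(())
}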

View File

@ -12,6 +12,7 @@ use futures::StreamExt;
use futures::TryFutureExt;
use futures::TryStreamExt;
use std::io::Read;
use std::sync::Arc;
use tokio::{fs, io, io::AsyncWriteExt};
use tower_http::services::ServeDir;
use uuid::Uuid;
@ -53,11 +54,15 @@ async fn post_package_archive(
.await?;
}
- let pkg = tokio::task::spawn_blocking(move || package::Package::open(&path))
-     .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
-     .await?;
- println!("{:?}", pkg);
- Ok(())
+ let clone = Arc::clone(&global.repo_manager);
+ tokio::task::spawn_blocking(move || clone.write().unwrap().add_pkg_from_path(&repo, &path))
+     .map_err(|err| {
+         println!("{}", err);
+         StatusCode::INTERNAL_SERVER_ERROR
+     })
+     .await?
+     .map_err(|err| {
+         println!("{}", err);
+         StatusCode::INTERNAL_SERVER_ERROR
+     })
}
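The handler above moves blocking filesystem and libarchive work onto Tokio's blocking thread pool; a self-contained sketch of that pattern, with an RwLock-guarded counter standing in for the shared RepoGroupManager:

use std::sync::{Arc, RwLock};

async fn update_shared(state: Arc<RwLock<u64>>) -> Result<u64, &'static str> {
    let clone = Arc::clone(&state);
    tokio::task::spawn_blocking(move || {
        // Blocking work happens here without stalling the async executor
        let mut guard = clone.write().unwrap();
        *guard += 1;
        *guard
    })
    .await
    .map_err(|_| "blocking task panicked")
}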

View File

@ -2,43 +2,43 @@ use libarchive::read::{Archive, Builder};
use libarchive::{Entry, ReadFilter};
use std::fmt;
use std::fs;
- use std::io::{self, BufRead, BufReader, Read};
+ use std::io::{self, BufRead, BufReader, BufWriter, Read, Write};
use std::path::{Path, PathBuf};
const IGNORED_FILES: [&str; 5] = [".BUILDINFO", ".INSTALL", ".MTREE", ".PKGINFO", ".CHANGELOG"];
#[derive(Debug)]
pub struct Package {
- path: PathBuf,
- info: PkgInfo,
- files: Vec<PathBuf>,
- compression: ReadFilter,
+ pub path: PathBuf,
+ pub info: PkgInfo,
+ pub files: Vec<PathBuf>,
+ pub compression: ReadFilter,
}
#[derive(Debug, Default)]
pub struct PkgInfo {
- name: String,
- base: String,
- version: String,
- description: String,
- size: u64,
- csize: u64,
- url: String,
- arch: String,
- build_date: i64,
- packager: String,
- pgpsig: String,
- pgpsigsize: i64,
- groups: Vec<String>,
- licenses: Vec<String>,
- replaces: Vec<String>,
- depends: Vec<String>,
- conflicts: Vec<String>,
- provides: Vec<String>,
- optdepends: Vec<String>,
- makedepends: Vec<String>,
- checkdepends: Vec<String>,
- sha256sum: Option<String>,
+ pub name: String,
+ pub base: String,
+ pub version: String,
+ pub description: String,
+ pub size: u64,
+ pub csize: u64,
+ pub url: String,
+ pub arch: String,
+ pub build_date: i64,
+ pub packager: String,
+ pub pgpsig: String,
+ pub pgpsigsize: i64,
+ pub groups: Vec<String>,
+ pub licenses: Vec<String>,
+ pub replaces: Vec<String>,
+ pub depends: Vec<String>,
+ pub conflicts: Vec<String>,
+ pub provides: Vec<String>,
+ pub optdepends: Vec<String>,
+ pub makedepends: Vec<String>,
+ pub checkdepends: Vec<String>,
+ pub sha256sum: Option<String>,
}
#[derive(Debug, PartialEq, Eq)]
@ -193,4 +193,65 @@ impl Package {
self.compression.extension().unwrap()
)
}
/// Write the formatted desc file to the provided writer
pub fn write_desc<W: Write>(&self, w: &mut W) -> io::Result<()> {
// We write a lot of small strings to the writer, so wrapping it in a BufWriter is
// beneficial
let mut w = BufWriter::new(w);
let info = &self.info;
writeln!(w, "%FILENAME%\n{}", self.file_name())?;
let mut write = |key: &str, value: &str| {
if !value.is_empty() {
writeln!(w, "\n%{}%\n{}", key, value)
} else {
Ok(())
}
};
write("NAME", &info.name)?;
write("BASE", &info.base)?;
write("VERSION", &info.version)?;
write("DESC", &info.description)?;
write("GROUPS", &info.groups.join("\n"))?;
write("CSIZE", &info.csize.to_string())?;
write("ISIZE", &info.size.to_string())?;
if let Some(checksum) = &info.sha256sum {
write("SHA256SUM", checksum)?;
}
write("URL", &info.url)?;
write("LICENSE", &info.licenses.join("\n"))?;
write("ARCH", &info.arch)?;
write("BUILDDATE", &info.build_date.to_string())?;
write("PACKAGER", &info.packager)?;
write("REPLACES", &info.replaces.join("\n"))?;
write("CONFLICTS", &info.conflicts.join("\n"))?;
write("PROVIDES", &info.provides.join("\n"))?;
write("DEPENDS", &info.depends.join("\n"))?;
write("OPTDEPENDS", &info.optdepends.join("\n"))?;
write("MAKEDEPENDS", &info.makedepends.join("\n"))?;
write("CHECKDEPENDS", &info.checkdepends.join("\n"))?;
Ok(())
}
pub fn write_files<W: Write>(&self, w: &mut W) -> io::Result<()> {
// We write a lot of small strings to the writer, so wrapping it in a BufWriter is
// beneficial
let mut w = BufWriter::new(w);
writeln!(w, "%FILES%")?;
for file in &self.files {
writeln!(w, "{}", file.to_string_lossy())?;
}
Ok(())
}
}
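For reference, write_desc emits the blank-line-separated %KEY% blocks that pacman's database format uses, skipping empty fields; a trimmed example with illustrative values:

%FILENAME%
foo-1.0-1-x86_64.pkg.tar.zst

%NAME%
foo

%VERSION%
1.0-1

%CSIZE%
123456

%ARCH%
x86_64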