Compare commits

...

3 Commits

Author SHA1 Message Date
Jef Roosens af27b06df1
feat(server): correctly serve repo files
2023-08-02 22:00:31 +02:00
Jef Roosens 2e5c84a48d
refactor(server): rewrite part of repo logic; remove need for default arch
2023-08-02 18:48:17 +02:00
Jef Roosens efc8114704
feat: start of server Dockerfile
2023-07-31 22:19:56 +02:00
7 changed files with 251 additions and 106 deletions

.dockerignore 100644

@@ -0,0 +1,4 @@
+target/
+.git/
+server/data/
+server/test.db/

CHANGELOG.md

@@ -14,3 +14,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 * Serve packages from any number of repositories & architectures
 * Publish packages to and delete packages from repositories using HTTP
   requests
+* Packages of architecture "any" are part of every architecture's
+  database

Dockerfile 100644

@@ -0,0 +1,64 @@
+FROM rust:1.70-alpine3.18 AS builder
+
+ARG DI_VER=1.2.5
+
+WORKDIR /app
+
+# RUN apk add --no-cache \
+#     build-base \
+#     curl \
+#     make \
+#     unzip \
+#     pkgconf \
+#     openssl openssl-libs-static openssl-dev \
+#     libarchive-static libarchive-dev \
+#     zlib-static zlib-dev \
+#     bzip2-static bzip2-dev \
+#     xz-static xz-dev \
+#     expat-static expat-dev \
+#     zstd-static zstd-dev \
+#     lz4-static lz4-dev \
+#     acl-static acl-dev
+RUN apk add --no-cache \
+    build-base \
+    curl \
+    make \
+    unzip \
+    pkgconf \
+    libarchive libarchive-dev
+
+# Build dumb-init
+RUN curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.tar.gz" | tar -xzf - && \
+    cd "dumb-init-${DI_VER}" && \
+    make SHELL=/bin/sh && \
+    mv dumb-init .. && \
+    cd ..
+
+COPY . .
+
+# ENV LIBARCHIVE_STATIC=1 \
+#     LIBARCHIVE_LIB_DIR=/usr/lib \
+#     LIBARCHIVE_INCLUDE_DIR=/usr/include \
+#     LIBARCHIVE_LDFLAGS='-lssl -lcrypto -L/lib -lz -lbz2 -llzma -lexpat -lzstd -llz4'
+#     LIBARCHIVE_LDFLAGS='-L/usr/lib -lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3'
+
+RUN cargo build --release && \
+    du -h target/release/rieterd && \
+    readelf -d target/release/rieterd && \
+    chmod +x target/release/rieterd
+# [ "$(readelf -d target/debug/rieterd | grep NEEDED | wc -l)" = 0 ] && \
+# chmod +x target/debug/rieterd
+
+FROM alpine:3.18
+
+WORKDIR /app
+
+RUN apk add --no-cache \
+    libarchive
+
+COPY --from=builder /app/dumb-init /bin/dumb-init
+COPY --from=builder /app/target/release/rieterd /bin/rieterd
+
+ENTRYPOINT ["/bin/dumb-init", "--"]
+CMD ["/bin/rieterd"]

cli.rs

@@ -26,8 +26,6 @@ pub struct Cli {
     pub pkg_dir: PathBuf,
     /// Directory where repository metadata is stored
     pub repo_dir: PathBuf,
-    /// Default architecture to add packages with arch "any" to
-    pub default_arch: String,
 }
 
 impl FromRef<Global> for Arc<RwLock<RepoGroupManager>> {
@@ -51,7 +49,7 @@ impl Cli {
             repo_dir: self.repo_dir.clone(),
             pkg_dir: self.pkg_dir.clone(),
         };
-        let repo_manager = RepoGroupManager::new(&self.repo_dir, &self.pkg_dir, &self.default_arch);
+        let repo_manager = RepoGroupManager::new(&self.repo_dir, &self.pkg_dir);
 
         let global = Global {
             config,
@@ -60,7 +58,7 @@ impl Cli {
         // build our application with a single route
         let app = Router::new()
-            .merge(crate::repo::router(&global))
+            .merge(crate::repo::router())
            .with_state(global)
            .layer(TraceLayer::new_for_http());
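
The change to `router()` works because axum's typed state flows through `.with_state` instead of being captured when the router is built: handlers request it with the `State` extractor. A minimal sketch of that pattern, assuming axum 0.6 (the version the `BodyStream` and `Router<S>` APIs in this diff point to); `AppState` and `hello` are made-up names, not part of this codebase:

use axum::{extract::State, routing::get, Router};

#[derive(Clone)]
struct AppState {
    greeting: &'static str,
}

// The handler pulls the state out per request, so the router itself
// can be constructed without access to the state value.
async fn hello(State(state): State<AppState>) -> String {
    format!("{}, world!", state.greeting)
}

fn router() -> Router<AppState> {
    Router::new().route("/", get(hello))
}

#[tokio::main]
async fn main() {
    let app = router().with_state(AppState { greeting: "Hello" });

    axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}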

error.rs

@@ -10,6 +10,7 @@ pub type Result<T> = std::result::Result<T, ServerError>;
 pub enum ServerError {
     IO(io::Error),
     Axum(axum::Error),
+    Status(StatusCode),
 }
 
 impl fmt::Display for ServerError {
@@ -17,6 +18,7 @@ impl fmt::Display for ServerError {
         match self {
             ServerError::IO(err) => write!(fmt, "{}", err),
             ServerError::Axum(err) => write!(fmt, "{}", err),
+            ServerError::Status(status) => write!(fmt, "{}", status),
         }
     }
 }
@@ -25,9 +27,12 @@ impl Error for ServerError {}
 impl IntoResponse for ServerError {
     fn into_response(self) -> Response {
         tracing::error!("{:?}", self);
 
-        StatusCode::INTERNAL_SERVER_ERROR.into_response()
+        match self {
+            ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
+            ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
+            ServerError::Status(status) => status.into_response(),
+        }
     }
 }
@@ -49,3 +54,9 @@ impl From<tokio::task::JoinError> for ServerError {
         ServerError::IO(err.into())
     }
 }
+
+impl From<StatusCode> for ServerError {
+    fn from(status: StatusCode) -> Self {
+        Self::Status(status)
+    }
+}
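
The new `From<StatusCode>` impl is what lets handlers bail out with a bare status code via `?` or `.into()`. A self-contained sketch of the conversion; `find_package` and its return value are illustrative only:

use axum::http::StatusCode;

#[derive(Debug)]
enum ServerError {
    Status(StatusCode),
}

impl From<StatusCode> for ServerError {
    fn from(status: StatusCode) -> Self {
        Self::Status(status)
    }
}

// A bare status code now converts into the error type, so handlers can
// short-circuit with `?` on an Err(StatusCode) or an explicit .into().
fn find_package(known: bool) -> Result<&'static str, ServerError> {
    if !known {
        return Err(StatusCode::NOT_FOUND.into());
    }

    Ok("package found")
}

fn main() {
    assert!(find_package(true).is_ok());
    assert!(find_package(false).is_err());
}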

repo/manager.rs

@@ -1,28 +1,33 @@
 use super::package::Package;
 use libarchive::write::{Builder, WriteEntry};
 use libarchive::{Entry, WriteFilter, WriteFormat};
-use std::collections::HashSet;
 use std::fs;
 use std::io;
 use std::path::{Path, PathBuf};
 
+pub const ANY_ARCH: &str = "any";
+
 /// Overarching abstraction that orchestrates updating the repositories stored on the server
 pub struct RepoGroupManager {
     repo_dir: PathBuf,
     pkg_dir: PathBuf,
-    default_arch: String,
 }
 
+fn parse_pkg_filename(file_name: &str) -> (String, &str, &str, &str) {
+    let name_parts = file_name.split('-').collect::<Vec<_>>();
+    let name = name_parts[..name_parts.len() - 3].join("-");
+    let version = name_parts[name_parts.len() - 3];
+    let release = name_parts[name_parts.len() - 2];
+    let (arch, _) = name_parts[name_parts.len() - 1].split_once('.').unwrap();
+
+    (name, version, release, arch)
+}
+
 impl RepoGroupManager {
-    pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(
-        repo_dir: P1,
-        pkg_dir: P2,
-        default_arch: &str,
-    ) -> Self {
+    pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(repo_dir: P1, pkg_dir: P2) -> Self {
         RepoGroupManager {
             repo_dir: repo_dir.as_ref().to_path_buf(),
             pkg_dir: pkg_dir.as_ref().to_path_buf(),
-            default_arch: String::from(default_arch),
         }
     }
@@ -37,11 +42,23 @@ impl RepoGroupManager {
         ar_files.add_filter(WriteFilter::Gzip)?;
         ar_files.set_format(WriteFormat::PaxRestricted)?;
 
-        let mut ar_db = ar_db.open_file(subrepo_path.join(format!("{}.tar.gz", repo)))?;
+        let mut ar_db = ar_db.open_file(subrepo_path.join(format!("{}.db.tar.gz", repo)))?;
         let mut ar_files =
             ar_files.open_file(subrepo_path.join(format!("{}.files.tar.gz", repo)))?;
 
-        for entry in subrepo_path.read_dir()? {
+        // All architectures should also include the "any" architecture, except for the "any"
+        // architecture itself.
+        let repo_any_dir = self.repo_dir.join(repo).join(ANY_ARCH);
+
+        let any_entries_iter = if arch != ANY_ARCH && repo_any_dir.try_exists()? {
+            Some(repo_any_dir.read_dir()?)
+        } else {
+            None
+        }
+        .into_iter()
+        .flatten();
+
+        for entry in subrepo_path.read_dir()?.chain(any_entries_iter) {
             let entry = entry?;
 
             if entry.file_type()?.is_dir() {
@@ -68,123 +85,118 @@ impl RepoGroupManager {
             }
         }
 
-        ar_db.close().and(ar_files.close()).map_err(Into::into)
+        ar_db.close()?;
+        ar_files.close()?;
+
+        Ok(())
     }
 
+    /// Synchronize all present architectures' db archives in the given repository.
+    pub fn sync_all(&mut self, repo: &str) -> io::Result<()> {
+        for entry in self.repo_dir.join(repo).read_dir()? {
+            let entry = entry?;
+
+            if entry.file_type()?.is_dir() {
+                self.sync(repo, &entry.file_name().to_string_lossy())?;
+            }
+        }
+
+        Ok(())
+    }
+
     pub fn add_pkg_from_path<P: AsRef<Path>>(&mut self, repo: &str, path: P) -> io::Result<()> {
         let mut pkg = Package::open(&path)?;
         pkg.calculate_checksum()?;
 
-        let archs = self.add_pkg_in_repo(repo, &pkg)?;
+        self.add_pkg(repo, &pkg)?;
 
-        // We add the package to each architecture it was added to by hard-linking the provided
-        // package file. This prevents storing a package of type "any" multiple times on disk.
-        for arch in archs {
-            let arch_repo_pkg_path = self.pkg_dir.join(repo).join(arch);
-            let dest_pkg_path = arch_repo_pkg_path.join(pkg.file_name());
-
-            fs::create_dir_all(&arch_repo_pkg_path)?;
-            fs::hard_link(&path, dest_pkg_path)?;
-        }
-
-        fs::remove_file(path)
+        // After successfully adding the package, we move it to the packages directory
+        let dest_pkg_path = self
+            .pkg_dir
+            .join(repo)
+            .join(&pkg.info.arch)
+            .join(pkg.file_name());
+
+        fs::create_dir_all(dest_pkg_path.parent().unwrap())?;
+        fs::rename(&path, dest_pkg_path)
     }
 
-    /// Add a package to the given repo, returning to what architectures the package was added.
-    pub fn add_pkg_in_repo(&mut self, repo: &str, pkg: &Package) -> io::Result<HashSet<String>> {
-        let mut arch_repos: HashSet<String> = HashSet::new();
+    pub fn add_pkg(&mut self, repo: &str, pkg: &Package) -> io::Result<()> {
+        // We first remove any existing version of the package
+        self.remove_pkg(repo, &pkg.info.arch, &pkg.info.name, false)?;
 
-        if pkg.info.arch != "any" {
-            self.add_pkg_in_arch_repo(repo, &pkg.info.arch, pkg)?;
-            arch_repos.insert(pkg.info.arch.clone());
-        }
-        // Packages of arch "any" are added to every existing arch
-        else {
-            arch_repos.insert(self.default_arch.clone());
-
-            let repo_dir = self.repo_dir.join(repo);
-
-            if repo_dir.exists() {
-                for entry in repo_dir.read_dir()? {
-                    arch_repos.insert(entry?.file_name().to_string_lossy().to_string());
-                }
-            }
-
-            for arch in arch_repos.iter() {
-                self.add_pkg_in_arch_repo(repo, arch, pkg)?;
-            }
-        }
-
-        Ok(arch_repos)
-    }
-
-    pub fn add_pkg_in_arch_repo(
-        &mut self,
-        repo: &str,
-        arch: &str,
-        pkg: &Package,
-    ) -> io::Result<()> {
-        let pkg_dir = self
+        // Write the `desc` and `files` metadata files to disk
+        let metadata_dir = self
             .repo_dir
             .join(repo)
-            .join(arch)
+            .join(&pkg.info.arch)
             .join(format!("{}-{}", pkg.info.name, pkg.info.version));
 
-        // We first remove the previous version of the package, if present
-        self.remove_pkg_from_arch_repo(repo, arch, &pkg.info.name, false)?;
-
-        fs::create_dir_all(&pkg_dir)?;
+        fs::create_dir_all(&metadata_dir)?;
 
-        let mut desc_file = fs::File::create(pkg_dir.join("desc"))?;
+        let mut desc_file = fs::File::create(metadata_dir.join("desc"))?;
         pkg.write_desc(&mut desc_file)?;
 
-        let mut files_file = fs::File::create(pkg_dir.join("files"))?;
+        let mut files_file = fs::File::create(metadata_dir.join("files"))?;
         pkg.write_files(&mut files_file)?;
 
-        self.sync(repo, arch)
+        // If a package of type "any" is added, we need to update every existing database
+        if pkg.info.arch == ANY_ARCH {
+            self.sync_all(repo)?;
+        } else {
+            self.sync(repo, &pkg.info.arch)?;
+        }
+
+        Ok(())
     }
 
     pub fn remove_repo(&mut self, repo: &str) -> io::Result<bool> {
-        let repo_dir = self.repo_dir.join(&repo);
+        let repo_dir = self.repo_dir.join(repo);
 
         if !repo_dir.exists() {
             Ok(false)
         } else {
-            fs::remove_dir_all(&repo_dir)
-                .and_then(|_| fs::remove_dir_all(self.pkg_dir.join(repo)))?;
+            fs::remove_dir_all(&repo_dir)?;
+            fs::remove_dir_all(self.pkg_dir.join(repo))?;
 
             Ok(true)
         }
     }
 
-    pub fn remove_arch_repo(&mut self, repo: &str, arch: &str) -> io::Result<bool> {
+    pub fn remove_repo_arch(&mut self, repo: &str, arch: &str) -> io::Result<bool> {
         let sub_path = PathBuf::from(repo).join(arch);
         let repo_dir = self.repo_dir.join(&sub_path);
 
         if !repo_dir.exists() {
-            Ok(false)
-        } else {
-            fs::remove_dir_all(&repo_dir)
-                .and_then(|_| fs::remove_dir_all(self.pkg_dir.join(sub_path)))?;
-
-            Ok(true)
+            return Ok(false);
         }
+
+        fs::remove_dir_all(&repo_dir)?;
+        fs::remove_dir_all(self.pkg_dir.join(sub_path))?;
+
+        // Removing the "any" architecture updates all other repositories
+        if arch == ANY_ARCH {
+            self.sync_all(repo)?;
+        }
+
+        Ok(true)
     }
 
-    pub fn remove_pkg_from_arch_repo(
+    pub fn remove_pkg(
         &mut self,
         repo: &str,
         arch: &str,
         pkg_name: &str,
         sync: bool,
     ) -> io::Result<bool> {
-        let arch_repo_dir = self.repo_dir.join(repo).join(arch);
+        let repo_arch_dir = self.repo_dir.join(repo).join(arch);
 
-        if !arch_repo_dir.exists() {
+        if !repo_arch_dir.exists() {
             return Ok(false);
         }
 
-        for entry in arch_repo_dir.read_dir()? {
+        for entry in repo_arch_dir.read_dir()? {
             let entry = entry?;
 
             // Make sure we skip the archive files
@@ -204,16 +216,13 @@ impl RepoGroupManager {
             fs::remove_dir_all(entry.path())?;
 
             // Also remove the old package archive
-            let arch_repo_pkg_dir = self.pkg_dir.join(repo).join(arch);
+            let repo_arch_pkg_dir = self.pkg_dir.join(repo).join(arch);
 
-            arch_repo_pkg_dir.read_dir()?.try_for_each(|res| {
+            repo_arch_pkg_dir.read_dir()?.try_for_each(|res| {
                 res.and_then(|entry: fs::DirEntry| {
                     let file_name = entry.file_name();
                     let file_name = file_name.to_string_lossy();
 
-                    // Same trick, but for package files, we also need to trim the arch
-                    let name_parts = file_name.split('-').collect::<Vec<_>>();
-                    let name = name_parts[..name_parts.len() - 3].join("-");
+                    let (name, _, _, _) = parse_pkg_filename(&file_name);
 
                     if name == pkg_name {
                         fs::remove_file(entry.path())
@@ -224,7 +233,11 @@ impl RepoGroupManager {
            })?;
 
            if sync {
-                self.sync(repo, arch)?;
+                if arch == ANY_ARCH {
+                    self.sync_all(repo)?;
+                } else {
+                    self.sync(repo, arch)?;
+                }
            }
 
            return Ok(true);
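
For context, `parse_pkg_filename` leans on Arch Linux's `name-pkgver-pkgrel-arch.pkg.tar.zst` naming scheme: only the last three hyphen-separated fields carry version, release, and architecture, so hyphenated package names survive intact. A quick standalone check of the helper as written above; the sample file name is illustrative:

fn parse_pkg_filename(file_name: &str) -> (String, &str, &str, &str) {
    let name_parts = file_name.split('-').collect::<Vec<_>>();
    let name = name_parts[..name_parts.len() - 3].join("-");
    let version = name_parts[name_parts.len() - 3];
    let release = name_parts[name_parts.len() - 2];
    let (arch, _) = name_parts[name_parts.len() - 1].split_once('.').unwrap();

    (name, version, release, arch)
}

fn main() {
    // A hyphenated package name still parses correctly, because only the
    // last three fields are treated as version, release and arch.
    let (name, version, release, arch) = parse_pkg_filename("gcc-libs-13.1.1-1-x86_64.pkg.tar.zst");

    assert_eq!(name, "gcc-libs");
    assert_eq!(version, "13.1.1");
    assert_eq!(release, "1");
    assert_eq!(arch, "x86_64");
}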

repo/mod.rs

@@ -3,30 +3,28 @@ mod package;
 
 pub use manager::RepoGroupManager;
 
+use axum::body::Body;
 use axum::extract::{BodyStream, Path, State};
+use axum::http::Request;
 use axum::http::StatusCode;
-use axum::routing::{delete, get_service, post};
+use axum::response::IntoResponse;
+use axum::routing::{delete, post};
 use axum::Router;
 use futures::StreamExt;
 use std::sync::Arc;
 use tokio::{fs, io::AsyncWriteExt};
-use tower_http::services::ServeDir;
+use tower::util::ServiceExt;
+use tower_http::services::{ServeDir, ServeFile};
 use uuid::Uuid;
 
-pub fn router(global: &crate::Global) -> Router<crate::Global> {
-    // Try to serve packages by default, and try the database files instead if not found
-    let serve_repos = get_service(
-        ServeDir::new(&global.config.pkg_dir).fallback(ServeDir::new(&global.config.repo_dir)),
-    );
-
+pub fn router() -> Router<crate::Global> {
     Router::new()
         .route("/:repo", post(post_package_archive).delete(delete_repo))
         .route("/:repo/:arch", delete(delete_arch_repo))
         .route(
             "/:repo/:arch/:filename",
-            delete(delete_package).get(serve_repos.clone()),
+            delete(delete_package).get(get_file),
        )
-        .fallback(serve_repos)
-        .with_state(global.clone())
 }
async fn post_package_archive(
@@ -44,10 +42,68 @@ async fn post_package_archive(
     }
 
     let clone = Arc::clone(&global.repo_manager);
-    tokio::task::spawn_blocking(move || clone.write().unwrap().add_pkg_from_path(&repo, &path))
-        .await??;
 
-    Ok(())
+    let path_clone = path.clone();
+    let res = tokio::task::spawn_blocking(move || {
+        clone.write().unwrap().add_pkg_from_path(&repo, &path_clone)
+    })
+    .await?;
+
+    // Remove the downloaded file if the adding failed
+    if res.is_err() {
+        let _ = tokio::fs::remove_file(path).await;
+    }
+
+    Ok(res?)
 }
 
+/// Serve the package archive files and database archives. If files are requested for an
+/// architecture that does not have any explicit packages, a repository containing only "any" files
+/// is returned.
+async fn get_file(
+    State(global): State<crate::Global>,
+    Path((repo, arch, mut file_name)): Path<(String, String, String)>,
+    req: Request<Body>,
+) -> crate::Result<impl IntoResponse> {
+    let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
+    let repo_exists = tokio::fs::try_exists(&repo_dir).await?;
+
+    let res = if file_name.ends_with(".db") || file_name.ends_with(".db.tar.gz") {
+        // Append tar extension to ensure we find the file
+        if file_name.ends_with(".db") {
+            file_name.push_str(".tar.gz");
+        };
+
+        if repo_exists {
+            ServeFile::new(repo_dir.join(file_name)).oneshot(req).await
+        } else {
+            let path = global
+                .config
+                .repo_dir
+                .join(repo)
+                .join(manager::ANY_ARCH)
+                .join(file_name);
+
+            ServeFile::new(path).oneshot(req).await
+        }
+    } else {
+        let any_file = global
+            .config
+            .pkg_dir
+            .join(repo)
+            .join(manager::ANY_ARCH)
+            .join(file_name);
+
+        if repo_exists {
+            ServeDir::new(global.config.pkg_dir)
+                .fallback(ServeFile::new(any_file))
+                .oneshot(req)
+                .await
+        } else {
+            ServeFile::new(any_file).oneshot(req).await
+        }
+    };
+
+    Ok(res)
+}
+
 async fn delete_repo(
@@ -73,7 +129,7 @@ async fn delete_arch_repo(
     let clone = Arc::clone(&global.repo_manager);
 
     let repo_removed =
-        tokio::task::spawn_blocking(move || clone.write().unwrap().remove_arch_repo(&repo, &arch))
+        tokio::task::spawn_blocking(move || clone.write().unwrap().remove_repo_arch(&repo, &arch))
            .await??;
 
    if repo_removed {
@@ -100,10 +156,7 @@ async fn delete_package(
     let clone = Arc::clone(&global.repo_manager);
 
     let pkg_removed = tokio::task::spawn_blocking(move || {
-        clone
-            .write()
-            .unwrap()
-            .remove_pkg_from_arch_repo(&repo, &arch, &name, true)
+        clone.write().unwrap().remove_pkg(&repo, &arch, &name, true)
    })
    .await??;
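
Finally, note that `get_file` drives `ServeDir`/`ServeFile` by hand through `tower::util::ServiceExt::oneshot` instead of mounting them as routes; choosing the path before the `oneshot` call is what makes the per-request fallback to the "any" architecture possible. A stripped-down sketch of the same pattern, assuming (as the handler above already does) a tower-http version whose serve services can be returned as a response; the paths and route are illustrative:

use axum::{body::Body, http::Request, response::IntoResponse, routing::get, Router};
use tower::util::ServiceExt;
use tower_http::services::ServeFile;

// Pick a file per request, then hand the request to the service once.
// Swapping the path before the oneshot call is all the fallback logic
// in get_file really does.
async fn serve_notes(req: Request<Body>) -> impl IntoResponse {
    let path = if req.uri().path().ends_with(".md") {
        "notes.md"
    } else {
        "notes.txt"
    };

    ServeFile::new(path).oneshot(req).await
}

fn router() -> Router {
    Router::new().route("/notes/:name", get(serve_notes))
}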