Compare commits


No commits in common. "dev" and "concurrent-repos" have entirely different histories.

33 changed files with 1121 additions and 1170 deletions

View File

@@ -1,39 +0,0 @@
matrix:
PLATFORM:
- 'linux/amd64'
platform: ${PLATFORM}
when:
branch: [main, dev]
event: [push, tag]
steps:
build:
image: 'git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19'
commands:
- cargo build --verbose --release
- '[ "$(readelf -d target/release/rieterd | grep NEEDED | wc -l)" = 0 ]'
publish-dev:
image: 'git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19'
commands:
- apk add --no-cache minio-client
- mcli alias set rb 'https://s3.rustybever.be' "$MINIO_ACCESS_KEY" "$MINIO_SECRET_KEY"
- mcli cp target/release/rieterd "rb/rieter/commits/$CI_COMMIT_SHA/rieterd-$(echo '${PLATFORM}' | sed 's:/:-:g')"
secrets:
- minio_access_key
- minio_secret_key
publish-rel:
image: 'curlimages/curl'
commands:
- >
curl -s --fail
--user "Chewing_Bever:$GITEA_PASSWORD"
--upload-file target/release/rieterd
https://git.rustybever.be/api/packages/Chewing_Bever/generic/rieter/"${CI_COMMIT_TAG}"/rieterd-"$(echo '${PLATFORM}' | sed 's:/:-:g')"
secrets:
- gitea_password
when:
event: tag

View File

@@ -2,20 +2,13 @@ platform: 'linux/amd64'
when:
branch:
exclude: [dev, main]
exclude: [main]
event: push
steps:
build:
image: 'git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19'
image: 'rust:1.70-alpine3.18'
commands:
- apk add --no-cache build-base libarchive libarchive-dev
- cargo build --verbose
# Binaries, even debug ones, should be statically compiled
- '[ "$(readelf -d target/debug/rieterd | grep NEEDED | wc -l)" = 0 ]'
# Clippy also performs a full build, so putting it here saves the CI a
# lot of work
clippy:
image: 'git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19'
commands:
- cargo clippy -- --no-deps -Dwarnings

View File

@@ -0,0 +1,13 @@
platform: 'linux/amd64'
when:
branch:
exclude: [main]
event: push
steps:
clippy:
image: 'rust:1.70-alpine3.18'
commands:
- rustup component add clippy
- cargo clippy -- --no-deps -Dwarnings

View File

@@ -1,11 +1,11 @@
platform: 'linux/amd64'
when:
branch: [main, dev]
event: [push, tag]
branch: dev
event: push
depends_on:
- build-rel
- build
steps:
dev:
@@ -19,25 +19,4 @@ steps:
tags:
- 'dev'
platforms: [ 'linux/amd64' ]
build_args_from_env:
- 'CI_COMMIT_SHA'
mtu: 1300
when:
branch: dev
event: push
release:
image: 'woodpeckerci/plugin-docker-buildx'
secrets:
- 'docker_username'
- 'docker_password'
settings:
registry: 'git.rustybever.be'
repo: 'git.rustybever.be/chewing_bever/rieter'
auto_tag: true
platforms: [ 'linux/amd64' ]
build_args_from_env:
- 'CI_COMMIT_SHA'
mtu: 1300
when:
event: tag

View File

@@ -7,6 +7,7 @@ when:
steps:
lint:
image: 'git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19'
image: 'rust:1.70-alpine3.18'
commands:
- rustup component add rustfmt
- cargo fmt -- --check

View File

@@ -7,15 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/rieter/src/branch/dev)
## [0.1.0](https://git.rustybever.be/Chewing_Bever/rieter/src/tag/0.1.0)
### Added
* Functional repository server
* Supports any number of repositories, grouped into distros, each
supporting any number of architectures
* Server
* Functional repository server
* Serve packages from any number of repositories & architectures
* Publish packages to and delete packages from repositories using HTTP
requests
* Packages of architecture "any" are part of every architecture's
database
* Bearer authentication for private routes
* REST API
* Repository & package information available using JSON REST API
* Queueing system with configurable number of workers for resilient
concurrency (see the sketch after this list)
* TOML configuration file
* SQLite & Postgres support
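
The queueing system above follows a common tokio pattern: one shared receiver behind a mutex, drained by a configurable number of spawned workers (the same shape appears in `pkg_parse_task` later in this comparison). A minimal self-contained sketch, with a hypothetical `Job` type standing in for the real (repository id, package path) messages:

```rust
use std::sync::Arc;

use tokio::sync::{mpsc::unbounded_channel, Mutex};

// Hypothetical job type; the real server queues (repo id, package path) pairs.
struct Job(u32);

#[tokio::main]
async fn main() {
    let (tx, rx) = unbounded_channel::<Job>();
    // One receiver shared by every worker; the mutex is only held while
    // waiting for the next message, so the workers take turns draining it.
    let rx = Arc::new(Mutex::new(rx));

    let pkg_workers = 4; // would come from the TOML config
    let mut handles = Vec::new();

    for id in 0..pkg_workers {
        let rx = Arc::clone(&rx);
        handles.push(tokio::spawn(async move {
            loop {
                // Lock, receive one message, then release the lock right away.
                let job = { rx.lock().await.recv().await };
                match job {
                    Some(Job(n)) => println!("worker {id} handling job {n}"),
                    None => break, // channel closed; worker exits
                }
            }
        }));
    }

    for n in 0..10 {
        tx.send(Job(n)).unwrap();
    }
    drop(tx); // close the channel so the workers shut down

    for handle in handles {
        handle.await.unwrap();
    }
}
```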

Cargo.lock (generated)
View File

@@ -1670,9 +1670,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.10.5"
version = "1.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
dependencies = [
"aho-corasick",
"memchr",
@@ -1732,7 +1732,6 @@ dependencies = [
"futures",
"http-body-util",
"libarchive",
"regex",
"sea-orm",
"sea-orm-migration",
"sea-query",

View File

@@ -1,16 +1,16 @@
FROM git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19 AS builder
FROM rust:1.70-alpine3.18 AS builder
ARG TARGETPLATFORM
ARG CI_COMMIT_SHA
ARG DI_VER=1.2.5
WORKDIR /app
RUN apk add --no-cache \
build-base \
curl \
make \
unzip \
pkgconf
pkgconf \
libarchive libarchive-dev
# Build dumb-init
RUN curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.tar.gz" | tar -xzf - && \
@@ -21,17 +21,33 @@ RUN curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.t
COPY . .
RUN curl \
--fail \
-o rieterd \
"https://s3.rustybever.be/rieter/commits/${CI_COMMIT_SHA}/rieterd-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
chmod +x rieterd
# ENV LIBARCHIVE_STATIC=1 \
# LIBARCHIVE_LIB_DIR=/usr/lib \
# LIBARCHIVE_INCLUDE_DIR=/usr/include \
# LIBARCHIVE_LDFLAGS='-lssl -lcrypto -L/lib -lz -lbz2 -llzma -lexpat -lzstd -llz4'
# LIBARCHIVE_LDFLAGS='-L/usr/lib -lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3'
# https://users.rust-lang.org/t/sigsegv-with-program-linked-against-openssl-in-an-alpine-container/52172
ENV RUSTFLAGS='-C target-feature=-crt-static'
RUN cargo build --release && \
du -h target/release/rieterd && \
readelf -d target/release/rieterd && \
chmod +x target/release/rieterd
FROM alpine:3.19
FROM alpine:3.18
RUN apk add --no-cache \
libgcc \
libarchive \
openssl
COPY --from=builder /app/dumb-init /bin/dumb-init
COPY --from=builder /app/rieterd /bin/rieterd
COPY --from=builder /app/target/release/rieterd /bin/rieterd
ENV RIETER_PKG_DIR=/data/pkgs \
RIETER_DATA_DIR=/data
WORKDIR /data

View File

@@ -1,20 +0,0 @@
# Command to build and push builder image (change tags as necessary):
# docker buildx build -f build.Dockerfile -t git.rustybever.be/chewing_bever/rieter-builder:1.79-alpine3.19 --platform linux/amd64,linux/arm64 --push .
FROM rust:1.79-alpine3.19
# Dependencies required to statically compile libarchive and libsqlite3
RUN apk add --no-cache \
build-base \
libarchive-static libarchive-dev \
zlib-static \
openssl-libs-static \
bzip2-static \
xz-static \
expat-static \
zstd-static \
lz4-static \
acl-static && \
rustup component add clippy rustfmt
# Tell the libarchive3-sys package to statically link libarchive
ENV LIBARCHIVE_STATIC=1

View File

@@ -386,7 +386,6 @@ pub enum ExtractOption {
ClearNoChangeFFlags,
}
#[derive(Default)]
pub struct ExtractOptions {
pub flags: i32,
}
@@ -421,3 +420,9 @@ impl ExtractOptions {
self
}
}
impl Default for ExtractOptions {
fn default() -> ExtractOptions {
ExtractOptions { flags: 0 }
}
}

View File

@@ -78,7 +78,7 @@ impl Builder {
ffi::archive_read_support_filter_program_signature(
self.handle_mut(),
c_prog.as_ptr(),
mem::transmute::<std::option::Option<extern "C" fn()>, *const std::ffi::c_void>(cb),
mem::transmute(cb),
size,
)
}

View File

@@ -41,7 +41,7 @@ impl FileWriter {
unsafe {
match ffi::archive_write_header(self.handle_mut(), entry.entry_mut()) {
ffi::ARCHIVE_OK => Ok(()),
_ => Err(ArchiveError::from(self as &dyn Handle)),
_ => Err(ArchiveError::from(self as &dyn Handle).into()),
}
}
}
@@ -50,7 +50,7 @@ impl FileWriter {
unsafe {
match ffi::archive_write_header(self.handle_mut(), entry.entry_mut()) {
ffi::ARCHIVE_OK => (),
_ => return Err(ArchiveError::from(self as &dyn Handle)),
_ => return Err(ArchiveError::from(self as &dyn Handle).into()),
}
}
@@ -74,7 +74,7 @@ impl FileWriter {
// Negative values signal errors
if res < 0 {
return Err(ArchiveError::from(self as &dyn Handle));
return Err(ArchiveError::from(self as &dyn Handle).into());
}
written += usize::try_from(res).unwrap();

View File

@@ -30,12 +30,6 @@ impl Entry for WriteEntry {
}
}
impl Default for WriteEntry {
fn default() -> Self {
Self::new()
}
}
impl Drop for WriteEntry {
fn drop(&mut self) {
unsafe { ffi::archive_entry_free(self.entry_mut()) }

View File

@@ -4,7 +4,3 @@
DYLD_LIBRARY_PATH=/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib
xcode-select --install
# 64-bit timestamps
`time_t` has been replaced with `i64` as Musl no longer supports 32-bit `time_t` values.
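
For context, this is the binding-level shape of the change that note describes; the ffi hunks later in this comparison show the same declarations moving back to `time_t`. A sketch only, assuming the generated bindings' opaque entry type rather than quoting the crate verbatim:

```rust
// Opaque handle, mirroring the generated bindings' Struct_archive_entry.
#[allow(non_camel_case_types)]
pub enum Struct_archive_entry {}

extern "C" {
    // Previously declared as `-> time_t`, whose width depended on the libc;
    // with the change described above it becomes a fixed 64-bit integer,
    // matching Musl's 64-bit-only time_t.
    pub fn archive_entry_mtime(entry: *mut Struct_archive_entry) -> i64;
    pub fn archive_entry_set_mtime(
        entry: *mut Struct_archive_entry,
        sec: i64,
        nsec: core::ffi::c_long,
    );
}
```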

View File

@@ -1,6 +1,35 @@
extern crate pkg_config;
use std::env;
fn main() {
pkg_config::Config::new()
.atleast_version("3")
.probe("libarchive")
.unwrap();
let lib_dir = env::var("LIBARCHIVE_LIB_DIR").ok();
let include_dir = env::var("LIBARCHIVE_INCLUDE_DIR").ok();
if lib_dir.is_some() && include_dir.is_some() {
println!("cargo:rustc-flags=-L native={}", lib_dir.unwrap());
println!("cargo:include={}", include_dir.unwrap());
let mode = match env::var_os("LIBARCHIVE_STATIC") {
Some(_) => "static",
None => "dylib",
};
println!("cargo:rustc-flags=-l {0}=archive", mode);
if mode == "static" {
if let Ok(ldflags) = env::var("LIBARCHIVE_LDFLAGS") {
for token in ldflags.split_whitespace() {
if token.starts_with("-L") {
println!("cargo:rustc-flags=-L native={}", token.replace("-L", ""));
} else if token.starts_with("-l") {
println!("cargo:rustc-flags=-l static={}", token.replace("-l", ""));
}
}
}
}
} else {
match pkg_config::find_library("libarchive") {
Ok(_) => (),
Err(msg) => panic!("Unable to locate libarchive, err={:?}", msg),
}
}
}

View File

@@ -294,10 +294,14 @@ extern "C" {
) -> c_int;
pub fn archive_read_extract_set_progress_callback(
arg1: *mut Struct_archive,
_progress_func: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void)>,
_progress_func: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void) -> ()>,
_user_data: *mut c_void,
);
pub fn archive_read_extract_set_skip_file(arg1: *mut Struct_archive, arg2: i64, arg3: i64);
) -> ();
pub fn archive_read_extract_set_skip_file(
arg1: *mut Struct_archive,
arg2: i64,
arg3: i64,
) -> ();
pub fn archive_read_close(arg1: *mut Struct_archive) -> c_int;
pub fn archive_read_free(arg1: *mut Struct_archive) -> c_int;
pub fn archive_read_finish(arg1: *mut Struct_archive) -> c_int;
@@ -439,7 +443,7 @@ extern "C" {
arg3: ::std::option::Option<
unsafe extern "C" fn(arg1: *mut c_void, arg2: *const c_char, arg3: i64) -> i64,
>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void)>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void) -> ()>,
) -> c_int;
pub fn archive_write_disk_set_user_lookup(
arg1: *mut Struct_archive,
@@ -447,7 +451,7 @@ extern "C" {
arg3: ::std::option::Option<
unsafe extern "C" fn(arg1: *mut c_void, arg2: *const c_char, arg3: i64) -> i64,
>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void)>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void) -> ()>,
) -> c_int;
pub fn archive_write_disk_gid(arg1: *mut Struct_archive, arg2: *const c_char, arg3: i64)
-> i64;
@@ -471,7 +475,7 @@ extern "C" {
arg3: ::std::option::Option<
unsafe extern "C" fn(arg1: *mut c_void, arg2: i64) -> *const c_char,
>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void)>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void) -> ()>,
) -> c_int;
pub fn archive_read_disk_set_uname_lookup(
arg1: *mut Struct_archive,
@@ -479,7 +483,7 @@ extern "C" {
arg3: ::std::option::Option<
unsafe extern "C" fn(arg1: *mut c_void, arg2: i64) -> *const c_char,
>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void)>,
arg4: ::std::option::Option<unsafe extern "C" fn(arg1: *mut c_void) -> ()>,
) -> c_int;
pub fn archive_read_disk_open(arg1: *mut Struct_archive, arg2: *const c_char) -> c_int;
pub fn archive_read_disk_open_w(arg1: *mut Struct_archive, arg2: *const wchar_t) -> c_int;
@@ -498,7 +502,7 @@ extern "C" {
arg1: *mut Struct_archive,
arg2: *mut c_void,
arg3: *mut Struct_archive_entry,
),
) -> (),
>,
_client_data: *mut c_void,
) -> c_int;
@@ -525,9 +529,10 @@ extern "C" {
pub fn archive_error_string(arg1: *mut Struct_archive) -> *const c_char;
pub fn archive_format_name(arg1: *mut Struct_archive) -> *const c_char;
pub fn archive_format(arg1: *mut Struct_archive) -> c_int;
pub fn archive_clear_error(arg1: *mut Struct_archive);
pub fn archive_set_error(arg1: *mut Struct_archive, _err: c_int, fmt: *const c_char, ...);
pub fn archive_copy_error(dest: *mut Struct_archive, src: *mut Struct_archive);
pub fn archive_clear_error(arg1: *mut Struct_archive) -> ();
pub fn archive_set_error(arg1: *mut Struct_archive, _err: c_int, fmt: *const c_char, ...)
-> ();
pub fn archive_copy_error(dest: *mut Struct_archive, src: *mut Struct_archive) -> ();
pub fn archive_file_count(arg1: *mut Struct_archive) -> c_int;
pub fn archive_match_new() -> *mut Struct_archive;
pub fn archive_match_free(arg1: *mut Struct_archive) -> c_int;
@@ -585,7 +590,7 @@ extern "C" {
pub fn archive_match_include_time(
arg1: *mut Struct_archive,
_flag: c_int,
_sec: i64,
_sec: time_t,
_nsec: c_long,
) -> c_int;
pub fn archive_match_include_date(
@@ -625,16 +630,16 @@ extern "C" {
pub fn archive_match_include_gname_w(arg1: *mut Struct_archive, arg2: *const wchar_t) -> c_int;
pub fn archive_entry_clear(arg1: *mut Struct_archive_entry) -> *mut Struct_archive_entry;
pub fn archive_entry_clone(arg1: *mut Struct_archive_entry) -> *mut Struct_archive_entry;
pub fn archive_entry_free(arg1: *mut Struct_archive_entry);
pub fn archive_entry_free(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_new() -> *mut Struct_archive_entry;
pub fn archive_entry_new2(arg1: *mut Struct_archive) -> *mut Struct_archive_entry;
pub fn archive_entry_atime(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_atime(arg1: *mut Struct_archive_entry) -> time_t;
pub fn archive_entry_atime_nsec(arg1: *mut Struct_archive_entry) -> c_long;
pub fn archive_entry_atime_is_set(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_birthtime(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_birthtime(arg1: *mut Struct_archive_entry) -> time_t;
pub fn archive_entry_birthtime_nsec(arg1: *mut Struct_archive_entry) -> c_long;
pub fn archive_entry_birthtime_is_set(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_ctime(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_ctime(arg1: *mut Struct_archive_entry) -> time_t;
pub fn archive_entry_ctime_nsec(arg1: *mut Struct_archive_entry) -> c_long;
pub fn archive_entry_ctime_is_set(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_dev(arg1: *mut Struct_archive_entry) -> dev_t;
@@ -646,7 +651,7 @@ extern "C" {
arg1: *mut Struct_archive_entry,
arg2: *mut c_ulong,
arg3: *mut c_ulong,
);
) -> ();
pub fn archive_entry_fflags_text(arg1: *mut Struct_archive_entry) -> *const c_char;
pub fn archive_entry_gid(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_gname(arg1: *mut Struct_archive_entry) -> *const c_char;
@@ -657,7 +662,7 @@ extern "C" {
pub fn archive_entry_ino64(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_ino_is_set(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_mode(arg1: *mut Struct_archive_entry) -> mode_t;
pub fn archive_entry_mtime(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_mtime(arg1: *mut Struct_archive_entry) -> time_t;
pub fn archive_entry_mtime_nsec(arg1: *mut Struct_archive_entry) -> c_long;
pub fn archive_entry_mtime_is_set(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_nlink(arg1: *mut Struct_archive_entry) -> c_uint;
@@ -677,17 +682,33 @@ extern "C" {
pub fn archive_entry_uid(arg1: *mut Struct_archive_entry) -> i64;
pub fn archive_entry_uname(arg1: *mut Struct_archive_entry) -> *const c_char;
pub fn archive_entry_uname_w(arg1: *mut Struct_archive_entry) -> *const wchar_t;
pub fn archive_entry_set_atime(arg1: *mut Struct_archive_entry, arg2: i64, arg3: c_long);
pub fn archive_entry_unset_atime(arg1: *mut Struct_archive_entry);
pub fn archive_entry_set_birthtime(arg1: *mut Struct_archive_entry, arg2: i64, arg3: c_long);
pub fn archive_entry_unset_birthtime(arg1: *mut Struct_archive_entry);
pub fn archive_entry_set_ctime(arg1: *mut Struct_archive_entry, arg2: i64, arg3: c_long);
pub fn archive_entry_unset_ctime(arg1: *mut Struct_archive_entry);
pub fn archive_entry_set_dev(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_devmajor(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_devminor(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_filetype(arg1: *mut Struct_archive_entry, arg2: c_uint);
pub fn archive_entry_set_fflags(arg1: *mut Struct_archive_entry, arg2: c_ulong, arg3: c_ulong);
pub fn archive_entry_set_atime(
arg1: *mut Struct_archive_entry,
arg2: time_t,
arg3: c_long,
) -> ();
pub fn archive_entry_unset_atime(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_set_birthtime(
arg1: *mut Struct_archive_entry,
arg2: time_t,
arg3: c_long,
) -> ();
pub fn archive_entry_unset_birthtime(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_set_ctime(
arg1: *mut Struct_archive_entry,
arg2: time_t,
arg3: c_long,
) -> ();
pub fn archive_entry_unset_ctime(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_set_dev(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_devmajor(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_devminor(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_filetype(arg1: *mut Struct_archive_entry, arg2: c_uint) -> ();
pub fn archive_entry_set_fflags(
arg1: *mut Struct_archive_entry,
arg2: c_ulong,
arg3: c_ulong,
) -> ();
pub fn archive_entry_copy_fflags_text(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
@@ -696,60 +717,79 @@ extern "C" {
arg1: *mut Struct_archive_entry,
arg2: *const wchar_t,
) -> *const wchar_t;
pub fn archive_entry_set_gid(arg1: *mut Struct_archive_entry, arg2: i64);
pub fn archive_entry_set_gname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_gname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_gname_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_gid(arg1: *mut Struct_archive_entry, arg2: i64) -> ();
pub fn archive_entry_set_gname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_gname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_gname_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t) -> ();
pub fn archive_entry_update_gname_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> c_int;
pub fn archive_entry_set_hardlink(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_hardlink(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_hardlink_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_hardlink(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_hardlink(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_hardlink_w(
arg1: *mut Struct_archive_entry,
arg2: *const wchar_t,
) -> ();
pub fn archive_entry_update_hardlink_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> c_int;
pub fn archive_entry_set_ino(arg1: *mut Struct_archive_entry, arg2: i64);
pub fn archive_entry_set_ino64(arg1: *mut Struct_archive_entry, arg2: i64);
pub fn archive_entry_set_link(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_link(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_link_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_ino(arg1: *mut Struct_archive_entry, arg2: i64) -> ();
pub fn archive_entry_set_ino64(arg1: *mut Struct_archive_entry, arg2: i64) -> ();
pub fn archive_entry_set_link(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_link(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_link_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t) -> ();
pub fn archive_entry_update_link_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> c_int;
pub fn archive_entry_set_mode(arg1: *mut Struct_archive_entry, arg2: mode_t);
pub fn archive_entry_set_mtime(arg1: *mut Struct_archive_entry, arg2: i64, arg3: c_long);
pub fn archive_entry_unset_mtime(arg1: *mut Struct_archive_entry);
pub fn archive_entry_set_nlink(arg1: *mut Struct_archive_entry, arg2: c_uint);
pub fn archive_entry_set_pathname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_pathname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_pathname_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_mode(arg1: *mut Struct_archive_entry, arg2: mode_t) -> ();
pub fn archive_entry_set_mtime(
arg1: *mut Struct_archive_entry,
arg2: time_t,
arg3: c_long,
) -> ();
pub fn archive_entry_unset_mtime(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_set_nlink(arg1: *mut Struct_archive_entry, arg2: c_uint) -> ();
pub fn archive_entry_set_pathname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_pathname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_pathname_w(
arg1: *mut Struct_archive_entry,
arg2: *const wchar_t,
) -> ();
pub fn archive_entry_update_pathname_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> c_int;
pub fn archive_entry_set_perm(arg1: *mut Struct_archive_entry, arg2: mode_t);
pub fn archive_entry_set_rdev(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_rdevmajor(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_rdevminor(arg1: *mut Struct_archive_entry, arg2: dev_t);
pub fn archive_entry_set_size(arg1: *mut Struct_archive_entry, arg2: i64);
pub fn archive_entry_unset_size(arg1: *mut Struct_archive_entry);
pub fn archive_entry_copy_sourcepath(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_sourcepath_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_symlink(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_symlink(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_symlink_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_perm(arg1: *mut Struct_archive_entry, arg2: mode_t) -> ();
pub fn archive_entry_set_rdev(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_rdevmajor(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_rdevminor(arg1: *mut Struct_archive_entry, arg2: dev_t) -> ();
pub fn archive_entry_set_size(arg1: *mut Struct_archive_entry, arg2: i64) -> ();
pub fn archive_entry_unset_size(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_copy_sourcepath(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> ();
pub fn archive_entry_copy_sourcepath_w(
arg1: *mut Struct_archive_entry,
arg2: *const wchar_t,
) -> ();
pub fn archive_entry_set_symlink(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_symlink(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_symlink_w(
arg1: *mut Struct_archive_entry,
arg2: *const wchar_t,
) -> ();
pub fn archive_entry_update_symlink_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
) -> c_int;
pub fn archive_entry_set_uid(arg1: *mut Struct_archive_entry, arg2: i64);
pub fn archive_entry_set_uname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_uname(arg1: *mut Struct_archive_entry, arg2: *const c_char);
pub fn archive_entry_copy_uname_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t);
pub fn archive_entry_set_uid(arg1: *mut Struct_archive_entry, arg2: i64) -> ();
pub fn archive_entry_set_uname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_uname(arg1: *mut Struct_archive_entry, arg2: *const c_char) -> ();
pub fn archive_entry_copy_uname_w(arg1: *mut Struct_archive_entry, arg2: *const wchar_t) -> ();
pub fn archive_entry_update_uname_utf8(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
@@ -757,7 +797,7 @@ extern "C" {
// pub fn archive_entry_stat(arg1: *mut Struct_archive_entry) -> *const Struct_stat;
// pub fn archive_entry_copy_stat(arg1: *mut Struct_archive_entry,
// arg2: *const Struct_stat)
// ;
// -> ();
pub fn archive_entry_mac_metadata(
arg1: *mut Struct_archive_entry,
arg2: *mut size_t,
@@ -766,8 +806,8 @@ extern "C" {
arg1: *mut Struct_archive_entry,
arg2: *const c_void,
arg3: size_t,
);
pub fn archive_entry_acl_clear(arg1: *mut Struct_archive_entry);
) -> ();
pub fn archive_entry_acl_clear(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_acl_add_entry(
arg1: *mut Struct_archive_entry,
arg2: c_int,
@@ -808,13 +848,13 @@ extern "C" {
pub fn archive_entry_acl_text(arg1: *mut Struct_archive_entry, arg2: c_int) -> *const c_char;
pub fn archive_entry_acl_count(arg1: *mut Struct_archive_entry, arg2: c_int) -> c_int;
pub fn archive_entry_acl(arg1: *mut Struct_archive_entry) -> *mut Struct_archive_acl;
pub fn archive_entry_xattr_clear(arg1: *mut Struct_archive_entry);
pub fn archive_entry_xattr_clear(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_xattr_add_entry(
arg1: *mut Struct_archive_entry,
arg2: *const c_char,
arg3: *const c_void,
arg4: size_t,
);
) -> ();
pub fn archive_entry_xattr_count(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_xattr_reset(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_xattr_next(
@@ -823,8 +863,12 @@ extern "C" {
arg3: *mut *const c_void,
arg4: *mut size_t,
) -> c_int;
pub fn archive_entry_sparse_clear(arg1: *mut Struct_archive_entry);
pub fn archive_entry_sparse_add_entry(arg1: *mut Struct_archive_entry, arg2: i64, arg3: i64);
pub fn archive_entry_sparse_clear(arg1: *mut Struct_archive_entry) -> ();
pub fn archive_entry_sparse_add_entry(
arg1: *mut Struct_archive_entry,
arg2: i64,
arg3: i64,
) -> ();
pub fn archive_entry_sparse_count(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_sparse_reset(arg1: *mut Struct_archive_entry) -> c_int;
pub fn archive_entry_sparse_next(
@@ -836,13 +880,13 @@ extern "C" {
pub fn archive_entry_linkresolver_set_strategy(
arg1: *mut Struct_archive_entry_linkresolver,
arg2: c_int,
);
pub fn archive_entry_linkresolver_free(arg1: *mut Struct_archive_entry_linkresolver);
) -> ();
pub fn archive_entry_linkresolver_free(arg1: *mut Struct_archive_entry_linkresolver) -> ();
pub fn archive_entry_linkify(
arg1: *mut Struct_archive_entry_linkresolver,
arg2: *mut *mut Struct_archive_entry,
arg3: *mut *mut Struct_archive_entry,
);
) -> ();
pub fn archive_entry_partial_links(
res: *mut Struct_archive_entry_linkresolver,
links: *mut c_uint,

View File

@@ -14,7 +14,6 @@ figment = { version = "0.10.19", features = ["env", "toml"] }
futures = "0.3.28"
http-body-util = "0.1.1"
libarchive = { path = "../libarchive" }
regex = "1.10.5"
sea-orm-migration = "0.12.1"
sea-query = { version = "0.30.7", features = ["backend-postgres", "backend-sqlite"] }
serde = { version = "1.0.178", features = ["derive"] }

View File

@@ -1,6 +1,12 @@
use std::path::PathBuf;
use crate::{Config, FsConfig, Global};
use std::{io, path::PathBuf, sync::Arc};
use axum::Router;
use clap::Parser;
use sea_orm_migration::MigratorTrait;
use tower_http::trace::TraceLayer;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
@@ -13,3 +19,57 @@ pub struct Cli {
)]
pub config_file: PathBuf,
}
impl Cli {
pub async fn run(&self) -> crate::Result<()> {
let config: Config = Config::figment(&self.config_file)
.extract()
.inspect_err(|e| tracing::error!("{}", e))?;
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(config.log_level.clone()))
.with(tracing_subscriber::fmt::layer())
.init();
tracing::info!("Connecting to database");
let db = crate::db::connect(&config.db).await?;
crate::db::Migrator::up(&db, None).await?;
let mgr = match &config.fs {
FsConfig::Local { data_dir } => {
crate::repo::RepoMgr::new(data_dir.join("repos"), db.clone()).await?
}
};
let mgr = Arc::new(mgr);
for _ in 0..config.pkg_workers {
let clone = Arc::clone(&mgr);
tokio::spawn(async move { clone.pkg_parse_task().await });
}
let global = Global {
config: config.clone(),
mgr,
db,
};
// build our application with a single route
let app = Router::new()
.nest("/api", crate::api::router())
.merge(crate::repo::router(&config.api_key))
.with_state(global)
.layer(TraceLayer::new_for_http());
let domain: String = format!("{}:{}", config.domain, config.port)
.parse()
.unwrap();
let listener = tokio::net::TcpListener::bind(domain).await?;
// run it with hyper on localhost:3000
Ok(axum::serve(listener, app.into_make_service())
.await
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?)
}
}

View File

@@ -88,7 +88,7 @@ pub async fn connect(conn: &DbConfig) -> crate::Result<DbConn> {
} => {
let mut url = format!("postgres://{}:{}@{}:{}/{}", user, password, host, port, db);
if !schema.is_empty() {
if schema != "" {
url = format!("{url}?currentSchema={schema}");
}

View File

@@ -1,3 +1,5 @@
pub mod distro;
pub mod package;
pub mod repo;
type Result<T> = std::result::Result<T, sea_orm::DbErr>;

View File

@@ -1,12 +1,10 @@
use crate::db::{self, *};
use futures::Stream;
use sea_orm::{sea_query::IntoCondition, *};
use sea_query::{Alias, Expr, Query, SelectStatement};
use sea_query::{Alias, Asterisk, Expr, IntoColumnRef, Query, SelectStatement};
use serde::Deserialize;
/// How many fields may be inserted at once into the database.
const PACKAGE_INSERT_LIMIT: usize = 1000;
#[derive(Deserialize)]
pub struct Filter {
repo: Option<i32>,
@@ -60,17 +58,17 @@ pub async fn by_id(conn: &DbConn, id: i32) -> Result<Option<package::Model>> {
pub async fn by_fields(
conn: &DbConn,
repo_id: i32,
name: &str,
version: &str,
arch: &str,
compression: &str,
name: &str,
version: Option<&str>,
compression: Option<&str>,
) -> Result<Option<package::Model>> {
let cond = Condition::all()
.add(package::Column::RepoId.eq(repo_id))
.add(package::Column::Name.eq(name))
.add(package::Column::Arch.eq(arch))
.add(package::Column::Version.eq(version))
.add(package::Column::Compression.eq(compression));
.add_option(version.map(|version| package::Column::Version.eq(version)))
.add_option(compression.map(|compression| package::Column::Compression.eq(compression)));
Package::find().filter(cond).one(conn).await
}
@@ -163,34 +161,23 @@
.iter()
.map(|s| (PackageRelatedEnum::Optdepend, s)),
);
let related = crate::util::Chunked::new(related, PACKAGE_INSERT_LIMIT);
for chunk in related {
PackageRelated::insert_many(
chunk
.into_iter()
.map(|(t, s)| package_related::ActiveModel {
PackageRelated::insert_many(related.map(|(t, s)| package_related::ActiveModel {
package_id: Set(pkg_entry.id),
r#type: Set(t),
name: Set(s.to_string()),
}),
)
}))
.on_empty_do_nothing()
.exec(&txn)
.await?;
}
let files = crate::util::Chunked::new(pkg.files, PACKAGE_INSERT_LIMIT);
for chunk in files {
PackageFile::insert_many(chunk.into_iter().map(|s| package_file::ActiveModel {
PackageFile::insert_many(pkg.files.iter().map(|s| package_file::ActiveModel {
package_id: Set(pkg_entry.id),
path: Set(s.display().to_string()),
}))
.on_empty_do_nothing()
.exec(&txn)
.await?;
}
txn.commit().await?;
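
The removed lines above used a `Chunked` helper from `crate::util` that isn't included in this comparison. A minimal sketch of what such an adapter could look like, assuming it simply batches any iterator into `Vec`s of at most `PACKAGE_INSERT_LIMIT` items so each `insert_many` stays under the database's bind-parameter limit:

```rust
/// Sketch of a chunking adapter: wraps any iterator and yields Vecs of at
/// most `size` items, so one large insert can be split into several smaller
/// ones.
pub struct Chunked<I: Iterator> {
    inner: I,
    size: usize,
}

impl<I: Iterator> Chunked<I> {
    pub fn new<T: IntoIterator<IntoIter = I>>(iter: T, size: usize) -> Self {
        Self {
            inner: iter.into_iter(),
            size,
        }
    }
}

impl<I: Iterator> Iterator for Chunked<I> {
    type Item = Vec<I::Item>;

    fn next(&mut self) -> Option<Self::Item> {
        let chunk: Vec<_> = self.inner.by_ref().take(self.size).collect();
        if chunk.is_empty() {
            None
        } else {
            Some(chunk)
        }
    }
}

fn main() {
    // 2500 rows split into batches of at most 1000.
    let batches: Vec<Vec<u32>> = Chunked::new(0..2500u32, 1000).collect();
    assert_eq!(
        batches.iter().map(Vec::len).collect::<Vec<_>>(),
        [1000, 1000, 500]
    );
}
```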

View File

@@ -1,100 +1,28 @@
mod api;
mod cli;
mod config;
pub mod db;
mod error;
mod repo;
mod util;
mod web;
pub use config::{Config, DbConfig, FsConfig};
pub use error::{Result, ServerError};
use std::{io, path::PathBuf};
use std::sync::Arc;
use clap::Parser;
use sea_orm_migration::MigratorTrait;
use tokio::runtime;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
pub const ANY_ARCH: &str = "any";
pub const PKG_FILENAME_REGEX: &str = "^([a-z0-9@._+-]+)-((?:[0-9]+:)?[a-zA-Z0-9@._+]+-[0-9]+)-([a-zA-z0-9_]+).pkg.tar.([a-zA-Z0-9]+)$";
pub const ANY_ARCH: &'static str = "any";
#[derive(Clone)]
pub struct Global {
config: crate::config::Config,
repo: repo::Handle,
mgr: Arc<repo::RepoMgr>,
db: sea_orm::DbConn,
pkg_filename_re: regex::Regex,
}
fn main() -> crate::Result<()> {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap();
let handle = rt.handle();
#[tokio::main]
async fn main() -> crate::Result<()> {
let cli = cli::Cli::parse();
let global = setup(handle, cli.config_file)?;
handle.block_on(run(global))
}
fn setup(rt: &runtime::Handle, config_file: PathBuf) -> crate::Result<Global> {
let config: Config = Config::figment(config_file)
.extract()
.inspect_err(|e| tracing::error!("{}", e))?;
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(config.log_level.clone()))
.with(tracing_subscriber::fmt::layer())
.init();
tracing::info!("Connecting to database");
let db = rt.block_on(crate::db::connect(&config.db))?;
rt.block_on(crate::db::Migrator::up(&db, None))?;
let repo = match &config.fs {
FsConfig::Local { data_dir } => {
crate::repo::start(
data_dir.join("repos"),
db.clone(),
rt.clone(),
config.pkg_workers,
)?
//rt.block_on(crate::repo::RepoMgr::new(
// data_dir.join("repos"),
// db.clone(),
//))?
//RepoHandle::start(data_dir.join("repos"), db.clone(), config.pkg_workers, rt.clone())?
}
};
//let mgr = Arc::new(mgr);
//
//for _ in 0..config.pkg_workers {
// let clone = Arc::clone(&mgr);
//
// rt.spawn(async move { clone.pkg_parse_task().await });
//}
Ok(Global {
config: config.clone(),
repo,
db,
pkg_filename_re: regex::Regex::new(PKG_FILENAME_REGEX).unwrap(),
})
}
async fn run(global: Global) -> crate::Result<()> {
let domain: String = format!("{}:{}", &global.config.domain, global.config.port)
.parse()
.unwrap();
let listener = tokio::net::TcpListener::bind(domain).await?;
let app = web::router(global);
// run it with hyper on localhost:3000
Ok(axum::serve(listener, app.into_make_service())
.await
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?)
cli.run().await
}

View File

@@ -1,245 +0,0 @@
use super::{archive, package, Command, SharedState};
use crate::db;
use std::{
path::PathBuf,
sync::{atomic::Ordering, Arc},
};
use futures::StreamExt;
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, QuerySelect};
use sea_query::Expr;
use tokio::{runtime, sync::mpsc};
use uuid::Uuid;
/// The actor is responsible for mutating the repositories. It receives its commands through
/// messages and processes them in both a synchronous and an asynchronous way.
pub struct Actor {
rt: runtime::Handle,
state: Arc<SharedState>,
}
impl Actor {
pub fn new(rt: runtime::Handle, state: Arc<SharedState>) -> Self {
Self {
rt,
state: Arc::clone(&state),
}
}
pub fn random_file_paths<const C: usize>(&self) -> [PathBuf; C] {
std::array::from_fn(|_| {
let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
self.state.repos_dir.join(uuid.to_string())
})
}
/// Run the main actor loop
pub fn run(self) {
while let Some(msg) = {
let mut rx = self.state.rx.lock().unwrap();
rx.blocking_recv()
} {
match msg {
Command::ParsePkg(repo, path) => {
let _ = self.parse_pkg(repo, path);
if self
.state
.repos
.blocking_read()
.get(&repo)
.map(|n| n.0.load(Ordering::SeqCst))
== Some(0)
{
let _ = self.sync_repo(repo);
let _ = self.clean();
}
}
Command::SyncRepo(repo) => {
let _ = self.sync_repo(repo);
}
Command::Clean => {
let _ = self.clean();
}
}
}
}
/// Parse a queued package for the given repository.
fn parse_pkg(&self, repo: i32, path: PathBuf) -> crate::Result<()> {
let pkg = package::Package::open(&path)?;
let pkg = self
.rt
.block_on(db::query::package::insert(&self.state.conn, repo, pkg))?;
let dest_path = self
.state
.repos_dir
.join(repo.to_string())
.join(pkg.id.to_string());
std::fs::rename(path, dest_path)?;
tracing::info!(
"Added '{}-{}-{}' to repository {}",
pkg.name,
pkg.version,
pkg.arch,
repo,
);
self.state.repos.blocking_read().get(&repo).inspect(|n| {
n.0.fetch_sub(1, Ordering::SeqCst);
});
Ok(())
}
fn sync_repo(&self, repo: i32) -> crate::Result<()> {
let repos = self.state.repos.blocking_read();
if let Some(_guard) = repos.get(&repo).map(|n| n.1.lock()) {
let archs: Vec<String> = self.rt.block_on(
db::Package::find()
.filter(db::package::Column::RepoId.eq(repo))
.select_only()
.column(db::package::Column::Arch)
.distinct()
.into_tuple()
.all(&self.state.conn),
)?;
for arch in archs {
self.generate_archives(repo, &arch)?;
}
}
Ok(())
}
fn generate_archives(&self, repo: i32, arch: &str) -> crate::Result<()> {
let [tmp_ar_db_path, tmp_ar_files_path] = self.random_file_paths();
let mut ars = archive::RepoArchivesWriter::new(
&tmp_ar_db_path,
&tmp_ar_files_path,
self.random_file_paths(),
&self.rt,
&self.state.conn,
)?;
let (tx, mut rx) = mpsc::channel(1);
let conn = self.state.conn.clone();
let query = db::query::package::pkgs_to_sync(&self.state.conn, repo, arch);
// sea_orm needs its connections to be dropped inside an async context, so we spawn a task
// that streams the responses to the synchronous context via message passing
self.rt.spawn(async move {
match query.stream(&conn).await {
Ok(mut stream) => {
while let Some(res) = stream.next().await {
let is_err = res.is_err();
let _ = tx.send(res).await;
if is_err {
return;
}
}
}
Err(err) => {
let _ = tx.send(Err(err)).await;
}
}
});
let mut committed_ids: Vec<i32> = Vec::new();
while let Some(pkg) = rx.blocking_recv().transpose()? {
committed_ids.push(pkg.id);
ars.append_pkg(&pkg)?;
}
ars.close()?;
// Move newly generated package archives to their correct place
let repo_dir = self.state.repos_dir.join(repo.to_string());
std::fs::rename(tmp_ar_db_path, repo_dir.join(format!("{}.db.tar.gz", arch)))?;
std::fs::rename(
tmp_ar_files_path,
repo_dir.join(format!("{}.files.tar.gz", arch)),
)?;
// Update the state for the newly committed packages
self.rt.block_on(
db::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::Committed),
)
.filter(db::package::Column::Id.is_in(committed_ids))
.exec(&self.state.conn),
)?;
tracing::info!("Package archives generated for repo {} ('{}')", repo, arch);
Ok(())
}
fn clean(&self) -> crate::Result<()> {
let (tx, mut rx) = mpsc::channel(1);
let conn = self.state.conn.clone();
let query = db::query::package::stale_pkgs(&self.state.conn);
// sea_orm needs its connections to be dropped inside an async context, so we spawn a task
// that streams the responses to the synchronous context via message passing
self.rt.spawn(async move {
match query.stream(&conn).await {
Ok(mut stream) => {
while let Some(res) = stream.next().await {
let is_err = res.is_err();
let _ = tx.send(res).await;
if is_err {
return;
}
}
}
Err(err) => {
let _ = tx.send(Err(err)).await;
}
}
});
// Ids are monotonically increasing, so the max id suffices to know which packages to
// remove later
let mut max_id = -1;
let mut removed_pkgs = 0;
while let Some(pkg) = rx.blocking_recv().transpose()? {
// Failing to remove the package file isn't the biggest problem
let _ = std::fs::remove_file(
self.state
.repos_dir
.join(pkg.repo_id.to_string())
.join(pkg.id.to_string()),
);
if pkg.id > max_id {
max_id = pkg.id;
}
removed_pkgs += 1;
}
if removed_pkgs > 0 {
self.rt.block_on(db::query::package::delete_stale_pkgs(
&self.state.conn,
max_id,
))?;
}
tracing::info!("Cleaned up {removed_pkgs} old package(s)");
Ok(())
}
}
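
The actor above couples a blocking OS thread to the async runtime through a `runtime::Handle`. The `start` function that spawned it isn't part of this comparison, so the wiring below is a hedged, self-contained sketch with `SharedState` cut down to just the queue:

```rust
use std::sync::{Arc, Mutex};

use tokio::{runtime, sync::mpsc};

enum Command {
    Clean,
}

struct SharedState {
    rx: Mutex<mpsc::UnboundedReceiver<Command>>,
}

struct Actor {
    rt: runtime::Handle,
    state: Arc<SharedState>,
}

impl Actor {
    /// Main loop: block on the queue from a plain OS thread, reaching back
    /// into the async world through the runtime handle when needed.
    fn run(self) {
        while let Some(cmd) = {
            let mut rx = self.state.rx.lock().unwrap();
            rx.blocking_recv()
        } {
            match cmd {
                Command::Clean => self.rt.block_on(async {
                    // async cleanup would run here
                }),
            }
        }
    }
}

fn main() {
    let rt = runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap();
    let (tx, rx) = mpsc::unbounded_channel();
    let state = Arc::new(SharedState { rx: Mutex::new(rx) });

    // The actor lives on its own thread, outside the async runtime.
    let actor = Actor {
        rt: rt.handle().clone(),
        state,
    };
    let worker = std::thread::spawn(move || actor.run());

    tx.send(Command::Clean).unwrap();
    drop(tx); // closing the channel ends the actor loop
    worker.join().unwrap();
}
```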

View File

@@ -1,224 +1,78 @@
use crate::db;
use std::{
io::Write,
io,
path::{Path, PathBuf},
sync::{Arc, Mutex},
};
use futures::StreamExt;
use libarchive::{
write::{Builder, FileWriter, WriteEntry},
Entry, WriteFilter, WriteFormat,
};
use sea_orm::{ColumnTrait, DbConn, ModelTrait, QueryFilter, QuerySelect};
use tokio::{runtime, sync::mpsc};
pub struct RepoArchivesWriter {
ar_db: FileWriter,
ar_files: FileWriter,
rt: runtime::Handle,
conn: DbConn,
tmp_paths: [PathBuf; 2],
/// Struct that abstracts away the intricacies of writing entries to an archive file
pub struct RepoArchiveWriter {
ar: Arc<Mutex<FileWriter>>,
}
impl RepoArchivesWriter {
pub fn new(
ar_db_path: impl AsRef<Path>,
ar_files_path: impl AsRef<Path>,
tmp_paths: [impl AsRef<Path>; 2],
rt: &runtime::Handle,
conn: &sea_orm::DbConn,
) -> crate::Result<Self> {
let ar_db = Self::open_ar(ar_db_path)?;
let ar_files = Self::open_ar(ar_files_path)?;
impl RepoArchiveWriter {
pub async fn open<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let path = PathBuf::from(path.as_ref());
Ok(Self {
ar_db,
ar_files,
rt: rt.clone(),
conn: conn.clone(),
tmp_paths: [
tmp_paths[0].as_ref().to_path_buf(),
tmp_paths[1].as_ref().to_path_buf(),
],
})
}
fn open_ar(path: impl AsRef<Path>) -> crate::Result<FileWriter> {
// Open the archive file
let ar = tokio::task::spawn_blocking(move || {
let mut builder = Builder::new();
builder.add_filter(WriteFilter::Gzip)?;
builder.set_format(WriteFormat::PaxRestricted)?;
Ok(builder.open_file(path)?)
builder.open_file(path)
})
.await
.unwrap()?;
Ok(Self {
// In practice, the mutex is only ever used by one thread at a time. It's simply here so we
// can use spawn_blocking without issues.
ar: Arc::new(Mutex::new(ar)),
})
}
fn append_entry(
ar: &mut FileWriter,
src_path: impl AsRef<Path>,
dest_path: impl AsRef<Path>,
) -> crate::Result<()> {
let metadata = std::fs::metadata(&src_path)?;
/// Add either a "desc" or "files" entry to the archive
pub async fn add_entry<P: AsRef<Path>>(
&self,
full_name: &str,
path: P,
desc: bool,
) -> io::Result<()> {
let metadata = tokio::fs::metadata(&path).await?;
let file_size = metadata.len();
let ar = Arc::clone(&self.ar);
let full_name = String::from(full_name);
let path = PathBuf::from(path.as_ref());
Ok(tokio::task::spawn_blocking(move || {
let mut ar_entry = WriteEntry::new();
ar_entry.set_filetype(libarchive::archive::FileType::RegularFile);
ar_entry.set_pathname(dest_path);
let name = if desc { "desc" } else { "files" };
ar_entry.set_pathname(PathBuf::from(full_name).join(name));
ar_entry.set_mode(0o100644);
ar_entry.set_size(file_size.try_into().unwrap());
Ok(ar.append_path(&mut ar_entry, src_path)?)
ar.lock().unwrap().append_path(&mut ar_entry, path)
})
.await
.unwrap()?)
}
pub fn append_pkg(&mut self, pkg: &db::package::Model) -> crate::Result<()> {
self.write_desc(&self.tmp_paths[0], pkg)?;
self.write_files(&self.tmp_paths[1], pkg)?;
pub async fn close(&self) -> io::Result<()> {
let ar = Arc::clone(&self.ar);
let full_name = format!("{}-{}", pkg.name, pkg.version);
let dest_desc_path = format!("{}/desc", full_name);
let dest_files_path = format!("{}/files", full_name);
Self::append_entry(&mut self.ar_db, &self.tmp_paths[0], &dest_desc_path)?;
Self::append_entry(&mut self.ar_files, &self.tmp_paths[0], &dest_desc_path)?;
Self::append_entry(&mut self.ar_files, &self.tmp_paths[1], &dest_files_path)?;
Ok(())
}
/// Generate a "files" archive entry for the package in the given path
fn write_files(&self, path: impl AsRef<Path>, pkg: &db::package::Model) -> crate::Result<()> {
let mut f = std::io::BufWriter::new(std::fs::File::create(path)?);
writeln!(f, "%FILES%")?;
let (tx, mut rx) = mpsc::channel(1);
let conn = self.conn.clone();
let query = pkg.find_related(db::PackageFile);
self.rt.spawn(async move {
match query.stream(&conn).await {
Ok(mut stream) => {
while let Some(res) = stream.next().await {
let is_err = res.is_err();
let _ = tx.send(res).await;
if is_err {
return;
}
}
}
Err(err) => {
let _ = tx.send(Err(err)).await;
}
}
});
while let Some(file) = rx.blocking_recv().transpose()? {
writeln!(f, "{}", file.path)?;
}
f.flush()?;
Ok(())
}
fn write_desc(&self, path: impl AsRef<Path>, pkg: &db::package::Model) -> crate::Result<()> {
let mut f = std::io::BufWriter::new(std::fs::File::create(path)?);
let filename = format!(
"{}-{}-{}.pkg.tar.{}",
pkg.name, pkg.version, pkg.arch, pkg.compression
);
writeln!(f, "%FILENAME%\n{}", filename)?;
let mut write_attr = |k: &str, v: &str| {
if !v.is_empty() {
writeln!(f, "\n%{}%\n{}", k, v)
} else {
Ok(())
}
};
write_attr("NAME", &pkg.name)?;
write_attr("BASE", &pkg.base)?;
write_attr("VERSION", &pkg.version)?;
if let Some(ref desc) = pkg.description {
write_attr("DESC", desc)?;
}
let groups: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageGroup)
.select_only()
.column(db::package_group::Column::Name)
.into_tuple()
.all(&self.conn),
)?;
write_attr("GROUPS", &groups.join("\n"))?;
write_attr("CSIZE", &pkg.c_size.to_string())?;
write_attr("ISIZE", &pkg.size.to_string())?;
write_attr("SHA256SUM", &pkg.sha256_sum)?;
if let Some(ref url) = pkg.url {
write_attr("URL", url)?;
}
let licenses: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageLicense)
.select_only()
.column(db::package_license::Column::Name)
.into_tuple()
.all(&self.conn),
)?;
write_attr("LICENSE", &licenses.join("\n"))?;
write_attr("ARCH", &pkg.arch)?;
// TODO build date
write_attr(
"BUILDDATE",
&pkg.build_date.and_utc().timestamp().to_string(),
)?;
if let Some(ref packager) = pkg.packager {
write_attr("PACKAGER", packager)?;
}
let related = [
("REPLACES", db::PackageRelatedEnum::Replaces),
("CONFLICTS", db::PackageRelatedEnum::Conflicts),
("PROVIDES", db::PackageRelatedEnum::Provides),
("DEPENDS", db::PackageRelatedEnum::Depend),
("OPTDEPENDS", db::PackageRelatedEnum::Optdepend),
("MAKEDEPENDS", db::PackageRelatedEnum::Makedepend),
("CHECKDEPENDS", db::PackageRelatedEnum::Checkdepend),
];
for (key, attr) in related.into_iter() {
let items: Vec<String> = self.rt.block_on(
pkg.find_related(db::PackageRelated)
.filter(db::package_related::Column::Type.eq(attr))
.select_only()
.column(db::package_related::Column::Name)
.into_tuple()
.all(&self.conn),
)?;
write_attr(key, &items.join("\n"))?;
}
f.flush()?;
Ok(())
}
pub fn close(&mut self) -> crate::Result<()> {
self.ar_db.close()?;
self.ar_files.close()?;
let _ = std::fs::remove_file(&self.tmp_paths[0]);
let _ = std::fs::remove_file(&self.tmp_paths[1]);
Ok(())
Ok(
tokio::task::spawn_blocking(move || ar.lock().unwrap().close())
.await
.unwrap()?,
)
}
}
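
The `Arc<Mutex<FileWriter>>` trick above generalizes: a blocking writer sits behind a mutex purely so it can be moved into `spawn_blocking` closures, even though only one task ever touches it at a time. Stripped of the libarchive specifics, with a hypothetical `Writer` standing in for `FileWriter`:

```rust
use std::{
    io::Write,
    sync::{Arc, Mutex},
};

// Stand-in for libarchive's FileWriter: any blocking writer works here.
struct Writer(Vec<u8>);

#[derive(Clone)]
struct AsyncWriter {
    // Only ever locked by one task at a time; the mutex is there so the
    // shared writer can be used from spawn_blocking closures.
    inner: Arc<Mutex<Writer>>,
}

impl AsyncWriter {
    async fn add_entry(&self, data: Vec<u8>) -> std::io::Result<()> {
        let inner = Arc::clone(&self.inner);
        // Hop onto the blocking thread pool for the actual write.
        tokio::task::spawn_blocking(move || inner.lock().unwrap().0.write_all(&data))
            .await
            .unwrap()
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let writer = AsyncWriter {
        inner: Arc::new(Mutex::new(Writer(Vec::new()))),
    };
    writer.add_entry(b"desc".to_vec()).await?;
    println!("{} bytes written", writer.inner.lock().unwrap().0.len());
    Ok(())
}
```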

View File

@@ -1,144 +0,0 @@
use super::{Command, SharedState};
use crate::db;
use std::{
path::PathBuf,
sync::{atomic::Ordering, Arc},
};
use sea_orm::{
ActiveModelTrait, ColumnTrait, Condition, EntityTrait, NotSet, QueryFilter, QuerySelect, Set,
};
use sea_query::Expr;
use uuid::Uuid;
#[derive(Clone)]
pub struct Handle {
state: Arc<SharedState>,
}
impl Handle {
pub fn new(state: &Arc<SharedState>) -> Self {
Self {
state: Arc::clone(state),
}
}
pub fn random_file_paths<const C: usize>(&self) -> [PathBuf; C] {
std::array::from_fn(|_| {
let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
self.state.repos_dir.join(uuid.to_string())
})
}
pub async fn get_or_create_repo(&self, distro: &str, repo: &str) -> crate::Result<i32> {
let mut repos = self.state.repos.write().await;
let distro_id: Option<i32> = db::Distro::find()
.filter(db::distro::Column::Name.eq(distro))
.select_only()
.column(db::distro::Column::Id)
.into_tuple()
.one(&self.state.conn)
.await?;
let distro_id = if let Some(id) = distro_id {
id
} else {
let new_distro = db::distro::ActiveModel {
id: NotSet,
name: Set(distro.to_string()),
description: NotSet,
};
new_distro.insert(&self.state.conn).await?.id
};
let repo_id: Option<i32> = db::Repo::find()
.filter(db::repo::Column::DistroId.eq(distro_id))
.filter(db::repo::Column::Name.eq(repo))
.select_only()
.column(db::repo::Column::Id)
.into_tuple()
.one(&self.state.conn)
.await?;
let repo_id = if let Some(id) = repo_id {
id
} else {
let new_repo = db::repo::ActiveModel {
id: NotSet,
distro_id: Set(distro_id),
name: Set(repo.to_string()),
description: NotSet,
};
let id = new_repo.insert(&self.state.conn).await?.id;
tokio::fs::create_dir(self.state.repos_dir.join(id.to_string())).await?;
repos.insert(id, Default::default());
id
};
Ok(repo_id)
}
pub async fn get_repo(&self, distro: &str, repo: &str) -> crate::Result<Option<i32>> {
Ok(db::Repo::find()
.find_also_related(db::Distro)
.filter(
Condition::all()
.add(db::repo::Column::Name.eq(repo))
.add(db::distro::Column::Name.eq(distro)),
)
.one(&self.state.conn)
.await
.map(|res| res.map(|(repo, _)| repo.id))?)
}
pub async fn remove_repo(&self, repo: i32) -> crate::Result<()> {
self.state.repos.write().await.remove(&repo);
db::Repo::delete_by_id(repo).exec(&self.state.conn).await?;
let _ = tokio::fs::remove_dir_all(self.state.repos_dir.join(repo.to_string())).await;
Ok(())
}
/// Remove all packages in the repository that have a given arch. This method marks all
/// packages with the given architecture as "pending deletion", before performing a manual sync
/// & removal of stale packages.
pub async fn remove_repo_arch(&self, repo: i32, arch: &str) -> crate::Result<()> {
db::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::PendingDeletion),
)
.filter(
Condition::all()
.add(db::package::Column::RepoId.eq(repo))
.add(db::package::Column::Arch.eq(arch)),
)
.exec(&self.state.conn)
.await?;
self.queue_sync(repo).await;
self.queue_clean().await;
Ok(())
}
pub async fn queue_pkg(&self, repo: i32, path: PathBuf) {
self.state.tx.send(Command::ParsePkg(repo, path)).unwrap();
self.state.repos.read().await.get(&repo).inspect(|n| {
n.0.fetch_add(1, Ordering::SeqCst);
});
}
async fn queue_sync(&self, repo: i32) {
self.state.tx.send(Command::SyncRepo(repo)).unwrap();
}
async fn queue_clean(&self) {
self.state.tx.send(Command::Clean).unwrap();
}
}

View File

@@ -0,0 +1,385 @@
use super::{archive, package};
use crate::db::{self, query::package::delete_stale_pkgs};
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{
atomic::{AtomicU32, Ordering},
Arc,
},
};
use futures::StreamExt;
use sea_orm::{
ActiveModelTrait, ColumnTrait, Condition, ConnectionTrait, DbConn, EntityTrait, JoinType,
ModelTrait, NotSet, QueryFilter, QuerySelect, Related, RelationTrait, Set, TransactionTrait,
};
use sea_query::{Alias, Expr, Query};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
Mutex, RwLock,
};
use uuid::Uuid;
struct PkgQueueMsg {
repo: i32,
path: PathBuf,
}
/// A single instance of this struct orchestrates everything related to managing package files on
/// disk for all repositories in the server
pub struct RepoMgr {
repos_dir: PathBuf,
conn: DbConn,
pkg_queue: (
UnboundedSender<PkgQueueMsg>,
Mutex<UnboundedReceiver<PkgQueueMsg>>,
),
repos: RwLock<HashMap<i32, (AtomicU32, Arc<Mutex<()>>)>>,
}
impl RepoMgr {
pub async fn new<P: AsRef<Path>>(repos_dir: P, conn: DbConn) -> crate::Result<Self> {
if !tokio::fs::try_exists(&repos_dir).await? {
tokio::fs::create_dir(&repos_dir).await?;
}
let (tx, rx) = unbounded_channel();
let mut repos = HashMap::new();
let repo_ids: Vec<i32> = db::Repo::find()
.select_only()
.column(db::repo::Column::Id)
.into_tuple()
.all(&conn)
.await?;
for id in repo_ids {
repos.insert(id, Default::default());
}
Ok(Self {
repos_dir: repos_dir.as_ref().to_path_buf(),
conn,
pkg_queue: (tx, Mutex::new(rx)),
repos: RwLock::new(repos),
})
}
/// Generate archive databases for all known architectures in the repository, including the
/// "any" architecture.
pub async fn sync_repo(&self, repo: i32) -> crate::Result<()> {
let lock = self
.repos
.read()
.await
.get(&repo)
.map(|(_, lock)| Arc::clone(lock));
if lock.is_none() {
return Ok(());
}
let lock = lock.unwrap();
let _guard = lock.lock().await;
let archs: Vec<String> = db::Package::find()
.filter(db::package::Column::RepoId.eq(repo))
.select_only()
.column(db::package::Column::Arch)
.distinct()
.into_tuple()
.all(&self.conn)
.await?;
for arch in archs {
self.generate_archives(repo, &arch).await?;
}
Ok(())
}
/// Generate the archive databases for the given repository and architecture.
async fn generate_archives(&self, repo: i32, arch: &str) -> crate::Result<()> {
let [tmp_ar_db_path, tmp_ar_files_path, files_tmp_file_path, desc_tmp_file_path] =
self.random_file_paths();
let ar_db = archive::RepoArchiveWriter::open(&tmp_ar_db_path).await?;
let ar_files = archive::RepoArchiveWriter::open(&tmp_ar_files_path).await?;
// Query all packages in the repo that have the given architecture or the "any"
// architecture
let mut pkgs = db::query::package::pkgs_to_sync(&self.conn, repo, arch)
.stream(&self.conn)
.await?;
let mut committed_ids: Vec<i32> = Vec::new();
while let Some(pkg) = pkgs.next().await.transpose()? {
committed_ids.push(pkg.id);
let mut files_tmp_file = tokio::fs::File::create(&files_tmp_file_path).await?;
let mut desc_tmp_file = tokio::fs::File::create(&desc_tmp_file_path).await?;
package::write_files(&self.conn, &mut files_tmp_file, &pkg).await?;
package::write_desc(&self.conn, &mut desc_tmp_file, &pkg).await?;
let full_name = format!("{}-{}", pkg.name, pkg.version);
ar_db
.add_entry(&full_name, &desc_tmp_file_path, true)
.await?;
ar_files
.add_entry(&full_name, &desc_tmp_file_path, true)
.await?;
ar_files
.add_entry(&full_name, &files_tmp_file_path, false)
.await?;
}
// Cleanup
ar_db.close().await?;
ar_files.close().await?;
let repo_dir = self.repos_dir.join(repo.to_string());
// Move the db archives to their respective places
tokio::fs::rename(tmp_ar_db_path, repo_dir.join(format!("{}.db.tar.gz", arch))).await?;
tokio::fs::rename(
tmp_ar_files_path,
repo_dir.join(format!("{}.files.tar.gz", arch)),
)
.await?;
// Only after we have successfully written everything to disk do we update the database.
// This order ensures any failure can be recovered, as the database is our single source of
// truth.
db::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::Committed),
)
.filter(db::package::Column::Id.is_in(committed_ids))
.exec(&self.conn)
.await?;
// If this fails, there's no point in failing the function; besides, if there were no packages
// in the repo, this fails anyway because the temp file doesn't exist
let _ = tokio::fs::remove_file(desc_tmp_file_path).await;
let _ = tokio::fs::remove_file(files_tmp_file_path).await;
tracing::info!("Package archives generated for repo {} ('{}')", repo, arch);
Ok(())
}
/// Clean any remaining old package files from the database and file system
pub async fn remove_stale_pkgs(&self) -> crate::Result<()> {
let mut pkgs = db::query::package::stale_pkgs(&self.conn)
.stream(&self.conn)
.await?;
// Ids are monotonically increasing, so the max id suffices to know which packages to
// remove later
let mut max_id = -1;
let mut removed_pkgs = 0;
while let Some(pkg) = pkgs.next().await.transpose()? {
// Failing to remove the package file isn't the biggest problem
let _ = tokio::fs::remove_file(
self.repos_dir
.join(pkg.repo_id.to_string())
.join(pkg.id.to_string()),
)
.await;
if pkg.id > max_id {
max_id = pkg.id;
}
removed_pkgs += 1;
}
if removed_pkgs > 0 {
db::query::package::delete_stale_pkgs(&self.conn, max_id).await?;
}
tracing::info!("Removed {removed_pkgs} stale package(s)");
Ok(())
}
pub async fn pkg_parse_task(&self) {
loop {
// Receive the next message and immediately drop the mutex afterwards. As long as the
// queue is empty, this will lock the mutex. This is okay, as the mutex will be unlocked
// as soon as a message is received, so another worker can pick up the mutex.
let msg = {
let mut recv = self.pkg_queue.1.lock().await;
recv.recv().await
};
if let Some(msg) = msg {
// TODO better handle this error (retry if failure wasn't because the package is
// faulty)
let _ = self
.add_pkg_from_path(msg.path, msg.repo)
.await
.inspect_err(|e| tracing::error!("{:?}", e));
let old = self
.repos
.read()
.await
.get(&msg.repo)
.map(|n| n.0.fetch_sub(1, Ordering::SeqCst));
// Every time the queue for a repo becomes empty, we run a sync job
if old == Some(1) {
// TODO error handling
let _ = self.sync_repo(msg.repo).await;
// TODO move this so that we only clean if entire queue is empty, not just
// queue for specific repo
let _ = self.remove_stale_pkgs().await;
}
}
}
}
pub async fn queue_pkg(&self, repo: i32, path: PathBuf) {
self.pkg_queue.0.send(PkgQueueMsg { path, repo }).unwrap();
self.repos.read().await.get(&repo).inspect(|n| {
n.0.fetch_add(1, Ordering::SeqCst);
});
}
pub async fn get_repo(&self, distro: &str, repo: &str) -> crate::Result<Option<i32>> {
Ok(db::Repo::find()
.find_also_related(db::Distro)
.filter(
Condition::all()
.add(db::repo::Column::Name.eq(repo))
.add(db::distro::Column::Name.eq(distro)),
)
.one(&self.conn)
.await
.map(|res| res.map(|(repo, _)| repo.id))?)
}
pub async fn get_or_create_repo(&self, distro: &str, repo: &str) -> crate::Result<i32> {
let mut repos = self.repos.write().await;
let distro_id: Option<i32> = db::Distro::find()
.filter(db::distro::Column::Name.eq(distro))
.select_only()
.column(db::distro::Column::Id)
.into_tuple()
.one(&self.conn)
.await?;
let distro_id = if let Some(id) = distro_id {
id
} else {
let new_distro = db::distro::ActiveModel {
id: NotSet,
name: Set(distro.to_string()),
description: NotSet,
};
new_distro.insert(&self.conn).await?.id
};
let repo_id: Option<i32> = db::Repo::find()
.filter(db::repo::Column::DistroId.eq(distro_id))
.filter(db::repo::Column::Name.eq(repo))
.select_only()
.column(db::repo::Column::Id)
.into_tuple()
.one(&self.conn)
.await?;
let repo_id = if let Some(id) = repo_id {
id
} else {
let new_repo = db::repo::ActiveModel {
id: NotSet,
distro_id: Set(distro_id),
name: Set(repo.to_string()),
description: NotSet,
};
let id = new_repo.insert(&self.conn).await?.id;
tokio::fs::create_dir(self.repos_dir.join(id.to_string())).await?;
repos.insert(id, Default::default());
id
};
Ok(repo_id)
}
async fn add_pkg_from_path<P: AsRef<Path>>(&self, path: P, repo: i32) -> crate::Result<()> {
let path_clone = path.as_ref().to_path_buf();
let pkg = tokio::task::spawn_blocking(move || package::Package::open(path_clone))
.await
.unwrap()?;
// TODO prevent the database from being updated when the file subsequently fails to move
// into the repo dir?
let pkg = db::query::package::insert(&self.conn, repo, pkg).await?;
let dest_path = self
.repos_dir
.join(repo.to_string())
.join(pkg.id.to_string());
tokio::fs::rename(path.as_ref(), dest_path).await?;
tracing::info!(
"Added '{}-{}-{}' to repository {}",
pkg.name,
pkg.version,
pkg.arch,
repo,
);
Ok(())
}
pub async fn remove_repo(&self, repo: i32) -> crate::Result<()> {
self.repos.write().await.remove(&repo);
db::Repo::delete_by_id(repo).exec(&self.conn).await?;
let _ = tokio::fs::remove_dir_all(self.repos_dir.join(repo.to_string())).await;
Ok(())
}
/// Remove all packages in the repository that have a given arch. This method marks all
/// packages with the given architecture as "pending deletion" before performing a manual
/// sync & removal of stale packages.
pub async fn remove_repo_arch(&self, repo: i32, arch: &str) -> crate::Result<()> {
db::Package::update_many()
.col_expr(
db::package::Column::State,
Expr::value(db::PackageState::PendingDeletion),
)
.filter(
Condition::all()
.add(db::package::Column::RepoId.eq(repo))
.add(db::package::Column::Arch.eq(arch)),
)
.exec(&self.conn)
.await?;
self.sync_repo(repo).await?;
self.remove_stale_pkgs().await?;
Ok(())
}
pub fn random_file_paths<const C: usize>(&self) -> [PathBuf; C] {
std::array::from_fn(|_| {
let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
self.repos_dir.join(uuid.to_string())
})
}
}
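Because the array length is a const generic, callers simply destructure as many unique paths as they need and inference fills in `C`; a usage sketch (with `mgr` as a hypothetical `RepoMgr` handle):

// Two fresh, collision-free temp paths inside the repos directory.
let [desc_tmp_file_path, files_tmp_file_path] = mgr.random_file_paths();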

View File

@ -1,90 +1,161 @@
mod actor;
mod archive;
mod handle;
mod manager;
pub mod package;
pub use actor::Actor;
pub use handle::Handle;
pub use manager::RepoMgr;
use crate::db;
use crate::FsConfig;
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{atomic::AtomicU32, Arc, Mutex},
use axum::{
body::Body,
extract::{Path, State},
http::{Request, StatusCode},
response::IntoResponse,
routing::{delete, post},
Router,
};
use futures::TryStreamExt;
use tokio_util::io::StreamReader;
use tower::util::ServiceExt;
use tower_http::{services::ServeFile, validate_request::ValidateRequestHeaderLayer};
use sea_orm::{DbConn, EntityTrait, QuerySelect};
use tokio::{
runtime,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
RwLock,
},
};
pub enum Command {
ParsePkg(i32, PathBuf),
SyncRepo(i32),
Clean,
pub fn router(api_key: &str) -> Router<crate::Global> {
Router::new()
.route(
"/:distro/:repo",
post(post_package_archive)
.delete(delete_repo)
.route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
.route(
"/:distro/:repo/:arch",
delete(delete_arch_repo).route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
// Routes added after the layer do not get that layer applied, so the GET requests do not
// require authentication
.route(
"/:distro/:repo/:arch/:filename",
delete(delete_package)
.route_layer(ValidateRequestHeaderLayer::bearer(api_key))
.get(get_file),
)
}
type RepoState = (AtomicU32, Arc<Mutex<()>>);
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
async fn get_file(
State(global): State<crate::Global>,
Path((distro, repo, arch, file_name)): Path<(String, String, String, String)>,
req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
if let Some(repo_id) = global.mgr.get_repo(&distro, &repo).await? {
match global.config.fs {
FsConfig::Local { data_dir } => {
let repo_dir = data_dir.join("repos").join(repo_id.to_string());
pub struct SharedState {
pub repos_dir: PathBuf,
pub conn: DbConn,
pub rx: Mutex<UnboundedReceiver<Command>>,
pub tx: UnboundedSender<Command>,
pub repos: RwLock<HashMap<i32, RepoState>>,
}
let file_name = if file_name == format!("{}.db", repo)
|| file_name == format!("{}.db.tar.gz", repo)
{
format!("{}.db.tar.gz", arch)
} else if file_name == format!("{}.files", repo)
|| file_name == format!("{}.files.tar.gz", repo)
{
format!("{}.files.tar.gz", arch)
} else {
file_name
};
impl SharedState {
pub fn new(
repos_dir: impl AsRef<Path>,
conn: DbConn,
repos: HashMap<i32, (AtomicU32, Arc<Mutex<()>>)>,
) -> Self {
let (tx, rx) = unbounded_channel();
Self {
repos_dir: repos_dir.as_ref().to_path_buf(),
conn,
rx: Mutex::new(rx),
tx,
repos: RwLock::new(repos),
let path = repo_dir.join(file_name);
Ok(ServeFile::new(path).oneshot(req).await)
}
}
} else {
Err(StatusCode::NOT_FOUND.into())
}
}
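As an illustration of the aliasing above, with repo `extra` and arch `x86_64` the handler resolves file names as follows:

extra.db / extra.db.tar.gz       -> x86_64.db.tar.gz
extra.files / extra.files.tar.gz -> x86_64.files.tar.gz
any other file name              -> served as-is from the repo directory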
pub fn start(
repos_dir: impl AsRef<Path>,
conn: DbConn,
rt: runtime::Handle,
actors: u32,
) -> crate::Result<Handle> {
std::fs::create_dir_all(repos_dir.as_ref())?;
async fn post_package_archive(
State(global): State<crate::Global>,
Path((distro, repo)): Path<(String, String)>,
body: Body,
) -> crate::Result<StatusCode> {
let mut body = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other));
let repo = global.mgr.get_or_create_repo(&distro, &repo).await?;
let [tmp_path] = global.mgr.random_file_paths();
let mut repos = HashMap::new();
let repo_ids: Vec<i32> = rt.block_on(
db::Repo::find()
.select_only()
.column(db::repo::Column::Id)
.into_tuple()
.all(&conn),
)?;
let mut tmp_file = tokio::fs::File::create(&tmp_path).await?;
tokio::io::copy(&mut body, &mut tmp_file).await?;
for id in repo_ids {
repos.insert(id, Default::default());
}
global.mgr.queue_pkg(repo, tmp_path).await;
let state = Arc::new(SharedState::new(repos_dir, conn, repos));
for _ in 0..actors {
let actor = Actor::new(rt.clone(), Arc::clone(&state));
std::thread::spawn(|| actor.run());
}
Ok(Handle::new(&state))
Ok(StatusCode::ACCEPTED)
}
async fn delete_repo(
State(global): State<crate::Global>,
Path((distro, repo)): Path<(String, String)>,
) -> crate::Result<StatusCode> {
if let Some(repo) = global.mgr.get_repo(&distro, &repo).await? {
global.mgr.remove_repo(repo).await?;
tracing::info!("Removed repository {repo}");
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}
async fn delete_arch_repo(
State(global): State<crate::Global>,
Path((distro, repo, arch)): Path<(String, String, String)>,
) -> crate::Result<StatusCode> {
if let Some(repo) = global.mgr.get_repo(&distro, &repo).await? {
global.mgr.remove_repo_arch(repo, &arch).await?;
tracing::info!("Removed architecture '{arch}' from repository {repo}");
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
//if let Some(mgr) = global.mgr.get_mgr(&distro).await {
// let repo_removed = mgr.remove_repo_arch(&repo, &arch).await?;
//
// if repo_removed {
// tracing::info!("Removed arch '{}' from repository '{}'", arch, repo);
//
// Ok(StatusCode::OK)
// } else {
// Ok(StatusCode::NOT_FOUND)
// }
//} else {
// Ok(StatusCode::NOT_FOUND)
//}
}
async fn delete_package(
State(global): State<crate::Global>,
Path((distro, repo, arch, pkg_name)): Path<(String, String, String, String)>,
) -> crate::Result<StatusCode> {
Ok(StatusCode::NOT_FOUND)
//if let Some(mgr) = global.mgr.get_mgr(&distro).await {
// let pkg_removed = mgr.remove_pkg(&repo, &arch, &pkg_name).await?;
//
// if pkg_removed {
// tracing::info!(
// "Removed package '{}' ({}) from repository '{}'",
// pkg_name,
// arch,
// repo
// );
//
// Ok(StatusCode::OK)
// } else {
// Ok(StatusCode::NOT_FOUND)
// }
//} else {
// Ok(StatusCode::NOT_FOUND)
//}
}

View File

@ -1,17 +1,21 @@
use crate::db::entities::package;
use crate::db::{self, entities::package, PackageRelatedEnum};
use std::{
fmt, fs,
io::{self, BufRead, BufReader, Read},
io::{self, BufRead, BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
};
use chrono::NaiveDateTime;
use futures::StreamExt;
use libarchive::{
read::{Archive, Builder},
Entry, ReadFilter,
};
use sea_orm::ActiveValue::Set;
use sea_orm::{ActiveValue::Set, ColumnTrait, DbConn, ModelTrait, QueryFilter, QuerySelect};
use tokio::io::{AsyncWrite, AsyncWriteExt};
const IGNORED_FILES: [&str; 5] = [".BUILDINFO", ".INSTALL", ".MTREE", ".PKGINFO", ".CHANGELOG"];
#[derive(Debug, Clone)]
pub struct Package {
@ -48,18 +52,18 @@ pub struct PkgInfo {
}
#[derive(Debug, PartialEq, Eq)]
pub enum InvalidPkgInfoError {
Size,
BuildDate,
PgpSigSize,
pub enum ParsePkgInfoError {
InvalidSize,
InvalidBuildDate,
InvalidPgpSigSize,
}
impl fmt::Display for InvalidPkgInfoError {
impl fmt::Display for ParsePkgInfoError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
Self::Size => "invalid size",
Self::BuildDate => "invalid build date",
Self::PgpSigSize => "invalid pgp sig size",
Self::InvalidSize => "invalid size",
Self::InvalidBuildDate => "invalid build date",
Self::InvalidPgpSigSize => "invalid pgp sig size",
};
write!(f, "{}", s)
@ -67,7 +71,7 @@ impl fmt::Display for InvalidPkgInfoError {
}
impl PkgInfo {
pub fn extend<S: AsRef<str>>(&mut self, line: S) -> Result<(), InvalidPkgInfoError> {
pub fn extend<S: AsRef<str>>(&mut self, line: S) -> Result<(), ParsePkgInfoError> {
let line = line.as_ref();
if !line.starts_with('#') {
@ -77,21 +81,26 @@ impl PkgInfo {
"pkgbase" => self.base = value.to_string(),
"pkgver" => self.version = value.to_string(),
"pkgdesc" => self.description = Some(value.to_string()),
"size" => self.size = value.parse().map_err(|_| InvalidPkgInfoError::Size)?,
"size" => {
self.size = value.parse().map_err(|_| ParsePkgInfoError::InvalidSize)?
}
"url" => self.url = Some(value.to_string()),
"arch" => self.arch = value.to_string(),
"builddate" => {
let seconds: i64 =
value.parse().map_err(|_| InvalidPkgInfoError::BuildDate)?;
self.build_date = chrono::DateTime::from_timestamp_millis(seconds * 1000)
.ok_or(InvalidPkgInfoError::BuildDate)?
.naive_utc();
let seconds: i64 = value
.parse()
.map_err(|_| ParsePkgInfoError::InvalidBuildDate)?;
self.build_date = NaiveDateTime::from_timestamp_millis(seconds * 1000)
.ok_or(ParsePkgInfoError::InvalidBuildDate)?
}
"packager" => self.packager = Some(value.to_string()),
"pgpsig" => self.pgpsig = Some(value.to_string()),
"pgpsigsize" => {
self.pgpsigsize =
Some(value.parse().map_err(|_| InvalidPkgInfoError::PgpSigSize)?)
self.pgpsigsize = Some(
value
.parse()
.map_err(|_| ParsePkgInfoError::InvalidPgpSigSize)?,
)
}
"group" => self.groups.push(value.to_string()),
"license" => self.licenses.push(value.to_string()),
@ -151,9 +160,11 @@ impl Package {
let entry = entry?;
let path_name = entry.pathname();
if !path_name.starts_with('.') {
if !IGNORED_FILES.iter().any(|p| p == &path_name) {
files.push(PathBuf::from(path_name));
} else if path_name == ".PKGINFO" {
}
if path_name == ".PKGINFO" {
info = Some(PkgInfo::parse(entry)?);
}
}
@ -193,6 +204,74 @@ impl Package {
self.compression.extension().unwrap()
)
}
/// Write the formatted desc file to the provided writer
pub fn write_desc<W: Write>(&self, w: &mut W) -> io::Result<()> {
// We write a lot of small strings to the writer, so wrapping it in a BufWriter is
// beneficial
let mut w = BufWriter::new(w);
let info = &self.info;
writeln!(w, "%FILENAME%\n{}", self.file_name())?;
let mut write = |key: &str, value: &str| {
if !value.is_empty() {
writeln!(w, "\n%{}%\n{}", key, value)
} else {
Ok(())
}
};
write("NAME", &info.name)?;
write("BASE", &info.base)?;
write("VERSION", &info.version)?;
if let Some(ref description) = info.description {
write("DESC", description)?;
}
write("GROUPS", &info.groups.join("\n"))?;
write("CSIZE", &info.csize.to_string())?;
write("ISIZE", &info.size.to_string())?;
write("SHA256SUM", &info.sha256sum)?;
if let Some(ref url) = info.url {
write("URL", url)?;
}
write("LICENSE", &info.licenses.join("\n"))?;
write("ARCH", &info.arch)?;
write("BUILDDATE", &info.build_date.timestamp().to_string())?;
if let Some(ref packager) = info.packager {
write("PACKAGER", packager)?;
}
write("REPLACES", &info.replaces.join("\n"))?;
write("CONFLICTS", &info.conflicts.join("\n"))?;
write("PROVIDES", &info.provides.join("\n"))?;
write("DEPENDS", &info.depends.join("\n"))?;
write("OPTDEPENDS", &info.optdepends.join("\n"))?;
write("MAKEDEPENDS", &info.makedepends.join("\n"))?;
write("CHECKDEPENDS", &info.checkdepends.join("\n"))?;
Ok(())
}
pub fn write_files<W: Write>(&self, w: &mut W) -> io::Result<()> {
// We write a lot of small strings to the writer, so wrapping it in a BufWriter is
// beneficial
let mut w = BufWriter::new(w);
writeln!(w, "%FILES%")?;
for file in &self.files {
writeln!(w, "{}", file.to_string_lossy())?;
}
Ok(())
}
}
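For reference, the desc entry produced by `write_desc` for a hypothetical package would look roughly like this (attributes with empty values are skipped; the checksum is truncated here):

%FILENAME%
rieter-0.1.0-1-x86_64.pkg.tar.zst

%NAME%
rieter

%VERSION%
0.1.0-1

%CSIZE%
1024

%ISIZE%
4096

%SHA256SUM%
3f2a...

%ARCH%
x86_64

%BUILDDATE%
1718000000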
impl From<Package> for package::ActiveModel {
@ -217,3 +296,130 @@ impl From<Package> for package::ActiveModel {
}
}
}
pub fn filename(pkg: &package::Model) -> String {
format!(
"{}-{}-{}.pkg.tar.{}",
pkg.name, pkg.version, pkg.arch, pkg.compression
)
}
async fn write_attribute<W: AsyncWrite + std::marker::Unpin>(
writer: &mut W,
key: &str,
value: &str,
) -> io::Result<()> {
if !value.is_empty() {
let s = format!("\n%{}%\n{}\n", key, value);
writer.write_all(s.as_bytes()).await?;
}
Ok(())
}
pub async fn write_desc<W: AsyncWrite + std::marker::Unpin>(
conn: &DbConn,
writer: &mut W,
pkg: &package::Model,
) -> crate::Result<()> {
writer
.write_all(format!("%FILENAME%\n{}\n", pkg.id).as_bytes())
.await?;
write_attribute(writer, "NAME", &pkg.name).await?;
write_attribute(writer, "BASE", &pkg.base).await?;
write_attribute(writer, "VERSION", &pkg.version).await?;
if let Some(ref description) = pkg.description {
write_attribute(writer, "DESC", description).await?;
}
let groups: Vec<String> = pkg
.find_related(db::PackageGroup)
.select_only()
.column(db::package_group::Column::Name)
.into_tuple()
.all(conn)
.await?;
write_attribute(writer, "GROUPS", &groups.join("\n")).await?;
write_attribute(writer, "CSIZE", &pkg.c_size.to_string()).await?;
write_attribute(writer, "ISIZE", &pkg.size.to_string()).await?;
write_attribute(writer, "SHA256SUM", &pkg.sha256_sum).await?;
if let Some(ref url) = pkg.url {
write_attribute(writer, "URL", url).await?;
}
let licenses: Vec<String> = pkg
.find_related(db::PackageLicense)
.select_only()
.column(db::package_license::Column::Name)
.into_tuple()
.all(conn)
.await?;
write_attribute(writer, "LICENSE", &licenses.join("\n")).await?;
write_attribute(writer, "ARCH", &pkg.arch).await?;
// TODO build date
write_attribute(
writer,
"BUILDDATE",
&pkg.build_date.and_utc().timestamp().to_string(),
)
.await?;
if let Some(ref packager) = pkg.packager {
write_attribute(writer, "PACKAGER", packager).await?;
}
let related = [
("REPLACES", PackageRelatedEnum::Replaces),
("CONFLICTS", PackageRelatedEnum::Conflicts),
("PROVIDES", PackageRelatedEnum::Provides),
("DEPENDS", PackageRelatedEnum::Depend),
("OPTDEPENDS", PackageRelatedEnum::Optdepend),
("MAKEDEPENDS", PackageRelatedEnum::Makedepend),
("CHECKDEPENDS", PackageRelatedEnum::Checkdepend),
];
for (key, attr) in related.into_iter() {
let items: Vec<String> = pkg
.find_related(db::PackageRelated)
.filter(db::package_related::Column::Type.eq(attr))
.select_only()
.column(db::package_related::Column::Name)
.into_tuple()
.all(conn)
.await?;
write_attribute(writer, key, &items.join("\n")).await?;
}
writer.flush().await?;
Ok(())
}
pub async fn write_files<W: AsyncWrite + std::marker::Unpin>(
conn: &DbConn,
writer: &mut W,
pkg: &package::Model,
) -> crate::Result<()> {
let line = "%FILES%\n";
writer.write_all(line.as_bytes()).await?;
// Generate the files list for the package
let mut files = pkg.find_related(db::PackageFile).stream(conn).await?;
while let Some(file) = files.next().await.transpose()? {
writer
.write_all(format!("{}\n", file.path).as_bytes())
.await?;
}
writer.flush().await?;
Ok(())
}
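The files entry is simply the `%FILES%` header followed by one path per line; for a hypothetical package it would look like:

%FILES%
usr/
usr/bin/
usr/bin/rieterd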

View File

@ -1,23 +0,0 @@
pub struct Chunked<I> {
iter: I,
chunk_size: usize,
}
impl<I: Iterator> Chunked<I> {
pub fn new<T: IntoIterator<IntoIter = I>>(into: T, chunk_size: usize) -> Self {
Self {
iter: into.into_iter(),
chunk_size,
}
}
}
// https://users.rust-lang.org/t/how-to-breakup-an-iterator-into-chunks/87915/5
impl<I: Iterator> Iterator for Chunked<I> {
type Item = Vec<I::Item>;
fn next(&mut self) -> Option<Self::Item> {
Some(self.iter.by_ref().take(self.chunk_size).collect())
.filter(|chunk: &Vec<_>| !chunk.is_empty())
}
}
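A usage sketch of `Chunked`, e.g. for batching database inserts into bounded groups:

// Splits 0..5 into [0, 1], [2, 3], [4]; the iterator stops at the first
// empty chunk instead of yielding it.
let batches: Vec<Vec<u32>> = Chunked::new(0..5, 2).collect();
assert_eq!(batches, vec![vec![0, 1], vec![2, 3], vec![4]]);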

View File

@ -1,13 +0,0 @@
mod api;
mod repo;
use axum::Router;
use tower_http::trace::TraceLayer;
pub fn router(global: crate::Global) -> Router {
Router::new()
.nest("/api", api::router())
.merge(repo::router(&global.config.api_key))
.with_state(global)
.layer(TraceLayer::new_for_http())
}

View File

@ -1,126 +0,0 @@
use crate::{db, FsConfig};
use axum::{
body::Body,
extract::{Path, State},
http::{Request, StatusCode},
response::IntoResponse,
routing::{delete, get, post},
Router,
};
use futures::TryStreamExt;
use tokio_util::io::StreamReader;
use tower::util::ServiceExt;
use tower_http::{services::ServeFile, validate_request::ValidateRequestHeaderLayer};
pub fn router(api_key: &str) -> Router<crate::Global> {
Router::new()
.route(
"/:distro/:repo",
post(post_package_archive)
.delete(delete_repo)
.route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
.route(
"/:distro/:repo/:arch",
delete(delete_arch_repo).route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
// Routes added after the layer do not get that layer applied, so the GET requests do not
// require authentication
.route("/:distro/:repo/:arch/:filename", get(get_file))
}
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
async fn get_file(
State(global): State<crate::Global>,
Path((distro, repo, arch, file_name)): Path<(String, String, String, String)>,
req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
if let Some(repo_id) = global.repo.get_repo(&distro, &repo).await? {
let file_name =
if file_name == format!("{}.db", repo) || file_name == format!("{}.db.tar.gz", repo) {
format!("{}.db.tar.gz", arch)
} else if file_name == format!("{}.files", repo)
|| file_name == format!("{}.files.tar.gz", repo)
{
format!("{}.files.tar.gz", arch)
} else if let Some(m) = global.pkg_filename_re.captures(&file_name) {
// SAFETY: these unwraps cannot fail if the RegEx matched successfully
db::query::package::by_fields(
&global.db,
repo_id,
m.get(1).unwrap().as_str(),
m.get(2).unwrap().as_str(),
m.get(3).unwrap().as_str(),
m.get(4).unwrap().as_str(),
)
.await?
.ok_or(StatusCode::NOT_FOUND)?
.id
.to_string()
} else {
return Err(StatusCode::NOT_FOUND.into());
};
match global.config.fs {
FsConfig::Local { data_dir } => {
let path = data_dir
.join("repos")
.join(repo_id.to_string())
.join(file_name);
Ok(ServeFile::new(path).oneshot(req).await)
}
}
} else {
Err(StatusCode::NOT_FOUND.into())
}
}
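The regex itself is defined elsewhere (as `pkg_filename_re` on the global state); given the `{name}-{version}-{arch}.pkg.tar.{compression}` file-name format produced by `filename`, a pattern along these lines would yield the four capture groups used above (a hedged reconstruction, not the actual definition):

// Hypothetical reconstruction of pkg_filename_re; the real definition is
// outside this diff. Groups: name, version (pkgver-pkgrel), arch, compression.
let pkg_filename_re =
    regex::Regex::new(r"^(.+?)-([^-]+-[^-]+)-([^-]+)\.pkg\.tar\.([a-z0-9]+)$").unwrap();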
async fn post_package_archive(
State(global): State<crate::Global>,
Path((distro, repo)): Path<(String, String)>,
body: Body,
) -> crate::Result<StatusCode> {
let repo_id = global.repo.get_or_create_repo(&distro, &repo).await?;
let [tmp_path] = global.repo.random_file_paths();
let mut tmp_file = tokio::fs::File::create(&tmp_path).await?;
let mut body = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other));
tokio::io::copy(&mut body, &mut tmp_file).await?;
global.repo.queue_pkg(repo_id, tmp_path).await;
Ok(StatusCode::ACCEPTED)
}
async fn delete_repo(
State(global): State<crate::Global>,
Path((distro, repo)): Path<(String, String)>,
) -> crate::Result<StatusCode> {
if let Some(repo) = global.repo.get_repo(&distro, &repo).await? {
global.repo.remove_repo(repo).await?;
tracing::info!("Removed repository {repo}");
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}
async fn delete_arch_repo(
State(global): State<crate::Global>,
Path((distro, repo, arch)): Path<(String, String, String)>,
) -> crate::Result<StatusCode> {
if let Some(repo) = global.repo.get_repo(&distro, &repo).await? {
global.repo.remove_repo_arch(repo, &arch).await?;
tracing::info!("Removed architecture '{arch}' from repository {repo}");
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}