Compare commits

...

11 Commits

Author SHA1 Message Date
Jef Roosens bc19158747
refactor(server): clean up some stuff
2023-08-02 22:41:23 +02:00
Jef Roosens afe73d5314
feat(server): log errors; configurable database 2023-08-02 22:28:17 +02:00
Jef Roosens 7c6f485ea6
feat(server): update database when publishing packages 2023-08-02 22:25:38 +02:00
Jef Roosens f706b72b7c
feat(server): improve package parse semantics 2023-08-02 22:19:55 +02:00
Jef Roosens a2d844c582
feat(server): start of package database schema 2023-08-02 22:19:55 +02:00
Jef Roosens e63d0b5565
feat(server): pagination 2023-08-02 22:19:55 +02:00
Jef Roosens 25627e166e
feat(server): example of pagination 2023-08-02 22:19:54 +02:00
Jef Roosens 37218536c5
feat(server): start api using CRUD operations 2023-08-02 22:19:00 +02:00
Jef Roosens e08048d0f0
feat(server): initialize database migrations 2023-08-02 22:17:07 +02:00
Jef Roosens af27b06df1
feat(server): correctly serve repo files
2023-08-02 22:00:31 +02:00
Jef Roosens 2e5c84a48d
refactor(server): rewrite part of repo logic; remove need for default arch
2023-08-02 18:48:17 +02:00
20 changed files with 2438 additions and 171 deletions

View File

@@ -14,3 +14,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
* Serve packages from any number of repositories & architectures
* Publish packages to and delete packages from repositories using HTTP
requests
* Packages of architecture "any" are part of every architecture's
database
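
As a concrete illustration of the publish and delete flows listed above, a minimal client sketch (reqwest and tokio are assumptions for the example, not dependencies of this changeset; the endpoint shapes match the repo router further down):

use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let client = reqwest::Client::new();
    // Publish a package: POST the archive's bytes to /:repo
    let bytes = std::fs::read("libfoo-1.0.0-1-x86_64.pkg.tar.zst")?;
    client
        .post("http://localhost:8000/somerepo")
        .body(bytes)
        .send()
        .await?;
    // Delete it again: DELETE /:repo/:arch/:filename
    client
        .delete("http://localhost:8000/somerepo/x86_64/libfoo-1.0.0-1-x86_64.pkg.tar.zst")
        .send()
        .await?;
    Ok(())
}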

Cargo.lock (generated): 1692 changed lines

File diff suppressed because it is too large

View File

@@ -4,3 +4,9 @@ members = [
'libarchive',
'libarchive3-sys'
]
[profile.release]
lto = "fat"
codegen-units = 1
panic = "abort"
strip = true

View File

@@ -8,9 +8,12 @@ authors = ["Jef Roosens"]
[dependencies]
axum = { version = "0.6.18", features = ["http2"] }
chrono = { version = "0.4.26", features = ["serde"] }
clap = { version = "4.3.12", features = ["env", "derive"] }
futures = "0.3.28"
libarchive = { path = "../libarchive" }
sea-orm-migration = "0.12.1"
serde = { version = "1.0.178", features = ["derive"] }
sha256 = "1.1.4"
tokio = { version = "1.29.1", features = ["full"] }
tokio-util = { version = "0.7.8", features = ["io"] }
@@ -20,8 +23,12 @@ tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
uuid = { version = "1.4.0", features = ["v4"] }
[profile.release]
lto = "fat"
codegen-units = 1
panic = "abort"
strip = true
[dependencies.sea-orm]
version = "0.12.1"
features = [
"sqlx-sqlite",
"sqlx-postgres",
"runtime-tokio-rustls",
"macros",
"with-chrono"
]

View File

@@ -0,0 +1,59 @@
mod pagination;
use axum::extract::{Path, Query, State};
use axum::routing::get;
use axum::Json;
use axum::Router;
use sea_orm::entity::EntityTrait;
use sea_orm::query::QueryOrder;
use sea_orm::PaginatorTrait;
use pagination::PaginatedResponse;
use crate::db::entities::package;
use crate::db::entities::repo;
pub fn router() -> Router<crate::Global> {
Router::new()
.route("/repos", get(get_repos))
.route("/repos/:id", get(get_single_repo))
.route("/packages", get(get_packages))
}
async fn get_repos(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<repo::Model>>> {
let repos = repo::Entity::find()
.order_by_asc(repo::Column::Id)
.paginate(&global.db, pagination.per_page.unwrap_or(25))
.fetch_page(pagination.page.unwrap_or(1) - 1)
.await?;
Ok(Json(pagination.res(repos)))
}
async fn get_single_repo(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<repo::Model>> {
let repo = repo::Entity::find_by_id(id)
.one(&global.db)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;
Ok(Json(repo))
}
async fn get_packages(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<package::Model>>> {
let pkgs = package::Entity::find()
.order_by_asc(package::Column::Id)
.paginate(&global.db, pagination.per_page.unwrap_or(25))
.fetch_page(pagination.page.unwrap_or(1) - 1)
.await?;
Ok(Json(pagination.res(pkgs)))
}

View File

@@ -0,0 +1,32 @@
use serde::{Deserialize, Serialize};
pub const DEFAULT_PAGE: u64 = 0;
pub const DEFAULT_PER_PAGE: u64 = 25;
#[derive(Deserialize)]
pub struct Query {
pub page: Option<u64>,
pub per_page: Option<u64>,
}
#[derive(Serialize)]
pub struct PaginatedResponse<T>
where
T: for<'de> Serialize,
{
pub page: u64,
pub per_page: u64,
pub count: usize,
pub items: Vec<T>,
}
impl Query {
pub fn res<T: for<'de> Serialize>(self, items: Vec<T>) -> PaginatedResponse<T> {
PaginatedResponse {
page: self.page.unwrap_or(DEFAULT_PAGE),
per_page: self.per_page.unwrap_or(DEFAULT_PER_PAGE),
count: items.len(),
items,
}
}
}
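
To make the shape concrete, a sketch of what these types serialize to (serde_json is an assumption here, not part of this changeset):

let query = Query { page: Some(2), per_page: Some(10) };
let body = serde_json::to_string(&query.res(vec!["core", "extra"])).unwrap();
// {"page":2,"per_page":10,"count":2,"items":["core","extra"]}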

View File

@@ -4,14 +4,25 @@ use crate::{Config, Global};
use axum::extract::FromRef;
use axum::Router;
use clap::Parser;
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use tower_http::trace::TraceLayer;
use tracing::debug;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
/// Directory where package archives will be stored
pub pkg_dir: PathBuf,
/// Directory where repository metadata & SQLite database is stored
pub data_dir: PathBuf,
/// Database connection URL; either sqlite:// or postgres://. Defaults to rieter.sqlite in the
/// data directory
#[arg(short, long)]
pub database_url: Option<String>,
/// Port the server will listen on
#[arg(short, long, value_name = "PORT", default_value_t = 8000)]
pub port: u16,
@@ -22,12 +33,6 @@ pub struct Cli {
default_value = "tower_http=debug,rieterd=debug"
)]
pub log: String,
/// Directory where package archives will be stored
pub pkg_dir: PathBuf,
/// Directory where repository metadata is stored
pub repo_dir: PathBuf,
/// Default architecture to add packages with arch "any" to
pub default_arch: String,
}
impl FromRef<Global> for Arc<RwLock<RepoGroupManager>> {
@@ -44,30 +49,51 @@ impl Cli {
.init();
}
pub async fn run(&self) {
pub async fn run(&self) -> crate::Result<()> {
self.init_tracing();
let db_url = if let Some(url) = &self.database_url {
url.clone()
} else {
format!(
"sqlite://{}",
self.data_dir.join("rieter.sqlite").to_string_lossy()
)
};
debug!("Connecting to database with URL {}", db_url);
let db = crate::db::init(db_url).await?;
// let db = crate::db::init("postgres://rieter:rieter@localhost:5432/rieter")
// .await
// .unwrap();
let config = Config {
repo_dir: self.repo_dir.clone(),
data_dir: self.data_dir.clone(),
repo_dir: self.data_dir.join("repos"),
pkg_dir: self.pkg_dir.clone(),
};
let repo_manager = RepoGroupManager::new(&self.repo_dir, &self.pkg_dir, &self.default_arch);
let repo_manager = RepoGroupManager::new(&config.repo_dir, &self.pkg_dir);
let global = Global {
config,
repo_manager: Arc::new(RwLock::new(repo_manager)),
db,
};
// build our application router
let app = Router::new()
.merge(crate::repo::router(&global))
.nest("/api", crate::api::router())
.merge(crate::repo::router())
.with_state(global)
.layer(TraceLayer::new_for_http());
// run it with hyper on the configured port
axum::Server::bind(&format!("0.0.0.0:{}", self.port).parse().unwrap())
.serve(app.into_make_service())
.await
.unwrap();
Ok(
axum::Server::bind(&format!("0.0.0.0:{}", self.port).parse().unwrap())
.serve(app.into_make_service())
.await
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?,
)
}
}
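
For reference, a sketch of how the derive above parses a command line; parse_from is clap's programmatic entry point, and the paths and URL are made up:

use clap::Parser;

let cli = Cli::parse_from([
    "rieterd",
    "./pkgs",
    "./data",
    "--port",
    "8080",
    "--database-url",
    "postgres://rieter:rieter@localhost:5432/rieter",
]);
assert_eq!(cli.port, 8080);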

View File

@@ -0,0 +1,7 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
pub mod prelude;
pub mod package;
pub mod package_license;
pub mod repo;

View File

@@ -0,0 +1,53 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "package")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i32,
pub repo_id: i32,
pub base: String,
pub name: String,
pub version: String,
pub arch: String,
pub size: i64,
pub c_size: i64,
pub description: Option<String>,
pub url: Option<String>,
pub build_date: DateTime,
pub packager: Option<String>,
pub pgp_sig: Option<String>,
pub pgp_sig_size: Option<i64>,
pub sha256_sum: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::package_license::Entity")]
PackageLicense,
#[sea_orm(
belongs_to = "super::repo::Entity",
from = "Column::RepoId",
to = "super::repo::Column::Id",
on_update = "NoAction",
on_delete = "Cascade"
)]
Repo,
}
impl Related<super::package_license::Entity> for Entity {
fn to() -> RelationDef {
Relation::PackageLicense.def()
}
}
impl Related<super::repo::Entity> for Entity {
fn to() -> RelationDef {
Relation::Repo.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
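
A sketch of what the Relation definitions above enable: a joined query returning each package together with its repository (the helper name is hypothetical):

use sea_orm::{DatabaseConnection, DbErr, EntityTrait};

async fn packages_with_repos(
    db: &DatabaseConnection,
) -> Result<Vec<(Model, Option<super::repo::Model>)>, DbErr> {
    // find_also_related is available because Entity implements Related<repo::Entity>
    Entity::find()
        .find_also_related(super::repo::Entity)
        .all(db)
        .await
}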

View File

@@ -0,0 +1,33 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "package_license")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub package_id: i32,
#[sea_orm(primary_key, auto_increment = false)]
pub value: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::package::Entity",
from = "Column::PackageId",
to = "super::package::Column::Id",
on_update = "NoAction",
on_delete = "Cascade"
)]
Package,
}
impl Related<super::package::Entity> for Entity {
fn to() -> RelationDef {
Relation::Package.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
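
Because this entity uses a composite primary key without auto-increment, an insert has to set both columns explicitly; a sketch (the helper name is hypothetical):

use sea_orm::{ConnectionTrait, DbErr, EntityTrait, Set};

async fn add_license(
    db: &impl ConnectionTrait,
    package_id: i32,
    license: &str,
) -> Result<(), DbErr> {
    let row = ActiveModel {
        package_id: Set(package_id),
        value: Set(license.to_string()),
    };
    Entity::insert(row).exec(db).await?;
    Ok(())
}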

View File

@@ -0,0 +1,5 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
pub use super::package::Entity as Package;
pub use super::package_license::Entity as PackageLicense;
pub use super::repo::Entity as Repo;

View File

@@ -0,0 +1,28 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "repo")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i32,
#[sea_orm(unique)]
pub name: String,
pub description: Option<String>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::package::Entity")]
Package,
}
impl Related<super::package::Entity> for Entity {
fn to() -> RelationDef {
Relation::Package.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,143 @@
use sea_orm_migration::prelude::*;
pub struct Migration;
impl MigrationName for Migration {
fn name(&self) -> &str {
"m_20230730_000001_create_repo_tables"
}
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(Repo::Table)
.col(
ColumnDef::new(Repo::Id)
.integer()
.not_null()
.auto_increment()
.primary_key(),
)
.col(ColumnDef::new(Repo::Name).string().not_null().unique_key())
.col(ColumnDef::new(Repo::Description).string())
.to_owned(),
)
.await?;
manager
.create_table(
Table::create()
.table(Package::Table)
.col(
ColumnDef::new(Package::Id)
.integer()
.not_null()
.auto_increment()
.primary_key(),
)
.col(ColumnDef::new(Package::RepoId).integer().not_null())
.col(ColumnDef::new(Package::Base).string_len(255).not_null())
.col(ColumnDef::new(Package::Name).string_len(255).not_null())
.col(ColumnDef::new(Package::Version).string_len(255).not_null())
.col(ColumnDef::new(Package::Arch).string_len(255).not_null())
.col(ColumnDef::new(Package::Size).big_integer().not_null())
.col(ColumnDef::new(Package::CSize).big_integer().not_null())
.col(ColumnDef::new(Package::Description).string())
.col(ColumnDef::new(Package::Url).string_len(255))
.col(ColumnDef::new(Package::BuildDate).date_time().not_null())
.col(ColumnDef::new(Package::Packager).string_len(255))
.col(ColumnDef::new(Package::PgpSig).string_len(255))
.col(ColumnDef::new(Package::PgpSigSize).big_integer())
.col(ColumnDef::new(Package::Sha256Sum).char_len(64).not_null())
.foreign_key(
ForeignKey::create()
.name("fk-package-repo_id")
.from(Package::Table, Package::RepoId)
.to(Repo::Table, Repo::Id)
.on_delete(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await?;
manager
.create_table(
Table::create()
.table(PackageLicense::Table)
.col(
ColumnDef::new(PackageLicense::PackageId)
.integer()
.not_null(),
)
.col(
ColumnDef::new(PackageLicense::Value)
.string_len(255)
.not_null(),
)
.primary_key(
Index::create()
.col(PackageLicense::PackageId)
.col(PackageLicense::Value),
)
.foreign_key(
ForeignKey::create()
.name("fk-package_license-package_id")
.from(PackageLicense::Table, PackageLicense::PackageId)
.to(Package::Table, Package::Id)
.on_delete(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
}
// Define how to roll back this migration: drop the three tables in reverse order of creation.
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(PackageLicense::Table).to_owned())
.await?;
manager
.drop_table(Table::drop().table(Package::Table).to_owned())
.await?;
manager
.drop_table(Table::drop().table(Repo::Table).to_owned())
.await
}
}
#[derive(Iden)]
pub enum Repo {
Table,
Id,
Name,
Description,
}
#[derive(Iden)]
pub enum Package {
Table,
Id,
RepoId,
Name,
Base,
Version,
Description,
Size,
CSize,
Url,
Arch,
BuildDate,
Packager,
PgpSig,
PgpSigSize,
Sha256Sum,
}
#[derive(Iden)]
pub enum PackageLicense {
Table,
PackageId,
Value,
}
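
As an aside, the derived Iden impls are what render these variants as the snake_case identifiers used in the generated SQL; a quick sketch:

use sea_orm_migration::prelude::Iden;

assert_eq!(Package::CSize.to_string(), "c_size");
assert_eq!(PackageLicense::Table.to_string(), "package_license");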

View File

@@ -0,0 +1,12 @@
use sea_orm_migration::prelude::*;
pub struct Migrator;
mod m20230730_000001_create_repo_tables;
#[async_trait::async_trait]
impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![Box::new(m20230730_000001_create_repo_tables::Migration)]
}
}

View File

@@ -0,0 +1,17 @@
pub mod entities;
mod migrator;
use migrator::Migrator;
use sea_orm::ConnectOptions;
use sea_orm::Database;
use sea_orm_migration::MigratorTrait;
pub async fn init<C: Into<ConnectOptions>>(
opt: C,
) -> Result<sea_orm::DatabaseConnection, sea_orm::DbErr> {
let db = Database::connect(opt).await?;
Migrator::up(&db, None).await?;
Ok(db)
}
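
A usage sketch: init accepts any sqlx-style URL, so the full migration set can be exercised against an in-memory SQLite database, for example:

// Somewhere in async setup or test code (sketch):
let db = crate::db::init("sqlite::memory:").await?;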

View File

@@ -10,6 +10,8 @@ pub type Result<T> = std::result::Result<T, ServerError>;
pub enum ServerError {
IO(io::Error),
Axum(axum::Error),
Db(sea_orm::DbErr),
Status(StatusCode),
}
impl fmt::Display for ServerError {
@@ -17,6 +19,8 @@ impl fmt::Display for ServerError {
match self {
ServerError::IO(err) => write!(fmt, "{}", err),
ServerError::Axum(err) => write!(fmt, "{}", err),
ServerError::Status(status) => write!(fmt, "{}", status),
ServerError::Db(err) => write!(fmt, "{}", err),
}
}
}
@@ -25,9 +29,16 @@ impl Error for ServerError {}
impl IntoResponse for ServerError {
fn into_response(self) -> Response {
tracing::error!("{:?}", self);
match self {
ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Status(status) => status.into_response(),
ServerError::Db(sea_orm::DbErr::RecordNotFound(_)) => {
StatusCode::NOT_FOUND.into_response()
}
ServerError::Db(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
}
}
}
@@ -49,3 +60,15 @@ impl From<tokio::task::JoinError> for ServerError {
ServerError::IO(err.into())
}
}
impl From<StatusCode> for ServerError {
fn from(status: StatusCode) -> Self {
Self::Status(status)
}
}
impl From<sea_orm::DbErr> for ServerError {
fn from(err: sea_orm::DbErr) -> Self {
ServerError::Db(err)
}
}
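
With these From impls in place, handlers can use ? on database errors and bare status codes alike; a sketch mirroring get_single_repo (the helper name is hypothetical):

use axum::http::StatusCode;
use sea_orm::{DatabaseConnection, EntityTrait};

async fn require_repo(db: &DatabaseConnection, id: i32) -> crate::Result<()> {
    crate::db::entities::repo::Entity::find_by_id(id)
        .one(db)
        .await? // sea_orm::DbErr -> ServerError::Db
        .ok_or(StatusCode::NOT_FOUND)?; // StatusCode -> ServerError::Status
    Ok(())
}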

View File

@@ -1,15 +1,19 @@
mod api;
mod cli;
pub mod db;
mod error;
mod repo;
use clap::Parser;
pub use error::{Result, ServerError};
use repo::RepoGroupManager;
use sea_orm::DatabaseConnection;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
#[derive(Clone)]
pub struct Config {
data_dir: PathBuf,
repo_dir: PathBuf,
pkg_dir: PathBuf,
}
@@ -18,10 +22,11 @@ pub struct Config {
pub struct Global {
config: Config,
repo_manager: Arc<RwLock<RepoGroupManager>>,
db: DatabaseConnection,
}
#[tokio::main]
async fn main() {
async fn main() -> crate::Result<()> {
let cli = cli::Cli::parse();
cli.run().await;
cli.run().await
}

View File

@@ -1,28 +1,33 @@
use super::package::Package;
use libarchive::write::{Builder, WriteEntry};
use libarchive::{Entry, WriteFilter, WriteFormat};
use std::collections::HashSet;
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
pub const ANY_ARCH: &str = "any";
/// Overarching abstraction that orchestrates updating the repositories stored on the server
pub struct RepoGroupManager {
repo_dir: PathBuf,
pkg_dir: PathBuf,
default_arch: String,
}
fn parse_pkg_filename(file_name: &str) -> (String, &str, &str, &str) {
let name_parts = file_name.split('-').collect::<Vec<_>>();
let name = name_parts[..name_parts.len() - 3].join("-");
let version = name_parts[name_parts.len() - 3];
let release = name_parts[name_parts.len() - 2];
let (arch, _) = name_parts[name_parts.len() - 1].split_once('.').unwrap();
(name, version, release, arch)
}
impl RepoGroupManager {
pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(
repo_dir: P1,
pkg_dir: P2,
default_arch: &str,
) -> Self {
pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(repo_dir: P1, pkg_dir: P2) -> Self {
RepoGroupManager {
repo_dir: repo_dir.as_ref().to_path_buf(),
pkg_dir: pkg_dir.as_ref().to_path_buf(),
default_arch: String::from(default_arch),
}
}
@@ -37,11 +42,23 @@ impl RepoGroupManager {
ar_files.add_filter(WriteFilter::Gzip)?;
ar_files.set_format(WriteFormat::PaxRestricted)?;
let mut ar_db = ar_db.open_file(subrepo_path.join(format!("{}.tar.gz", repo)))?;
let mut ar_db = ar_db.open_file(subrepo_path.join(format!("{}.db.tar.gz", repo)))?;
let mut ar_files =
ar_files.open_file(subrepo_path.join(format!("{}.files.tar.gz", repo)))?;
for entry in subrepo_path.read_dir()? {
// All architectures should also include the "any" architecture, except for the "any"
// architecture itself.
let repo_any_dir = self.repo_dir.join(repo).join(ANY_ARCH);
let any_entries_iter = if arch != ANY_ARCH && repo_any_dir.try_exists()? {
Some(repo_any_dir.read_dir()?)
} else {
None
}
.into_iter()
.flatten();
for entry in subrepo_path.read_dir()?.chain(any_entries_iter) {
let entry = entry?;
if entry.file_type()?.is_dir() {
@@ -68,123 +85,123 @@ impl RepoGroupManager {
}
}
ar_db.close().and(ar_files.close()).map_err(Into::into)
ar_db.close()?;
ar_files.close()?;
Ok(())
}
pub fn add_pkg_from_path<P: AsRef<Path>>(&mut self, repo: &str, path: P) -> io::Result<()> {
let mut pkg = Package::open(&path)?;
pkg.calculate_checksum()?;
/// Synchronize all present architectures' db archives in the given repository.
pub fn sync_all(&mut self, repo: &str) -> io::Result<()> {
for entry in self.repo_dir.join(repo).read_dir()? {
let entry = entry?;
let archs = self.add_pkg_in_repo(repo, &pkg)?;
// We add the package to each architecture it was added to by hard-linking the provided
// package file. This prevents storing a package of type "any" multiple times on disk.
for arch in archs {
let arch_repo_pkg_path = self.pkg_dir.join(repo).join(arch);
let dest_pkg_path = arch_repo_pkg_path.join(pkg.file_name());
fs::create_dir_all(&arch_repo_pkg_path)?;
fs::hard_link(&path, dest_pkg_path)?;
if entry.file_type()?.is_dir() {
self.sync(repo, &entry.file_name().to_string_lossy())?;
}
}
fs::remove_file(path)
Ok(())
}
pub fn add_pkg_from_path<P: AsRef<Path>>(
&mut self,
repo: &str,
path: P,
) -> io::Result<Package> {
let pkg = Package::open(&path)?;
self.add_pkg(repo, &pkg)?;
// After successfully adding the package, we move it to the packages directory
let dest_pkg_path = self
.pkg_dir
.join(repo)
.join(&pkg.info.arch)
.join(pkg.file_name());
fs::create_dir_all(dest_pkg_path.parent().unwrap())?;
fs::rename(&path, dest_pkg_path)?;
Ok(pkg)
}
/// Add a package to the given repo, returning the architectures the package was added to.
pub fn add_pkg_in_repo(&mut self, repo: &str, pkg: &Package) -> io::Result<HashSet<String>> {
let mut arch_repos: HashSet<String> = HashSet::new();
pub fn add_pkg(&mut self, repo: &str, pkg: &Package) -> io::Result<()> {
// We first remove any existing version of the package
self.remove_pkg(repo, &pkg.info.arch, &pkg.info.name, false)?;
if pkg.info.arch != "any" {
self.add_pkg_in_arch_repo(repo, &pkg.info.arch, pkg)?;
arch_repos.insert(pkg.info.arch.clone());
}
// Packages of arch "any" are added to every existing arch
else {
arch_repos.insert(self.default_arch.clone());
let repo_dir = self.repo_dir.join(repo);
if repo_dir.exists() {
for entry in repo_dir.read_dir()? {
arch_repos.insert(entry?.file_name().to_string_lossy().to_string());
}
}
for arch in arch_repos.iter() {
self.add_pkg_in_arch_repo(repo, arch, pkg)?;
}
}
Ok(arch_repos)
}
pub fn add_pkg_in_arch_repo(
&mut self,
repo: &str,
arch: &str,
pkg: &Package,
) -> io::Result<()> {
let pkg_dir = self
// Write the `desc` and `files` metadata files to disk
let metadata_dir = self
.repo_dir
.join(repo)
.join(arch)
.join(&pkg.info.arch)
.join(format!("{}-{}", pkg.info.name, pkg.info.version));
// We first remove the previous version of the package, if present
self.remove_pkg_from_arch_repo(repo, arch, &pkg.info.name, false)?;
fs::create_dir_all(&metadata_dir)?;
fs::create_dir_all(&pkg_dir)?;
let mut desc_file = fs::File::create(pkg_dir.join("desc"))?;
let mut desc_file = fs::File::create(metadata_dir.join("desc"))?;
pkg.write_desc(&mut desc_file)?;
let mut files_file = fs::File::create(pkg_dir.join("files"))?;
let mut files_file = fs::File::create(metadata_dir.join("files"))?;
pkg.write_files(&mut files_file)?;
self.sync(repo, arch)
// If a package of type "any" is added, we need to update every existing database
if pkg.info.arch == ANY_ARCH {
self.sync_all(repo)?;
} else {
self.sync(repo, &pkg.info.arch)?;
}
Ok(())
}
pub fn remove_repo(&mut self, repo: &str) -> io::Result<bool> {
let repo_dir = self.repo_dir.join(&repo);
let repo_dir = self.repo_dir.join(repo);
if !repo_dir.exists() {
Ok(false)
} else {
fs::remove_dir_all(&repo_dir)
.and_then(|_| fs::remove_dir_all(self.pkg_dir.join(repo)))?;
fs::remove_dir_all(&repo_dir)?;
fs::remove_dir_all(self.pkg_dir.join(repo))?;
Ok(true)
}
}
pub fn remove_arch_repo(&mut self, repo: &str, arch: &str) -> io::Result<bool> {
pub fn remove_repo_arch(&mut self, repo: &str, arch: &str) -> io::Result<bool> {
let sub_path = PathBuf::from(repo).join(arch);
let repo_dir = self.repo_dir.join(&sub_path);
if !repo_dir.exists() {
Ok(false)
} else {
fs::remove_dir_all(&repo_dir)
.and_then(|_| fs::remove_dir_all(self.pkg_dir.join(sub_path)))?;
Ok(true)
return Ok(false);
}
fs::remove_dir_all(&repo_dir)?;
fs::remove_dir_all(self.pkg_dir.join(sub_path))?;
// Removing the "any" architecture updates all other repositories
if arch == ANY_ARCH {
self.sync_all(repo)?;
}
Ok(true)
}
pub fn remove_pkg_from_arch_repo(
pub fn remove_pkg(
&mut self,
repo: &str,
arch: &str,
pkg_name: &str,
sync: bool,
) -> io::Result<bool> {
let arch_repo_dir = self.repo_dir.join(repo).join(arch);
let repo_arch_dir = self.repo_dir.join(repo).join(arch);
if !arch_repo_dir.exists() {
if !repo_arch_dir.exists() {
return Ok(false);
}
for entry in arch_repo_dir.read_dir()? {
for entry in repo_arch_dir.read_dir()? {
let entry = entry?;
// Make sure we skip the archive files
@@ -204,16 +221,13 @@ impl RepoGroupManager {
fs::remove_dir_all(entry.path())?;
// Also remove the old package archive
let arch_repo_pkg_dir = self.pkg_dir.join(repo).join(arch);
let repo_arch_pkg_dir = self.pkg_dir.join(repo).join(arch);
arch_repo_pkg_dir.read_dir()?.try_for_each(|res| {
repo_arch_pkg_dir.read_dir()?.try_for_each(|res| {
res.and_then(|entry: fs::DirEntry| {
let file_name = entry.file_name();
let file_name = file_name.to_string_lossy();
// Same trick, but for package files, we also need to trim the arch
let name_parts = file_name.split('-').collect::<Vec<_>>();
let name = name_parts[..name_parts.len() - 3].join("-");
let (name, _, _, _) = parse_pkg_filename(&file_name);
if name == pkg_name {
fs::remove_file(entry.path())
@@ -224,7 +238,11 @@ impl RepoGroupManager {
})?;
if sync {
self.sync(repo, arch)?;
if arch == ANY_ARCH {
self.sync_all(repo)?;
} else {
self.sync(repo, arch)?;
}
}
return Ok(true);
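
For reference, the parse_pkg_filename helper near the top of this file treats the last three '-'-separated fields as version, release and architecture; a sketch:

assert_eq!(
    parse_pkg_filename("libfoo-bar-1.2.3-2-x86_64.pkg.tar.zst"),
    ("libfoo-bar".to_string(), "1.2.3", "2", "x86_64")
);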

View File

@@ -3,30 +3,30 @@ mod package;
pub use manager::RepoGroupManager;
use crate::db::entities::{package as db_package, repo as db_repo};
use axum::body::Body;
use axum::extract::{BodyStream, Path, State};
use axum::http::Request;
use axum::http::StatusCode;
use axum::routing::{delete, get_service, post};
use axum::response::IntoResponse;
use axum::routing::{delete, post};
use axum::Router;
use futures::StreamExt;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, QueryFilter};
use std::sync::Arc;
use tokio::{fs, io::AsyncWriteExt};
use tower_http::services::ServeDir;
use tower::util::ServiceExt;
use tower_http::services::{ServeDir, ServeFile};
use uuid::Uuid;
pub fn router(global: &crate::Global) -> Router<crate::Global> {
// Try to serve packages by default, and try the database files instead if not found
let serve_repos = get_service(
ServeDir::new(&global.config.pkg_dir).fallback(ServeDir::new(&global.config.repo_dir)),
);
pub fn router() -> Router<crate::Global> {
Router::new()
.route("/:repo", post(post_package_archive).delete(delete_repo))
.route("/:repo/:arch", delete(delete_arch_repo))
.route(
"/:repo/:arch/:filename",
delete(delete_package).get(serve_repos.clone()),
delete(delete_package).get(get_file),
)
.fallback(serve_repos)
.with_state(global.clone())
}
async fn post_package_archive(
@@ -44,10 +44,104 @@ async fn post_package_archive(
}
let clone = Arc::clone(&global.repo_manager);
tokio::task::spawn_blocking(move || clone.write().unwrap().add_pkg_from_path(&repo, &path))
.await??;
let path_clone = path.clone();
let repo_clone = repo.clone();
let res = tokio::task::spawn_blocking(move || {
clone
.write()
.unwrap()
.add_pkg_from_path(&repo_clone, &path_clone)
})
.await?;
Ok(())
match res {
// Insert the newly added package into the database
Ok(pkg) => {
// Query the repo for its ID, or create it if it does not already exist
let repo_entity = db_repo::Entity::find()
.filter(db_repo::Column::Name.eq(&repo))
.one(&global.db)
.await?;
let repo_id = if let Some(repo_entity) = repo_entity {
repo_entity.id
} else {
let model = db_repo::ActiveModel {
name: sea_orm::Set(repo.clone()),
..Default::default()
};
db_repo::Entity::insert(model)
.exec(&global.db)
.await?
.last_insert_id
};
// Insert the package's data into the database
let mut model: db_package::ActiveModel = pkg.into();
model.repo_id = sea_orm::Set(repo_id);
model.insert(&global.db).await?;
Ok(())
}
// Remove the uploaded file and return the error
Err(err) => {
tokio::fs::remove_file(path).await?;
Err(err.into())
}
}
}
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
async fn get_file(
State(global): State<crate::Global>,
Path((repo, arch, mut file_name)): Path<(String, String, String)>,
req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
let repo_exists = tokio::fs::try_exists(&repo_dir).await?;
let res = if file_name.ends_with(".db") || file_name.ends_with(".db.tar.gz") {
// Append tar extension to ensure we find the file
if file_name.ends_with(".db") {
file_name.push_str(".tar.gz");
};
if repo_exists {
ServeFile::new(repo_dir.join(file_name)).oneshot(req).await
} else {
let path = global
.config
.repo_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
ServeFile::new(path).oneshot(req).await
}
} else {
let any_file = global
.config
.pkg_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
if repo_exists {
ServeDir::new(global.config.pkg_dir)
.fallback(ServeFile::new(any_file))
.oneshot(req)
.await
} else {
ServeFile::new(any_file).oneshot(req).await
}
};
Ok(res)
}
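
Concretely, for a repository "core" whose only explicit architecture is x86_64, get_file resolves requests roughly as follows (paths abbreviated, file names made up):

GET /core/x86_64/core.db -> repo_dir/core/x86_64/core.db.tar.gz
GET /core/aarch64/core.db -> repo_dir/core/any/core.db.tar.gz
GET /core/x86_64/libfoo-1.0.0-1-x86_64.pkg.tar.zst -> pkg_dir/core/x86_64/libfoo-1.0.0-1-x86_64.pkg.tar.zst
GET /core/x86_64/libbar-1.0.0-1-any.pkg.tar.zst -> pkg_dir/core/any/libbar-1.0.0-1-any.pkg.tar.zst (fallback)
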
async fn delete_repo(
@@ -73,7 +167,7 @@ async fn delete_arch_repo(
let clone = Arc::clone(&global.repo_manager);
let repo_removed =
tokio::task::spawn_blocking(move || clone.write().unwrap().remove_arch_repo(&repo, &arch))
tokio::task::spawn_blocking(move || clone.write().unwrap().remove_repo_arch(&repo, &arch))
.await??;
if repo_removed {
@@ -100,10 +194,7 @@ async fn delete_package(
let clone = Arc::clone(&global.repo_manager);
let pkg_removed = tokio::task::spawn_blocking(move || {
clone
.write()
.unwrap()
.remove_pkg_from_arch_repo(&repo, &arch, &name, true)
clone.write().unwrap().remove_pkg(&repo, &arch, &name, true)
})
.await??;

View File

@@ -1,10 +1,14 @@
use chrono::NaiveDateTime;
use libarchive::read::{Archive, Builder};
use libarchive::{Entry, ReadFilter};
use sea_orm::ActiveValue::Set;
use std::fmt;
use std::fs;
use std::io::{self, BufRead, BufReader, BufWriter, Read, Write};
use std::path::{Path, PathBuf};
use crate::db::entities::package;
const IGNORED_FILES: [&str; 5] = [".BUILDINFO", ".INSTALL", ".MTREE", ".PKGINFO", ".CHANGELOG"];
#[derive(Debug)]
@@ -17,18 +21,18 @@ pub struct Package {
#[derive(Debug, Default)]
pub struct PkgInfo {
pub name: String,
pub base: String,
pub name: String,
pub version: String,
pub description: String,
pub size: u64,
pub csize: u64,
pub url: String,
pub arch: String,
pub build_date: i64,
pub packager: String,
pub pgpsig: String,
pub pgpsigsize: i64,
pub description: Option<String>,
pub size: i64,
pub csize: i64,
pub url: Option<String>,
pub build_date: NaiveDateTime,
pub packager: Option<String>,
pub pgpsig: Option<String>,
pub pgpsigsize: Option<i64>,
pub groups: Vec<String>,
pub licenses: Vec<String>,
pub replaces: Vec<String>,
@@ -38,7 +42,7 @@ pub struct PkgInfo {
pub optdepends: Vec<String>,
pub makedepends: Vec<String>,
pub checkdepends: Vec<String>,
pub sha256sum: Option<String>,
pub sha256sum: String,
}
#[derive(Debug, PartialEq, Eq)]
@@ -70,23 +74,27 @@ impl PkgInfo {
"pkgname" => self.name = value.to_string(),
"pkgbase" => self.base = value.to_string(),
"pkgver" => self.version = value.to_string(),
"pkgdesc" => self.description = value.to_string(),
"pkgdesc" => self.description = Some(value.to_string()),
"size" => {
self.size = value.parse().map_err(|_| ParsePkgInfoError::InvalidSize)?
}
"url" => self.url = value.to_string(),
"url" => self.url = Some(value.to_string()),
"arch" => self.arch = value.to_string(),
"builddate" => {
self.build_date = value
let seconds: i64 = value
.parse()
.map_err(|_| ParsePkgInfoError::InvalidBuildDate)?
.map_err(|_| ParsePkgInfoError::InvalidBuildDate)?;
self.build_date = NaiveDateTime::from_timestamp_millis(seconds * 1000)
.ok_or(ParsePkgInfoError::InvalidBuildDate)?
}
"packager" => self.packager = value.to_string(),
"pgpsig" => self.pgpsig = value.to_string(),
"packager" => self.packager = Some(value.to_string()),
"pgpsig" => self.pgpsig = Some(value.to_string()),
"pgpsigsize" => {
self.pgpsigsize = value
.parse()
.map_err(|_| ParsePkgInfoError::InvalidPgpSigSize)?
self.pgpsigsize = Some(
value
.parse()
.map_err(|_| ParsePkgInfoError::InvalidPgpSigSize)?,
)
}
"group" => self.groups.push(value.to_string()),
"license" => self.licenses.push(value.to_string()),
@@ -156,7 +164,9 @@ impl Package {
}
if let Some(mut info) = info {
info.csize = fs::metadata(path.as_ref())?.len();
// I'll take my chances on a file size fitting in an i64
info.csize = fs::metadata(path.as_ref())?.len().try_into().unwrap();
info.sha256sum = sha256::try_digest(path.as_ref())?;
Ok(Package {
path: path.as_ref().to_path_buf(),
@@ -172,12 +182,6 @@ impl Package {
}
}
pub fn calculate_checksum(&mut self) -> io::Result<()> {
self.info.sha256sum = Some(sha256::try_digest(self.path.as_ref())?);
Ok(())
}
pub fn full_name(&self) -> String {
format!(
"{}-{}-{}",
@@ -216,20 +220,27 @@ impl Package {
write("NAME", &info.name)?;
write("BASE", &info.base)?;
write("VERSION", &info.version)?;
write("DESC", &info.description)?;
if let Some(ref description) = info.description {
write("DESC", description)?;
}
write("GROUPS", &info.groups.join("\n"))?;
write("CSIZE", &info.csize.to_string())?;
write("ISIZE", &info.size.to_string())?;
if let Some(checksum) = &info.sha256sum {
write("SHA256SUM", checksum)?;
write("SHA256SUM", &info.sha256sum)?;
if let Some(ref url) = info.url {
write("URL", url)?;
}
write("URL", &info.url)?;
write("LICENSE", &info.licenses.join("\n"))?;
write("ARCH", &info.arch)?;
write("BUILDDATE", &info.build_date.to_string())?;
write("PACKAGER", &info.packager)?;
write("BUILDDATE", &info.build_date.timestamp().to_string())?;
if let Some(ref packager) = info.packager {
write("PACKAGER", packager)?;
}
write("REPLACES", &info.replaces.join("\n"))?;
write("CONFLICTS", &info.conflicts.join("\n"))?;
@@ -256,3 +267,26 @@ impl Package {
Ok(())
}
}
impl From<Package> for package::ActiveModel {
fn from(pkg: Package) -> Self {
let info = pkg.info;
package::ActiveModel {
base: Set(info.base),
name: Set(info.name),
version: Set(info.version),
arch: Set(info.arch),
size: Set(info.size),
c_size: Set(info.csize),
description: Set(info.description),
url: Set(info.url),
build_date: Set(info.build_date),
packager: Set(info.packager),
pgp_sig: Set(info.pgpsig),
pgp_sig_size: Set(info.pgpsigsize),
sha256_sum: Set(info.sha256sum),
..Default::default()
}
}
}