Compare commits

...

7 Commits
dev ... distro

Author SHA1 Message Date
Jef Roosens 9ad19eb36d
refactor(server): move some consts around
ci/woodpecker/push/lint Pipeline was successful Details
ci/woodpecker/push/clippy Pipeline failed Details
ci/woodpecker/push/build Pipeline was successful Details
2023-08-17 14:25:25 +02:00
Jef Roosens 50ebffb459
feat(server): POST request to create distros 2023-08-17 10:56:08 +02:00
Jef Roosens b7be311485
refactor(server): separate api pieces into modules
ci/woodpecker/push/lint Pipeline was successful Details
ci/woodpecker/push/clippy Pipeline failed Details
ci/woodpecker/push/build Pipeline was successful Details
2023-08-17 10:24:29 +02:00
Jef Roosens 80fb6d22f8
refactor(server): separate web logic into separate module 2023-08-17 10:10:55 +02:00
Jef Roosens 1b80bcd757
feat(server): add read-only distro api routes 2023-08-17 09:50:17 +02:00
Jef Roosens 0565328ea8
feat(server): start distro table
ci/woodpecker/push/lint Pipeline was successful Details
ci/woodpecker/push/clippy Pipeline failed Details
ci/woodpecker/push/build Pipeline was successful Details
2023-08-13 21:16:43 +02:00
Jef Roosens 2f7c4c34f7
refactor(server): split initial migration 2023-08-13 20:40:52 +02:00
26 changed files with 654 additions and 380 deletions

1
Cargo.lock generated
View File

@ -1636,6 +1636,7 @@ dependencies = [
"chrono", "chrono",
"clap", "clap",
"futures", "futures",
"hyper",
"libarchive", "libarchive",
"sea-orm", "sea-orm",
"sea-orm-migration", "sea-orm-migration",

View File

@ -8,6 +8,7 @@ authors = ["Jef Roosens"]
[dependencies] [dependencies]
axum = { version = "0.6.18", features = ["http2"] } axum = { version = "0.6.18", features = ["http2"] }
hyper = "*"
chrono = { version = "0.4.26", features = ["serde"] } chrono = { version = "0.4.26", features = ["serde"] }
clap = { version = "4.3.12", features = ["env", "derive"] } clap = { version = "4.3.12", features = ["env", "derive"] }
futures = "0.3.28" futures = "0.3.28"

View File

@ -1,77 +0,0 @@
mod pagination;
use axum::extract::{Path, Query, State};
use axum::routing::get;
use axum::Json;
use axum::Router;
use pagination::PaginatedResponse;
use crate::db;
pub fn router() -> Router<crate::Global> {
Router::new()
.route("/repos", get(get_repos))
.route("/repos/:id", get(get_single_repo))
.route("/packages", get(get_packages))
.route("/packages/:id", get(get_single_package))
}
async fn get_repos(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<db::repo::Model>>> {
let (total_pages, repos) = global
.db
.repo
.page(
pagination.per_page.unwrap_or(25),
pagination.page.unwrap_or(1) - 1,
)
.await?;
Ok(Json(pagination.res(total_pages, repos)))
}
async fn get_single_repo(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<db::repo::Model>> {
let repo = global
.db
.repo
.by_id(id)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;
Ok(Json(repo))
}
async fn get_packages(
State(global): State<crate::Global>,
Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
let (total_pages, pkgs) = global
.db
.pkg
.page(
pagination.per_page.unwrap_or(25),
pagination.page.unwrap_or(1) - 1,
)
.await?;
Ok(Json(pagination.res(total_pages, pkgs)))
}
async fn get_single_package(
State(global): State<crate::Global>,
Path(id): Path<i32>,
) -> crate::Result<Json<crate::db::FullPackage>> {
let entry = global
.db
.pkg
.full(id)
.await?
.ok_or(axum::http::StatusCode::NOT_FOUND)?;
Ok(Json(entry))
}

View File

@ -2,12 +2,10 @@ use crate::repo::RepoGroupManager;
use crate::{Config, Global}; use crate::{Config, Global};
use axum::extract::FromRef; use axum::extract::FromRef;
use axum::Router;
use clap::Parser; use clap::Parser;
use std::io; use std::io;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use tower_http::trace::TraceLayer;
use tracing::debug; use tracing::debug;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
@ -95,18 +93,9 @@ impl Cli {
}; };
// build our application with a single route // build our application with a single route
let app = Router::new() let app = crate::web::app(global, &self.api_key);
.nest("/api", crate::api::router()) Ok(crate::web::serve(app, self.port)
.merge(crate::repo::router(&self.api_key))
.with_state(global)
.layer(TraceLayer::new_for_http());
// run it with hyper on localhost:3000
Ok(
axum::Server::bind(&format!("0.0.0.0:{}", self.port).parse().unwrap())
.serve(app.into_make_service())
.await .await
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?, .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?)
)
} }
} }

View File

@ -0,0 +1,32 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
/// Database row for a Linux distribution tracked by the server.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "distro")]
pub struct Model {
    // Auto-increment primary key; skipped on deserialize so API clients
    // cannot choose their own id in POST bodies.
    #[sea_orm(primary_key)]
    #[serde(skip_deserializing)]
    pub id: i32,
    // Unique, URL-friendly identifier (bounded to 255 chars by the migration)
    #[sea_orm(unique)]
    pub slug: String,
    // Unique human-readable name
    #[sea_orm(unique)]
    pub name: String,
    pub description: Option<String>,
    pub url: Option<String>,
}
/// A distro owns many repositories (via the `repo.distro_id` foreign key).
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::repo::Entity")]
    Repo,
}

impl Related<super::repo::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Repo.def()
    }
}

// No custom save/delete hooks; the default active-model behavior suffices.
impl ActiveModelBehavior for ActiveModel {}

View File

@ -2,6 +2,7 @@
pub mod prelude; pub mod prelude;
pub mod distro;
pub mod package; pub mod package;
pub mod package_conflicts; pub mod package_conflicts;
pub mod package_depends; pub mod package_depends;

View File

@ -1,5 +1,6 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1 //! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.1
pub use super::distro::Entity as Distro;
pub use super::package::Entity as Package; pub use super::package::Entity as Package;
pub use super::package_conflicts::Entity as PackageConflicts; pub use super::package_conflicts::Entity as PackageConflicts;
pub use super::package_depends::Entity as PackageDepends; pub use super::package_depends::Entity as PackageDepends;

View File

@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
pub struct Model { pub struct Model {
#[sea_orm(primary_key)] #[sea_orm(primary_key)]
pub id: i32, pub id: i32,
pub distro_id: i32,
#[sea_orm(unique)] #[sea_orm(unique)]
pub name: String, pub name: String,
pub description: Option<String>, pub description: Option<String>,
@ -15,10 +16,24 @@ pub struct Model {
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation { pub enum Relation {
#[sea_orm(
belongs_to = "super::distro::Entity",
from = "Column::DistroId",
to = "super::distro::Column::Id",
on_update = "NoAction",
on_delete = "Cascade"
)]
Distro,
#[sea_orm(has_many = "super::package::Entity")] #[sea_orm(has_many = "super::package::Entity")]
Package, Package,
} }
impl Related<super::distro::Entity> for Entity {
fn to() -> RelationDef {
Relation::Distro.def()
}
}
impl Related<super::package::Entity> for Entity { impl Related<super::package::Entity> for Entity {
fn to() -> RelationDef { fn to() -> RelationDef {
Relation::Package.def() Relation::Package.def()

View File

@ -0,0 +1,98 @@
use sea_orm_migration::prelude::*;
/// First migration: creates the `distro` table and the `repo` table that
/// references it.
pub struct Migration;

impl MigrationName for Migration {
    fn name(&self) -> &str {
        // Stored in the migrations table; must stay stable once deployed.
        "m_20230813_000001_create_dist_tables"
    }
}
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Create `distro` first, then `repo` (which holds a foreign key to it).
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Distro::Table)
                    .col(
                        ColumnDef::new(Distro::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    // Bounded length so the unique constraint is indexable on
                    // all supported backends
                    .col(
                        ColumnDef::new(Distro::Slug)
                            .string_len(255)
                            .not_null()
                            .unique_key(),
                    )
                    .col(
                        ColumnDef::new(Distro::Name)
                            .string()
                            .not_null()
                            .unique_key(),
                    )
                    .col(ColumnDef::new(Distro::Description).string())
                    .col(ColumnDef::new(Distro::Url).string())
                    .to_owned(),
            )
            .await?;

        manager
            .create_table(
                Table::create()
                    .table(Repo::Table)
                    .col(
                        ColumnDef::new(Repo::Id)
                            .integer()
                            .not_null()
                            .auto_increment()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Repo::DistroId).integer().not_null())
                    .col(ColumnDef::new(Repo::Name).string().not_null().unique_key())
                    .col(ColumnDef::new(Repo::Description).string())
                    // Deleting a distro cascades to its repos
                    .foreign_key(
                        ForeignKey::create()
                            .name("fk-repo-distro_id")
                            .from(Repo::Table, Repo::DistroId)
                            .to(Distro::Table, Distro::Id)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    /// Roll back: drop `repo` first (it carries the foreign key), then `distro`.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Repo::Table).to_owned())
            .await?;
        manager
            .drop_table(Table::drop().table(Distro::Table).to_owned())
            .await
    }
}
/// Table and column identifiers for the `distro` table.
#[derive(Iden)]
pub enum Distro {
    Table,
    Id,
    Slug,
    Name,
    Description,
    Url,
}

/// Table and column identifiers for the `repo` table.
#[derive(Iden)]
pub enum Repo {
    Table,
    Id,
    DistroId,
    Name,
    Description,
}

View File

@ -4,29 +4,13 @@ pub struct Migration;
impl MigrationName for Migration { impl MigrationName for Migration {
fn name(&self) -> &str { fn name(&self) -> &str {
"m_20230730_000001_create_repo_tables" "m_20230813_000002_create_package_tables"
} }
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(Repo::Table)
.col(
ColumnDef::new(Repo::Id)
.integer()
.not_null()
.auto_increment()
.primary_key(),
)
.col(ColumnDef::new(Repo::Name).string().not_null().unique_key())
.col(ColumnDef::new(Repo::Description).string())
.to_owned(),
)
.await?;
manager manager
.create_table( .create_table(
Table::create() Table::create()
@ -292,9 +276,6 @@ impl MigrationTrait for Migration {
.await?; .await?;
manager manager
.drop_table(Table::drop().table(Package::Table).to_owned()) .drop_table(Table::drop().table(Package::Table).to_owned())
.await?;
manager
.drop_table(Table::drop().table(Repo::Table).to_owned())
.await .await
} }
} }
@ -303,8 +284,6 @@ impl MigrationTrait for Migration {
pub enum Repo { pub enum Repo {
Table, Table,
Id, Id,
Name,
Description,
} }
#[derive(Iden)] #[derive(Iden)]

View File

@ -1,12 +1,16 @@
mod m20230813_000001_create_dist_tables;
mod m20230813_000002_create_package_tables;
use sea_orm_migration::prelude::*; use sea_orm_migration::prelude::*;
pub struct Migrator; pub struct Migrator;
mod m20230730_000001_create_repo_tables;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigratorTrait for Migrator { impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> { fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![Box::new(m20230730_000001_create_repo_tables::Migration)] vec![
Box::new(m20230813_000001_create_dist_tables::Migration),
Box::new(m20230813_000002_create_package_tables::Migration),
]
} }
} }

View File

@ -42,6 +42,7 @@ pub struct RieterDb {
conn: DatabaseConnection, conn: DatabaseConnection,
pub pkg: query::PackageQuery, pub pkg: query::PackageQuery,
pub repo: query::RepoQuery, pub repo: query::RepoQuery,
pub distro: query::DistroQuery,
} }
impl RieterDb { impl RieterDb {
@ -54,6 +55,7 @@ impl RieterDb {
conn: db.clone(), conn: db.clone(),
pkg: query::PackageQuery::new(db.clone()), pkg: query::PackageQuery::new(db.clone()),
repo: query::RepoQuery::new(db.clone()), repo: query::RepoQuery::new(db.clone()),
distro: query::DistroQuery::new(db.clone()),
}) })
} }
} }

View File

@ -0,0 +1,52 @@
use sea_orm::*;
use crate::db::*;
/// Database access object bundling all queries against the `distro` table.
#[derive(Clone, Debug)]
pub struct DistroQuery {
    // Connection handle shared with the other query objects in RieterDb
    conn: DatabaseConnection,
}
impl DistroQuery {
    /// Wrap an open database connection.
    pub fn new(conn: DatabaseConnection) -> Self {
        Self { conn }
    }

    /// Return the total page count together with one 0-based page of distros,
    /// ordered ascending by id.
    pub async fn page(&self, per_page: u64, page: u64) -> Result<(u64, Vec<distro::Model>)> {
        let paginator = Distro::find()
            .order_by_asc(distro::Column::Id)
            .paginate(&self.conn, per_page);
        let total_pages = paginator.num_pages().await?;
        let models = paginator.fetch_page(page).await?;

        Ok((total_pages, models))
    }

    /// Look up a single distro by primary key.
    pub async fn by_id(&self, id: i32) -> Result<Option<distro::Model>> {
        distro::Entity::find_by_id(id).one(&self.conn).await
    }

    /// Insert a new distro built from its individual fields.
    pub async fn insert(
        &self,
        slug: &str,
        name: &str,
        description: Option<&str>,
        url: Option<&str>,
    ) -> Result<InsertResult<distro::ActiveModel>> {
        let new_row = distro::ActiveModel {
            id: NotSet,
            slug: Set(slug.to_string()),
            name: Set(name.to_string()),
            description: Set(description.map(ToString::to_string)),
            url: Set(url.map(ToString::to_string)),
        };

        Distro::insert(new_row).exec(&self.conn).await
    }

    /// Insert a full model, discarding any caller-supplied id so the database
    /// assigns a fresh one; returns the stored row.
    pub async fn insert_model(&self, model: distro::Model) -> Result<distro::Model> {
        let mut active: distro::ActiveModel = model.into();
        active.id = NotSet;

        active.insert(&self.conn).await
    }
}

View File

@ -1,6 +1,8 @@
mod distro;
mod package; mod package;
mod repo; mod repo;
pub use distro::DistroQuery;
pub use package::PackageQuery; pub use package::PackageQuery;
pub use repo::RepoQuery; pub use repo::RepoQuery;

View File

@ -40,6 +40,8 @@ impl RepoQuery {
) -> Result<InsertResult<repo::ActiveModel>> { ) -> Result<InsertResult<repo::ActiveModel>> {
let model = repo::ActiveModel { let model = repo::ActiveModel {
id: NotSet, id: NotSet,
// TODO CHANGE THIS
distro_id: NotSet,
name: Set(String::from(name)), name: Set(String::from(name)),
description: Set(description.map(String::from)), description: Set(description.map(String::from)),
}; };

View File

@ -35,10 +35,12 @@ impl IntoResponse for ServerError {
ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), ServerError::IO(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), ServerError::Axum(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
ServerError::Status(status) => status.into_response(), ServerError::Status(status) => status.into_response(),
ServerError::Db(sea_orm::DbErr::RecordNotFound(_)) => { ServerError::Db(err) => match err {
StatusCode::NOT_FOUND.into_response() sea_orm::DbErr::RecordNotFound(_) => StatusCode::NOT_FOUND,
sea_orm::DbErr::Query(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
} }
ServerError::Db(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), .into_response(),
} }
} }
} }

View File

@ -1,8 +1,8 @@
mod api;
mod cli; mod cli;
pub mod db; pub mod db;
mod error; mod error;
mod repo; mod repo;
mod web;
use clap::Parser; use clap::Parser;
pub use error::{Result, ServerError}; pub use error::{Result, ServerError};

View File

@ -5,8 +5,6 @@ use std::fs;
use std::io; use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
pub const ANY_ARCH: &str = "any";
/// Overarching abstraction that orchestrates updating the repositories stored on the server /// Overarching abstraction that orchestrates updating the repositories stored on the server
pub struct RepoGroupManager { pub struct RepoGroupManager {
repo_dir: PathBuf, repo_dir: PathBuf,
@ -48,9 +46,9 @@ impl RepoGroupManager {
// All architectures should also include the "any" architecture, except for the "any" // All architectures should also include the "any" architecture, except for the "any"
// architecture itself. // architecture itself.
let repo_any_dir = self.repo_dir.join(repo).join(ANY_ARCH); let repo_any_dir = self.repo_dir.join(repo).join(super::ANY_ARCH);
let any_entries_iter = if arch != ANY_ARCH && repo_any_dir.try_exists()? { let any_entries_iter = if arch != super::ANY_ARCH && repo_any_dir.try_exists()? {
Some(repo_any_dir.read_dir()?) Some(repo_any_dir.read_dir()?)
} else { } else {
None None
@ -159,7 +157,7 @@ impl RepoGroupManager {
pkg.write_files(&mut files_file)?; pkg.write_files(&mut files_file)?;
// If a package of type "any" is added, we need to update every existing database // If a package of type "any" is added, we need to update every existing database
if pkg.info.arch == ANY_ARCH { if pkg.info.arch == super::ANY_ARCH {
self.sync_all(repo)?; self.sync_all(repo)?;
} else { } else {
self.sync(repo, &pkg.info.arch)?; self.sync(repo, &pkg.info.arch)?;
@ -193,7 +191,7 @@ impl RepoGroupManager {
fs::remove_dir_all(self.pkg_dir.join(sub_path))?; fs::remove_dir_all(self.pkg_dir.join(sub_path))?;
// Removing the "any" architecture updates all other repositories // Removing the "any" architecture updates all other repositories
if arch == ANY_ARCH { if arch == super::ANY_ARCH {
self.sync_all(repo)?; self.sync_all(repo)?;
} }
@ -250,7 +248,7 @@ impl RepoGroupManager {
})?; })?;
if sync { if sync {
if arch == ANY_ARCH { if arch == super::ANY_ARCH {
self.sync_all(repo)?; self.sync_all(repo)?;
} else { } else {
self.sync(repo, arch)?; self.sync(repo, arch)?;
@ -288,7 +286,7 @@ impl RepoGroupManager {
fs::remove_dir_all(self.repo_dir.join(repo).join(arch).join(metadata_dir_name))?; fs::remove_dir_all(self.repo_dir.join(repo).join(arch).join(metadata_dir_name))?;
if sync { if sync {
if arch == ANY_ARCH { if arch == super::ANY_ARCH {
self.sync_all(&repo.to_string_lossy())?; self.sync_all(&repo.to_string_lossy())?;
} else { } else {
self.sync(&repo.to_string_lossy(), arch)?; self.sync(&repo.to_string_lossy(), arch)?;

View File

@ -1,255 +1,7 @@
mod manager; pub mod manager;
pub mod package; pub mod package;
pub use manager::RepoGroupManager; pub use manager::RepoGroupManager;
use std::path::PathBuf; pub const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
pub const ANY_ARCH: &str = "any";
use axum::body::Body;
use axum::extract::{BodyStream, Path, State};
use axum::http::Request;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{delete, post};
use axum::Router;
use futures::StreamExt;
use sea_orm::ModelTrait;
use std::sync::Arc;
use tokio::{fs, io::AsyncWriteExt};
use tower::util::ServiceExt;
use tower_http::services::{ServeDir, ServeFile};
use tower_http::validate_request::ValidateRequestHeaderLayer;
use uuid::Uuid;
const DB_FILE_EXTS: [&str; 4] = [".db", ".files", ".db.tar.gz", ".files.tar.gz"];
pub fn router(api_key: &str) -> Router<crate::Global> {
Router::new()
.route(
"/:repo",
post(post_package_archive)
.delete(delete_repo)
.route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
.route(
"/:repo/:arch",
delete(delete_arch_repo).route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
)
// Routes added after the layer do not get that layer applied, so the GET requests will not
// be authorized
.route(
"/:repo/:arch/:filename",
delete(delete_package)
.route_layer(ValidateRequestHeaderLayer::bearer(api_key))
.get(get_file),
)
}
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
async fn get_file(
State(global): State<crate::Global>,
Path((repo, arch, mut file_name)): Path<(String, String, String)>,
req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
let repo_exists = tokio::fs::try_exists(&repo_dir).await?;
let res = if DB_FILE_EXTS.iter().any(|ext| file_name.ends_with(ext)) {
// Append tar extension to ensure we find the file
if !file_name.ends_with(".tar.gz") {
file_name.push_str(".tar.gz");
};
if repo_exists {
ServeFile::new(repo_dir.join(file_name)).oneshot(req).await
} else {
let path = global
.config
.repo_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
ServeFile::new(path).oneshot(req).await
}
} else {
let any_file = global
.config
.pkg_dir
.join(repo)
.join(manager::ANY_ARCH)
.join(file_name);
if repo_exists {
ServeDir::new(global.config.pkg_dir)
.fallback(ServeFile::new(any_file))
.oneshot(req)
.await
} else {
ServeFile::new(any_file).oneshot(req).await
}
};
Ok(res)
}
async fn post_package_archive(
State(global): State<crate::Global>,
Path(repo): Path<String>,
mut body: BodyStream,
) -> crate::Result<()> {
// We first stream the uploaded file to disk
let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
let path = global.config.pkg_dir.join(uuid.to_string());
let mut f = fs::File::create(&path).await?;
while let Some(chunk) = body.next().await {
f.write_all(&chunk?).await?;
}
let clone = Arc::clone(&global.repo_manager);
let path_clone = path.clone();
let repo_clone = repo.clone();
let res = tokio::task::spawn_blocking(move || {
clone
.write()
.unwrap()
.add_pkg_from_path(&repo_clone, &path_clone)
})
.await?;
match res {
// Insert the newly added package into the database
Ok(pkg) => {
tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);
// Query the repo for its ID, or create it if it does not already exist
let res = global.db.repo.by_name(&repo).await?;
let repo_id = if let Some(repo_entity) = res {
repo_entity.id
} else {
global.db.repo.insert(&repo, None).await?.last_insert_id
};
// If the package already exists in the database, we remove it first
let res = global
.db
.pkg
.by_fields(repo_id, &pkg.info.name, None, &pkg.info.arch)
.await?;
if let Some(entry) = res {
entry.delete(&global.db).await?;
}
global.db.pkg.insert(repo_id, pkg).await?;
Ok(())
}
// Remove the uploaded file and return the error
Err(err) => {
tokio::fs::remove_file(path).await?;
Err(err.into())
}
}
}
async fn delete_repo(
State(global): State<crate::Global>,
Path(repo): Path<String>,
) -> crate::Result<StatusCode> {
let clone = Arc::clone(&global.repo_manager);
let repo_clone = repo.clone();
let repo_removed =
tokio::task::spawn_blocking(move || clone.write().unwrap().remove_repo(&repo_clone))
.await??;
if repo_removed {
let res = global.db.repo.by_name(&repo).await?;
if let Some(repo_entry) = res {
repo_entry.delete(&global.db).await?;
}
tracing::info!("Removed repository '{}'", repo);
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}
async fn delete_arch_repo(
State(global): State<crate::Global>,
Path((repo, arch)): Path<(String, String)>,
) -> crate::Result<StatusCode> {
let clone = Arc::clone(&global.repo_manager);
let arch_clone = arch.clone();
let repo_clone = repo.clone();
let repo_removed = tokio::task::spawn_blocking(move || {
clone
.write()
.unwrap()
.remove_repo_arch(&repo_clone, &arch_clone)
})
.await??;
if repo_removed {
let res = global.db.repo.by_name(&repo).await?;
if let Some(repo_entry) = res {
global.db.pkg.delete_with_arch(repo_entry.id, &arch).await?;
}
tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}
async fn delete_package(
State(global): State<crate::Global>,
Path((repo, arch, file_name)): Path<(String, String, String)>,
) -> crate::Result<StatusCode> {
let clone = Arc::clone(&global.repo_manager);
let path = PathBuf::from(&repo).join(arch).join(&file_name);
let res = tokio::task::spawn_blocking(move || {
clone.write().unwrap().remove_pkg_from_path(path, true)
})
.await??;
if let Some((name, version, release, arch)) = res {
let res = global.db.repo.by_name(&repo).await?;
if let Some(repo_entry) = res {
let res = global
.db
.pkg
.by_fields(
repo_entry.id,
&name,
Some(&format!("{}-{}", version, release)),
&arch,
)
.await?;
if let Some(entry) = res {
entry.delete(&global.db).await?;
}
}
tracing::info!("Removed '{}' from repository '{}'", file_name, repo);
Ok(StatusCode::OK)
} else {
Ok(StatusCode::NOT_FOUND)
}
}

View File

@ -0,0 +1,50 @@
use axum::{
extract::{Path, Query, State},
routing::get,
Json, Router,
};
use super::pagination::{self, PaginatedResponse};
use crate::db;
/// Distro API routes: list/create on the collection root, fetch-by-id on `/:id`.
pub fn router() -> Router<crate::Global> {
    let collection = get(get_distros).post(post_distro);

    Router::new()
        .route("/:id", get(get_single_distro))
        .route("/", collection)
}
/// Paginated list of distros.
///
/// Query parameters: `page` (1-based, default 1) and `per_page` (default 25).
async fn get_distros(
    State(global): State<crate::Global>,
    Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<db::distro::Model>>> {
    // The query layer is 0-based; saturating_sub prevents an unsigned
    // underflow panic when a client sends `page=0`.
    let (total_pages, distros) = global
        .db
        .distro
        .page(
            pagination.per_page.unwrap_or(25),
            pagination.page.unwrap_or(1).saturating_sub(1),
        )
        .await?;

    Ok(Json(pagination.res(total_pages, distros)))
}
/// Return one distro by id; responds 404 NOT_FOUND when the id is unknown.
async fn get_single_distro(
    State(global): State<crate::Global>,
    Path(id): Path<i32>,
) -> crate::Result<Json<db::distro::Model>> {
    // `by_id` yields an Option; a missing row maps to a 404 response.
    let distro = global.db.distro.by_id(id).await?;
    let distro = distro.ok_or(axum::http::StatusCode::NOT_FOUND)?;

    Ok(Json(distro))
}
/// Create a new distro from the JSON request body and echo the stored row.
async fn post_distro(
    State(global): State<crate::Global>,
    Json(model): Json<db::distro::Model>,
) -> crate::Result<Json<db::distro::Model>> {
    let inserted = global.db.distro.insert_model(model).await?;

    Ok(Json(inserted))
}

View File

@ -0,0 +1,13 @@
mod distros;
mod packages;
mod pagination;
mod repos;
use axum::Router;
pub fn router() -> Router<crate::Global> {
Router::new()
.nest("/distros", distros::router())
.nest("/repos", repos::router())
.nest("/packages", packages::router())
}

View File

@ -0,0 +1,44 @@
use axum::{
extract::{Path, Query, State},
routing::get,
Json, Router,
};
use super::pagination::{self, PaginatedResponse};
use crate::db;
/// Read-only package routes: paginated collection plus a detail view.
pub fn router() -> Router<crate::Global> {
    Router::new()
        .route("/:id", get(get_single_package))
        .route("/", get(get_packages))
}
/// Paginated list of packages.
///
/// Query parameters: `page` (1-based, default 1) and `per_page` (default 25).
async fn get_packages(
    State(global): State<crate::Global>,
    Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<db::package::Model>>> {
    // The query layer is 0-based; saturating_sub prevents an unsigned
    // underflow panic when a client sends `page=0`.
    let (total_pages, pkgs) = global
        .db
        .pkg
        .page(
            pagination.per_page.unwrap_or(25),
            pagination.page.unwrap_or(1).saturating_sub(1),
        )
        .await?;

    Ok(Json(pagination.res(total_pages, pkgs)))
}
/// Full metadata for one package (including related rows); 404 when missing.
async fn get_single_package(
    State(global): State<crate::Global>,
    Path(id): Path<i32>,
) -> crate::Result<Json<crate::db::FullPackage>> {
    let full = global.db.pkg.full(id).await?;
    let entry = full.ok_or(axum::http::StatusCode::NOT_FOUND)?;

    Ok(Json(entry))
}

View File

@ -0,0 +1,43 @@
use axum::{
extract::{Path, Query, State},
routing::get,
Json, Router,
};
use super::pagination::{self, PaginatedResponse};
use crate::db;
/// Read-only repo routes: paginated collection plus a detail view.
pub fn router() -> Router<crate::Global> {
    Router::new()
        .route("/:id", get(get_single_repo))
        .route("/", get(get_repos))
}
/// Paginated list of repositories.
///
/// Query parameters: `page` (1-based, default 1) and `per_page` (default 25).
async fn get_repos(
    State(global): State<crate::Global>,
    Query(pagination): Query<pagination::Query>,
) -> crate::Result<Json<PaginatedResponse<db::repo::Model>>> {
    // The query layer is 0-based; saturating_sub prevents an unsigned
    // underflow panic when a client sends `page=0`.
    let (total_pages, repos) = global
        .db
        .repo
        .page(
            pagination.per_page.unwrap_or(25),
            pagination.page.unwrap_or(1).saturating_sub(1),
        )
        .await?;

    Ok(Json(pagination.res(total_pages, repos)))
}
/// Return one repository by id; responds 404 NOT_FOUND when the id is unknown.
async fn get_single_repo(
    State(global): State<crate::Global>,
    Path(id): Path<i32>,
) -> crate::Result<Json<db::repo::Model>> {
    let found = global.db.repo.by_id(id).await?;
    let repo = found.ok_or(axum::http::StatusCode::NOT_FOUND)?;

    Ok(Json(repo))
}

View File

@ -0,0 +1,19 @@
mod api;
mod repo;
use axum::{Router, Server};
use tower_http::trace::TraceLayer;
/// Build the full application router: the JSON API nested under `/api`, the
/// package-repository routes merged at the root (mutating routes guarded by
/// `api_key`), shared state attached, and HTTP tracing enabled.
pub fn app(global: crate::Global, api_key: &str) -> Router {
    Router::new()
        .nest("/api", api::router())
        .merge(repo::router(api_key))
        .with_state(global)
        // A layer added after the routes applies to all of them
        .layer(TraceLayer::new_for_http())
}
/// Bind to 0.0.0.0 on `port` and serve `app` until the server exits.
pub async fn serve(app: Router, port: u16) -> Result<(), hyper::Error> {
    let addr = std::net::SocketAddr::from(([0, 0, 0, 0], port));

    Server::bind(&addr).serve(app.into_make_service()).await
}

View File

@ -0,0 +1,251 @@
use std::path::PathBuf;
use axum::body::Body;
use axum::extract::{BodyStream, Path, State};
use axum::http::Request;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{delete, post};
use axum::Router;
use futures::StreamExt;
use sea_orm::ModelTrait;
use std::sync::Arc;
use tokio::{fs, io::AsyncWriteExt};
use tower::util::ServiceExt;
use tower_http::services::{ServeDir, ServeFile};
use tower_http::validate_request::ValidateRequestHeaderLayer;
use uuid::Uuid;
/// Package-repository routes. All mutating routes require the bearer
/// `api_key`; the file-download GET is registered after its auth layer so
/// downloads stay public.
pub fn router(api_key: &str) -> Router<crate::Global> {
    Router::new()
        .route(
            "/:repo",
            post(post_package_archive)
                .delete(delete_repo)
                .route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
        )
        .route(
            "/:repo/:arch",
            delete(delete_arch_repo).route_layer(ValidateRequestHeaderLayer::bearer(api_key)),
        )
        // Routes added after the layer do not get that layer applied, so the GET requests will not
        // be authorized
        .route(
            "/:repo/:arch/:filename",
            delete(delete_package)
                .route_layer(ValidateRequestHeaderLayer::bearer(api_key))
                .get(get_file),
        )
}
/// Serve the package archive files and database archives. If files are requested for an
/// architecture that does not have any explicit packages, a repository containing only "any" files
/// is returned.
async fn get_file(
    State(global): State<crate::Global>,
    Path((repo, arch, mut file_name)): Path<(String, String, String)>,
    req: Request<Body>,
) -> crate::Result<impl IntoResponse> {
    let repo_dir = global.config.repo_dir.join(&repo).join(&arch);
    let repo_exists = tokio::fs::try_exists(&repo_dir).await?;

    // Database/files archives live in the repo dir; anything else is treated
    // as a package archive under the pkg dir.
    let res = if crate::repo::DB_FILE_EXTS
        .iter()
        .any(|ext| file_name.ends_with(ext))
    {
        // Append tar extension to ensure we find the file
        if !file_name.ends_with(".tar.gz") {
            file_name.push_str(".tar.gz");
        };

        if repo_exists {
            ServeFile::new(repo_dir.join(file_name)).oneshot(req).await
        } else {
            // No arch-specific repo on disk: fall back to the "any" repo
            let path = global
                .config
                .repo_dir
                .join(repo)
                .join(crate::repo::ANY_ARCH)
                .join(file_name);

            ServeFile::new(path).oneshot(req).await
        }
    } else {
        let any_file = global
            .config
            .pkg_dir
            .join(repo)
            .join(crate::repo::ANY_ARCH)
            .join(file_name);

        if repo_exists {
            // Prefer the requested file, falling back to its "any" copy
            ServeDir::new(global.config.pkg_dir)
                .fallback(ServeFile::new(any_file))
                .oneshot(req)
                .await
        } else {
            ServeFile::new(any_file).oneshot(req).await
        }
    };

    Ok(res)
}
/// Accept an uploaded package archive for `repo`, add it to the on-disk
/// repository, then register it in the database. The body is streamed to a
/// uniquely-named temp file first; on failure that file is removed again.
async fn post_package_archive(
    State(global): State<crate::Global>,
    Path(repo): Path<String>,
    mut body: BodyStream,
) -> crate::Result<()> {
    // We first stream the uploaded file to disk
    let uuid: uuid::fmt::Simple = Uuid::new_v4().into();
    let path = global.config.pkg_dir.join(uuid.to_string());
    let mut f = fs::File::create(&path).await?;

    while let Some(chunk) = body.next().await {
        f.write_all(&chunk?).await?;
    }

    // Parsing/moving the archive is blocking work, so run it off the async
    // executor while holding the manager's write lock.
    let clone = Arc::clone(&global.repo_manager);
    let path_clone = path.clone();
    let repo_clone = repo.clone();
    let res = tokio::task::spawn_blocking(move || {
        clone
            .write()
            .unwrap()
            .add_pkg_from_path(&repo_clone, &path_clone)
    })
    .await?;

    match res {
        // Insert the newly added package into the database
        Ok(pkg) => {
            tracing::info!("Added '{}' to repository '{}'", pkg.file_name(), repo);

            // Query the repo for its ID, or create it if it does not already exist
            let res = global.db.repo.by_name(&repo).await?;
            let repo_id = if let Some(repo_entity) = res {
                repo_entity.id
            } else {
                global.db.repo.insert(&repo, None).await?.last_insert_id
            };

            // If the package already exists in the database, we remove it first
            let res = global
                .db
                .pkg
                .by_fields(repo_id, &pkg.info.name, None, &pkg.info.arch)
                .await?;

            if let Some(entry) = res {
                entry.delete(&global.db).await?;
            }

            global.db.pkg.insert(repo_id, pkg).await?;

            Ok(())
        }
        // Remove the uploaded file and return the error
        Err(err) => {
            tokio::fs::remove_file(path).await?;

            Err(err.into())
        }
    }
}
/// Delete a whole repository. The on-disk removal happens first; the
/// database entry is only cleaned up when a directory was actually removed,
/// otherwise the handler answers 404.
async fn delete_repo(
    State(global): State<crate::Global>,
    Path(repo): Path<String>,
) -> crate::Result<StatusCode> {
    // Filesystem removal is blocking; run it outside the async executor
    let clone = Arc::clone(&global.repo_manager);
    let repo_clone = repo.clone();
    let repo_removed =
        tokio::task::spawn_blocking(move || clone.write().unwrap().remove_repo(&repo_clone))
            .await??;

    if repo_removed {
        let res = global.db.repo.by_name(&repo).await?;

        if let Some(repo_entry) = res {
            repo_entry.delete(&global.db).await?;
        }

        tracing::info!("Removed repository '{}'", repo);

        Ok(StatusCode::OK)
    } else {
        Ok(StatusCode::NOT_FOUND)
    }
}
/// Delete a single architecture from a repository. On success the matching
/// package rows are removed from the database; 404 when the on-disk arch
/// was not present.
async fn delete_arch_repo(
    State(global): State<crate::Global>,
    Path((repo, arch)): Path<(String, String)>,
) -> crate::Result<StatusCode> {
    // Filesystem removal is blocking; run it outside the async executor
    let clone = Arc::clone(&global.repo_manager);
    let arch_clone = arch.clone();
    let repo_clone = repo.clone();
    let repo_removed = tokio::task::spawn_blocking(move || {
        clone
            .write()
            .unwrap()
            .remove_repo_arch(&repo_clone, &arch_clone)
    })
    .await??;

    if repo_removed {
        let res = global.db.repo.by_name(&repo).await?;

        if let Some(repo_entry) = res {
            global.db.pkg.delete_with_arch(repo_entry.id, &arch).await?;
        }

        tracing::info!("Removed architecture '{}' from repository '{}'", arch, repo);

        Ok(StatusCode::OK)
    } else {
        Ok(StatusCode::NOT_FOUND)
    }
}
/// Delete one package file from a repository. When the manager reports the
/// removed package's identity, the matching database row (looked up by name,
/// "version-release" and arch) is deleted as well; 404 when no file was
/// removed from disk.
async fn delete_package(
    State(global): State<crate::Global>,
    Path((repo, arch, file_name)): Path<(String, String, String)>,
) -> crate::Result<StatusCode> {
    // Filesystem removal is blocking; run it outside the async executor
    let clone = Arc::clone(&global.repo_manager);
    let path = PathBuf::from(&repo).join(arch).join(&file_name);
    let res = tokio::task::spawn_blocking(move || {
        clone.write().unwrap().remove_pkg_from_path(path, true)
    })
    .await??;

    if let Some((name, version, release, arch)) = res {
        let res = global.db.repo.by_name(&repo).await?;

        if let Some(repo_entry) = res {
            let res = global
                .db
                .pkg
                .by_fields(
                    repo_entry.id,
                    &name,
                    // The database stores version and release as one string
                    Some(&format!("{}-{}", version, release)),
                    &arch,
                )
                .await?;

            if let Some(entry) = res {
                entry.delete(&global.db).await?;
            }
        }

        tracing::info!("Removed '{}' from repository '{}'", file_name, repo);

        Ok(StatusCode::OK)
    } else {
        Ok(StatusCode::NOT_FOUND)
    }
}