forked from vieter-v/vieter

Very alpha support for multiple & multi-arch repos

parent 013ce511d7
commit a47cace296
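The gist of the change: the single flat repository becomes a group of named repositories, each holding one arch-repository per architecture. Pieced together from the diff below, the intended on-disk layout looks roughly like this (repository, architecture, and package names here are hypothetical; the archive names and the per-package desc/files entries come from the code itself):

    data/repo/                       # data_dir: one subdirectory per repository
    └── vieter/                      #   a repository
        ├── x86_64/                  #   an arch-repository
        │   ├── mypkg-1.0.0-1/
        │   │   ├── desc
        │   │   └── files
        │   ├── vieter.db.tar.gz
        │   └── vieter.files.tar.gz
        └── aarch64/
    data/pkgs/                       # pkg_dir: package archives, one
    └── x86_64/                      #   subdirectory per architecture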
Makefile (4 changed lines)

@@ -25,7 +25,7 @@ dvieter: $(SOURCES)
 gdb: dvieter
 	VIETER_API_KEY=test \
 	VIETER_DOWNLOAD_DIR=data/downloads \
-	VIETER_REPO_DIR=data/repo \
+	VIETER_DATA_DIR=data/repo \
 	VIETER_PKG_DIR=data/pkgs \
 	VIETER_LOG_LEVEL=DEBUG \
 	VIETER_REPOS_FILE=data/repos.json \
@@ -60,7 +60,7 @@ cli-prod:
 run: vieter
 	VIETER_API_KEY=test \
 	VIETER_DOWNLOAD_DIR=data/downloads \
-	VIETER_REPO_DIR=data/repo \
+	VIETER_DATA_DIR=data/repo \
 	VIETER_PKG_DIR=data/pkgs \
 	VIETER_LOG_LEVEL=DEBUG \
 	VIETER_REPOS_FILE=data/repos.json \
@@ -16,7 +16,7 @@ pub:
 	pkg_dir string
 	download_dir string
 	api_key string
-	repo_dir string
+	data_dir string
 	repos_file string
 }
 
@@ -101,7 +101,7 @@ fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
 
 // read_pkg extracts the file list & .PKGINFO contents from an archive
 // NOTE: this command currently only supports zstd-compressed tarballs
-pub fn read_pkg(pkg_path string) ?Pkg {
+pub fn read_pkg_archive(pkg_path string) ?Pkg {
 	if !os.is_file(pkg_path) {
 		return error("'$pkg_path' doesn't exist or isn't a file.")
 	}
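For context, a minimal caller of the renamed function might look like this. This is a sketch, not code from the repo: it assumes vieter's own package module is importable, the file name is made up, and per the NOTE above the archive must be a zstd-compressed tarball.

    import package
    
    fn main() {
    	// Parse the .PKGINFO & file list out of a package archive (hypothetical file)
    	pkg := package.read_pkg_archive('mypkg-1.0.0-1-x86_64.pkg.tar.zst') or {
    		eprintln('Failed to read package: $err.msg')
    		exit(1)
    	}
    	println('parsed $pkg.info.name $pkg.info.version')
    }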
@@ -4,15 +4,20 @@ import os
 import package
 import util
 
-// This struct manages a single repository.
-pub struct Repo {
+// Manages a group of repositories. Each repository contains one or more
+// arch-repositories, each of which represents a specific architecture.
+pub struct RepoGroupManager {
 mut:
 	mutex shared util.Dummy
 pub:
-	// Where to store repository files
-	repo_dir string [required]
-	// Where to find packages; packages are expected to all be in the same directory
+	// Where to store repositories' files
+	data_dir string [required]
+	// Where packages are stored; each architecture gets its own subdirectory
 	pkg_dir string [required]
+	// The default architecture to use for a repository. In reality, this value
+	// is only required when a package with architecture "any" is added as the
+	// first package to a repository.
+	default_arch string [required]
 }
 
 pub struct RepoAddResult {
@@ -21,28 +26,29 @@ pub:
 	pkg &package.Pkg [required]
 }
 
-// new creates a new Repo & creates the directories as needed
-pub fn new(repo_dir string, pkg_dir string) ?Repo {
-	if !os.is_dir(repo_dir) {
-		os.mkdir_all(repo_dir) or { return error('Failed to create repo directory: $err.msg') }
+// new creates a new RepoGroupManager & creates the directories as needed
+pub fn new(data_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
+	if !os.is_dir(data_dir) {
+		os.mkdir_all(data_dir) or { return error('Failed to create repo directory: $err.msg') }
 	}
 
 	if !os.is_dir(pkg_dir) {
 		os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg') }
 	}
 
-	return Repo{
-		repo_dir: repo_dir
+	return RepoGroupManager{
+		data_dir: data_dir
 		pkg_dir: pkg_dir
+		default_arch: default_arch
 	}
 }
 
 // add_from_path adds a package from an arbitrary path & moves it into the pkgs
 // directory if necessary.
-pub fn (r &Repo) add_from_path(pkg_path string) ?RepoAddResult {
-	pkg := package.read_pkg(pkg_path) or { return error('Failed to read package file: $err.msg') }
+pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
+	pkg := package.read_pkg_archive(pkg_path) or { return error('Failed to read package file: $err.msg') }
 
-	added := r.add(pkg) ?
+	added := r.add_pkg_in_repo(repo, pkg) ?
 
 	// If the add was successful, we move the file to the packages directory
 	if added {
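Putting the new constructor and entry point together, usage would presumably look something like this. A sketch under assumptions: paths, the repo name, and the package file are hypothetical, and only the fields shown in this diff (such as RepoAddResult.pkg) are used.

    import repo
    
    fn main() {
    	// Create the manager; directories are created as needed (hypothetical paths)
    	r := repo.new('data/repo', 'data/pkgs', 'x86_64') or {
    		eprintln('Failed to init RepoGroupManager: $err.msg')
    		exit(1)
    	}
    
    	// Add an uploaded archive to the 'vieter' repository
    	res := r.add_pkg_from_path('vieter', 'data/downloads/mypkg-1.0.0-1-x86_64.pkg.tar.zst') or {
    		eprintln('Failed to add package: $err.msg')
    		exit(1)
    	}
    	println('added $res.pkg.info.name')
    }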
@@ -60,9 +66,33 @@ pub fn (r &Repo) add_from_path(pkg_path string) ?RepoAddResult {
 	}
 }
 
-// add adds a given Pkg to the repository
-fn (r &Repo) add(pkg &package.Pkg) ?bool {
-	pkg_dir := r.pkg_path(pkg)
+fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?bool {
+	if pkg.info.arch == "any" {
+		repo_dir := os.join_path_single(r.data_dir, repo)
+
+		// We get a listing of all currently present arch-repos in the given repo
+		mut arch_repos := os.ls(repo_dir) ?.filter(os.is_dir(os.join_path_single(repo_dir, it)))
+
+		if arch_repos.len == 0 {
+			arch_repos << r.default_arch
+		}
+
+		for arch in arch_repos {
+			r.add_pkg_in_arch_repo(repo, arch, pkg) ?
+		}
+	} else {
+		r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg) ?
+	}
+
+	// TODO properly handle this
+	return true
+}
+
+// add_pkg_in_arch_repo adds the given package to the specified arch-repo. An
+// arch-repo is an arbitrary subdirectory of r.data_dir, but in practice, it
+// will always be an architecture-specific version of some repository.
+fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ?bool {
+	pkg_dir := os.join_path(r.data_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
 
 	// We can't add the same package twice
 	if os.exists(pkg_dir) {
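To make the "any" fan-out concrete: given a repository that already contains x86_64 and aarch64 arch-repos (hypothetical names), adding an "any" package would create an entry in each of them:

    data/repo/vieter/x86_64/mypkg-1.0.0-1/
    data/repo/vieter/aarch64/mypkg-1.0.0-1/

A package with a concrete architecture lands only under its own arch-repo, and default_arch only decides where an "any" package goes when the repository is still empty.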
@@ -70,9 +100,9 @@ fn (r &Repo) add(pkg &package.Pkg) ?bool {
 	}
 
 	// We remove the older package version first, if present
-	r.remove(pkg.info.name, false) ?
+	r.remove_pkg_from_arch_repo(repo, arch, pkg, false) ?
 
-	os.mkdir(pkg_dir) or { return error('Failed to create package directory.') }
+	os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }
 
 	os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()) or {
 		os.rmdir_all(pkg_dir) ?
@@ -85,27 +115,35 @@ fn (r &Repo) add(pkg &package.Pkg) ?bool {
 		return error('Failed to write files file.')
 	}
 
-	r.sync() ?
+	r.sync(repo, arch) ?
 
 	return true
 }
 
 // remove removes a package from the database. It returns false if the package
 // wasn't present in the database.
-fn (r &Repo) remove(pkg_name string, sync bool) ?bool {
+fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg &package.Pkg, sync bool) ?bool {
+	repo_dir := os.join_path(r.data_dir, repo, arch)
+
+	// If the repository doesn't exist yet, the result is automatically false
+	if !os.exists(repo_dir) {
+		return false
+	}
+
 	// We iterate over every directory in the repo dir
-	for d in os.ls(r.repo_dir) ? {
+	// TODO filter so we only check directories
+	for d in os.ls(repo_dir) ? {
 		name := d.split('-')#[..-2].join('-')
 
-		if name == pkg_name {
+		if name == pkg.info.name {
 			// We lock the mutex here to prevent other routines from creating a
 			// new archive while we removed an entry
 			lock r.mutex {
-				os.rmdir_all(os.join_path_single(r.repo_dir, d)) ?
+				os.rmdir_all(os.join_path_single(repo_dir, d)) ?
 			}
 
 			if sync {
-				r.sync() ?
+				r.sync(repo, arch) ?
 			}
 
 			return true
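The `d.split('-')#[..-2].join('-')` line above recovers a package name from its directory name. Package directories are named '$pkg.info.name-$pkg.info.version', and (assuming version strings of the usual pkgver-pkgrel form from .PKGINFO) the version contributes exactly two dash-separated fields, so dropping the last two fields yields the name even when the name itself contains dashes. A standalone illustration with hypothetical values:

    fn main() {
    	d := 'python-requests-2.27.1-1'
    	// Drop the trailing pkgver & pkgrel fields to recover the name
    	name := d.split('-')#[..-2].join('-')
    	assert name == 'python-requests'
    	println(name)
    }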
@@ -114,8 +152,3 @@ fn (r &Repo) remove(pkg_name string, sync bool) ?bool {
 
 	return false
 }
-
-// Returns the path where the given package's desc & files files are stored
-fn (r &Repo) pkg_path(pkg &package.Pkg) string {
-	return os.join_path(r.repo_dir, '$pkg.info.name-$pkg.info.version')
-}
@@ -30,8 +30,9 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 }
 
 // Re-generate the repo archive files
-fn (r &Repo) sync() ? {
-	// TODO also write files archive
+fn (r &RepoGroupManager) sync(repo string, arch string) ? {
+	subrepo_path := os.join_path(r.data_dir, repo, arch)
+
 	lock r.mutex {
 		a_db := C.archive_write_new()
 		a_files := C.archive_write_new()
@@ -44,18 +45,18 @@ fn (r &Repo) sync() ? {
 		C.archive_write_add_filter_gzip(a_files)
 		C.archive_write_set_format_pax_restricted(a_files)
 
-		db_path := os.join_path_single(r.repo_dir, 'vieter.db.tar.gz')
-		files_path := os.join_path_single(r.repo_dir, 'vieter.files.tar.gz')
+		db_path := os.join_path_single(subrepo_path, 'vieter.db.tar.gz')
+		files_path := os.join_path_single(subrepo_path, 'vieter.files.tar.gz')
 
 		C.archive_write_open_filename(a_db, &char(db_path.str))
 		C.archive_write_open_filename(a_files, &char(files_path.str))
 
 		// Iterate over each directory
-		for d in os.ls(r.repo_dir) ?.filter(os.is_dir(os.join_path_single(r.repo_dir,
+		for d in os.ls(subrepo_path) ?.filter(os.is_dir(os.join_path_single(subrepo_path,
 			it))) {
 			// desc
 			mut inner_path := os.join_path_single(d, 'desc')
-			mut actual_path := os.join_path_single(r.repo_dir, inner_path)
+			mut actual_path := os.join_path_single(subrepo_path, inner_path)
 
 			archive_add_entry(a_db, entry, actual_path, inner_path)
 			archive_add_entry(a_files, entry, actual_path, inner_path)
@@ -64,7 +65,7 @@ fn (r &Repo) sync() ? {
 
 			// files
 			inner_path = os.join_path_single(d, 'files')
-			actual_path = os.join_path_single(r.repo_dir, inner_path)
+			actual_path = os.join_path_single(subrepo_path, inner_path)
 
 			archive_add_entry(a_files, entry, actual_path, inner_path)
 
@@ -8,6 +8,8 @@ import rand
 import util
 import net.http
 
+const default_repo = "vieter"
+
 // healthcheck just returns a string, but can be used to quickly check if the
 // server is still responsive.
 ['/health'; get]
@@ -21,9 +23,9 @@ fn (mut app App) get_root(filename string) web.Result {
 	mut full_path := ''
 
 	if filename.ends_with('.db') || filename.ends_with('.files') {
-		full_path = os.join_path_single(app.repo.repo_dir, '${filename}.tar.gz')
+		full_path = os.join_path_single(app.repo.data_dir, '${filename}.tar.gz')
 	} else if filename.ends_with('.db.tar.gz') || filename.ends_with('.files.tar.gz') {
-		full_path = os.join_path_single(app.repo.repo_dir, '$filename')
+		full_path = os.join_path_single(app.repo.data_dir, '$filename')
 	} else {
 		full_path = os.join_path_single(app.repo.pkg_dir, filename)
 	}
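For reference, a pacman client consumes a repository served this way with a standard pacman.conf entry along these lines (hostname and repo name hypothetical; the URL scheme for the nested repo/arch layout isn't settled by this commit, since the route above still resolves db files against the top of data_dir):

    [vieter]
    SigLevel = Optional
    Server = https://example.com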
@@ -74,7 +76,7 @@ fn (mut app App) put_package() web.Result {
 		return app.text("Content-Type header isn't set.")
 	}
 
-	res := app.repo.add_from_path(pkg_path) or {
+	res := app.repo.add_pkg_from_path(default_repo, pkg_path) or {
 		app.lerror('Error while adding package: $err.msg')
 
 		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
@@ -14,7 +14,7 @@ struct App {
 pub:
 	conf env.ServerConfig [required; web_global]
 pub mut:
-	repo repo.Repo [required; web_global]
+	repo repo.RepoGroupManager [required; web_global]
 	// This is used to claim the file lock on the repos file
 	git_mutex shared util.Dummy
 }
@@ -42,7 +42,7 @@ pub fn server() ? {
 	}
 
 	// This also creates the directories if needed
-	repo := repo.new(conf.repo_dir, conf.pkg_dir) or {
+	repo := repo.new(conf.data_dir, conf.pkg_dir, "x86_64") or {
 		logger.error(err.msg)
 		exit(1)
 	}