forked from vieter-v/vieter

Merge pull request 'Release 0.3.0-alpha.1' (#164) from release-0.3.0-alpha.1 into main

Reviewed-on: vieter/vieter#164

commit 230920576d
@@ -1,13 +1,13 @@
*.c
data/
/data/

# Build artifacts
vieter
dvieter
pvieter
dvieterctl
vieterctl
vieter.c
/vieter
/dvieter
/pvieter
/suvieter
/afvieter
/vieter.c

# Ignore testing files
*.pkg*

@@ -23,3 +23,6 @@ v/

# gdb log file
gdb.txt

# Generated docs
_docs/

@@ -0,0 +1,3 @@
[submodule "docs/themes/hugo-book"]
	path = docs/themes/hugo-book
	url = https://github.com/alex-shpak/hugo-book

@@ -5,11 +5,14 @@ matrix:
platform: ${PLATFORM}
branches: [dev]
skip_clone: true

pipeline:
  build:
    image: 'menci/archlinuxarm:base-devel'
    commands:
      # Add the vieter repository so we can use the compiler
      - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
      # Update packages
      - pacman -Syu --noconfirm
      # Create non-root user to perform build & switch to their home
@@ -18,8 +21,12 @@ pipeline:
      - chown -R builder:builder "$PWD"
      - "echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers"
      - su builder
      # Build the package
      # Due to a bug with the V compiler, we can't just use the PKGBUILD from
      # inside the repo
      - curl -OL https://git.rustybever.be/vieter/vieter/raw/branch/dev/PKGBUILD
      - makepkg -s --noconfirm --needed
    when:
      event: push

  publish:
    image: 'curlimages/curl'
@@ -28,3 +35,5 @@ pipeline:
      - 'for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $VIETER_API_KEY" https://arch.r8r.be/vieter/publish; done'
    secrets:
      - vieter_api_key
    when:
      event: push

@@ -1,32 +1,29 @@
matrix:
  PLATFORM:
    - linux/amd64
    - linux/arm64
    # I just don't have a performant enough runner for this platform
    # - linux/arm/v7
    - 'linux/amd64'
    - 'linux/arm64'

# These checks already get performed on the feature branches
platform: ${PLATFORM}

pipeline:
  # The default build isn't needed, as alpine switches to gcc for the compiler anyways
  debug:
    image: 'chewingbever/vlang:latest'
    pull: true
    group: 'build'
    commands:
      - make debug
      - make
    when:
      event: push
      event: [pull_request]
      branch:
        exclude: [main]

  prod:
    image: 'chewingbever/vlang:latest'
    pull: true
    environment:
      - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static
    group: 'build'
      - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static
    commands:
      - make prod
      # Apparently this -D is *very* important
      - CFLAGS='-DGC_THREADS=1' make prod
      # Make sure the binary is actually statically built
      - readelf -d pvieter
      - du -h pvieter
@@ -35,7 +32,7 @@ pipeline:
      - strip -s pvieter
      - du -h pvieter
    when:
      event: push
      event: [push, pull_request]

  upload:
    image: 'chewingbever/vlang:latest'
@@ -52,6 +49,7 @@ pipeline:
      - >
        curl
        --silent
        --fail
        -XPUT
        -T pvieter
        -H "Host: $URL"
@@ -60,4 +58,4 @@ pipeline:
        -H "Authorization: AWS $S3_USERNAME:$SIGNATURE"
        https://$URL$OBJ_PATH
    when:
      event: push
      event: [push, pull_request]

@@ -0,0 +1,18 @@
branches: [ 'dev' ]
platform: 'linux/amd64'
depends_on:
  - 'docker'

skip_clone: true

pipeline:
  webhooks:
    image: 'curlimages/curl'
    secrets:
      - 'webhook_app'
      - 'webhook_cron'
    commands:
      - 'curl -XPOST -s --fail $WEBHOOK_APP'
      - 'curl -XPOST -s --fail $WEBHOOK_CRON'
    when:
      event: push

@@ -1,30 +1,34 @@
branches: [main, dev]
platform: linux/amd64
platform: 'linux/amd64'
depends_on:
  - build

pipeline:
  dev:
    image: woodpeckerci/plugin-docker-buildx
    secrets: [ docker_username, docker_password ]
    image: 'woodpeckerci/plugin-docker-buildx'
    secrets:
      - 'docker_username'
      - 'docker_password'
    settings:
      repo: chewingbever/vieter
      tag: dev
      platforms: [ linux/arm64/v8, linux/amd64 ]
      repo: 'chewingbever/vieter'
      tag: 'dev'
      platforms: [ 'linux/arm64/v8', 'linux/amd64' ]
      build_args_from_env:
        - CI_COMMIT_SHA
        - 'CI_COMMIT_SHA'
    when:
      event: push
      branch: dev

  release:
    image: woodpeckerci/plugin-docker-buildx
    secrets: [ docker_username, docker_password ]
    image: 'woodpeckerci/plugin-docker-buildx'
    secrets:
      - 'docker_username'
      - 'docker_password'
    settings:
      repo: chewingbever/vieter
      repo: 'chewingbever/vieter'
      auto_tag: true
      platforms: [ linux/arm64/v8, linux/amd64 ]
      platforms: [ 'linux/arm64/v8', 'linux/amd64' ]
      build_args_from_env:
        - CI_COMMIT_SHA
        - 'CI_COMMIT_SHA'
    when:
      event: tag

@@ -0,0 +1,42 @@
platform: 'linux/amd64'
branches:
  exclude: [ main ]

pipeline:
  docs:
    image: 'klakegg/hugo:alpine'
    group: 'generate'
    commands:
      - apk add git
      - make docs
      - 'cd docs/public && tar czvf ../../docs.tar.gz *'

  api-docs:
    image: 'chewingbever/vlang:latest'
    pull: true
    group: 'generate'
    commands:
      - make api-docs
      - 'cd src/_docs && tar czvf ../../api-docs.tar.gz *'

  deploy-docs:
    image: 'curlimages/curl'
    group: 'deploy'
    secrets:
      - 'site_api_key'
    commands:
      - 'curl -XPOST --fail -s -H "Authorization: Bearer $SITE_API_KEY" -T docs.tar.gz https://rustybever.be/api/deploy?dir=docs-vieter'
    when:
      event: push
      branch: dev

  deploy-api-docs:
    image: 'curlimages/curl'
    group: 'deploy'
    secrets:
      - 'site_api_key'
    commands:
      - 'curl -XPOST --fail -s -H "Authorization: Bearer $SITE_API_KEY" -T api-docs.tar.gz https://rustybever.be/api/deploy?dir=api-docs-vieter'
    when:
      event: push
      branch: dev

@@ -1,6 +1,5 @@
# Yeah so this only works on tags so we'll worry about this later
platform: linux/amd64
branches: main
platform: 'linux/amd64'
branches: [ 'main' ]
depends_on:
  - build

@@ -1,13 +1,13 @@
# These checks already get performed on the feature branches
branches:
  exclude: [ main, dev ]
platform: linux/amd64
  exclude: [ main ]
platform: 'linux/amd64'

pipeline:
  lint:
    image: 'chewingbever/vlang:latest'
    pull: true
    group: lint
    commands:
      - make lint
      - make vet
    when:
      event: [ pull_request ]

@@ -0,0 +1,17 @@
matrix:
  PLATFORM:
    - 'linux/amd64'
    - 'linux/arm64'

branches:
  exclude: [ main ]
platform: ${PLATFORM}

pipeline:
  test:
    image: 'chewingbever/vlang:latest'
    pull: true
    commands:
      - make test
    when:
      event: [pull_request]

CHANGELOG.md
@@ -5,9 +5,26 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased](https://git.rustybever.be/Chewing_Bever/vieter)
## [0.3.0-alpha.1](https://git.rustybever.be/vieter/vieter/src/tag/0.3.0-alpha.1)

## [0.2.0](https://git.rustybever.be/Chewing_Bever/vieter/src/tag/0.2.0)
### Changed

* Switched from compiler fork to fully vanilla compiler mirror
* `download_dir`, `repos_file` & `repos_dir` config values have been replaced
  with `data_dir`
* Storage of metadata (e.g. Git repositories) is now done using Sqlite

### Added

* Implemented own cron daemon for builder
* Build schedule can be configured globally or individually per repository
* Added CLI command to show detailed information per repo

### Fixed

* Binary no longer panics when an env var is missing

## [0.2.0](https://git.rustybever.be/vieter/vieter/src/tag/0.2.0)

### Changed

@@ -41,13 +58,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Packages with unknown fields in .PKGINFO are now allowed
* Old packages are now properly removed

## [0.1.0](https://git.rustybever.be/Chewing_Bever/vieter/src/tag/0.1.0)
## [0.1.0](https://git.rustybever.be/vieter/vieter/src/tag/0.1.0)

### Changed

* Improved logging

## [0.1.0-rc.1](https://git.rustybever.be/Chewing_Bever/vieter/src/tag/0.1.0-rc.1)
## [0.1.0-rc.1](https://git.rustybever.be/vieter/vieter/src/tag/0.1.0-rc.1)

### Added

@@ -23,7 +23,7 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
        "https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
        chmod +x vieter ; \
    else \
        LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static' make prod && \
        LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static' make prod && \
        mv pvieter vieter ; \
    fi

@@ -31,10 +31,8 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
FROM busybox:1.35.0

ENV PATH=/bin \
    VIETER_REPOS_DIR=/data/repos \
    VIETER_PKG_DIR=/data/pkgs \
    VIETER_DOWNLOAD_DIR=/data/downloads \
    VIETER_REPOS_FILE=/data/repos.json
    VIETER_DATA_DIR=/data \
    VIETER_PKG_DIR=/data/pkgs

COPY --from=builder /app/dumb-init /app/vieter /bin/

Makefile
@@ -2,11 +2,12 @@
SRC_DIR := src
SOURCES != find '$(SRC_DIR)' -iname '*.v'

V_PATH ?= v/v
V_PATH ?= v
V := $(V_PATH) -showcc -gc boehm

all: vieter


# =====COMPILATION=====
# Regular binary
vieter: $(SOURCES)

@@ -23,7 +24,7 @@ dvieter: $(SOURCES)
# Run the debug build inside gdb
.PHONY: gdb
gdb: dvieter
	gdb --args './dvieter -f vieter.toml server'
	gdb --args ./dvieter -f vieter.toml server

# Optimised production build
.PHONY: prod

@@ -33,39 +34,69 @@ pvieter: $(SOURCES)

# Only generate C code
.PHONY: c
c:
c: $(SOURCES)
	$(V) -o vieter.c $(SRC_DIR)


# =====EXECUTION=====
# Run the server in the default 'data' directory
.PHONY: run
run: vieter
	./vieter -f vieter.toml server

.PHONY: run-prod
run-prod: prod
	./pvieter -f vieter.toml server


# =====DOCS=====
.PHONY: docs
docs:
	rm -rf 'docs/public'
	cd docs && hugo

.PHONY: api-docs
api-docs:
	rm -rf '$(SRC_DIR)/_docs'
	cd '$(SRC_DIR)' && v doc -all -f html -m -readme .


# =====OTHER=====
.PHONY: lint
lint:
	$(V) fmt -verify $(SRC_DIR)
	$(V) vet -W $(SRC_DIR)
	$(V_PATH) missdoc -p $(SRC_DIR)
	@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]

# Format the V codebase
.PHONY: fmt
fmt:
	$(V) fmt -w $(SRC_DIR)

.PHONY: vet
vet:
	$(V) vet -W $(SRC_DIR)
.PHONY: test
test:
	$(V) test $(SRC_DIR)

# Build & patch the V compiler
.PHONY: v
v: v/v
v/v:
	git clone --single-branch --branch patches https://git.rustybever.be/Chewing_Bever/vieter-v v
	git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v
	make -C v

.PHONY: clean
clean:
	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter'
	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public'


# =====EXPERIMENTAL=====
.PHONY: autofree
autofree: afvieter
afvieter: $(SOURCES)
	$(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR)

.PHONY: skip-unused
skip-unused: suvieter
suvieter: $(SOURCES)
	$(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR)

PKGBUILD
@@ -2,33 +2,31 @@
pkgbase='vieter'
pkgname='vieter'
pkgver=0.1.0.rc1.r117.gc3ac00f
pkgver=0.2.0.r25.g20112b8
pkgrel=1
depends=('glibc' 'openssl' 'libarchive' 'gc')
makedepends=('git' 'gcc')
arch=('x86_64' 'aarch64' 'armv7')
url='https://git.rustybever.be/Chewing_Bever/vieter'
depends=('glibc' 'openssl' 'libarchive' 'gc' 'sqlite')
makedepends=('git' 'gcc' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter/vieter'
license=('AGPL3')
source=($pkgname::git+https://git.rustybever.be/Chewing_Bever/vieter#branch=dev)
source=($pkgname::git+https://git.rustybever.be/vieter/vieter#branch=dev)
md5sums=('SKIP')

pkgver() {
	cd "$pkgname"

	git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}

build() {
	cd "$pkgname"

	# Build the compiler
	CFLAGS= make v

	make prod
}

package() {
	pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
	install -dm755 "$pkgdir/usr/bin"

	install -dm755 "$pkgdir/usr/bin"
	install -Dm755 "$pkgbase/pvieter" "$pkgdir/usr/bin/vieter"
}

README.md
@@ -2,7 +2,9 @@

## Documentation

I host documentation for Vieter over at https://rustybever.be/docs/vieter/.
I host documentation for Vieter over at https://rustybever.be/docs/vieter/. API
documentation for the current codebase can be found at
https://rustybever.be/api-docs/vieter/.

## Overview

@@ -20,15 +22,12 @@ a while now. I wanted a fast language that I could code while relaxing, without
having to exert too much mental effort & V seemed like the right choice for
that.

### Custom Compiler
### Compiler

Currently, this program only works with a very slightly modified version of the
V standard library, and therefore the compiler. The source code for this fork
can be found [here](https://git.rustybever.be/Chewing_Bever/vieter-v). You can
obtain this modified version of the compiler by running `make v`, which will
clone & build the compiler. Afterwards, all make commands that require the V
compiler will use this new binary. I try to keep this fork as up to date with
upstream as possible.
Vieter compiles with the standard Vlang compiler. However, I do maintain a
[mirror](https://git.rustybever.be/Chewing_Bever/v). This is to ensure my CI
does not break without reason, as I control when & how frequently the mirror is
updated to reflect the official repository.

## Features

@@ -44,9 +43,15 @@ upstream as possible.

In order to build Vieter, you'll need a couple of libraries:

* An installation of V
* gc
* libarchive
* openssl

Before building Vieter, you'll have to build the compiler using `make v`.
Afterwards, run `make` to build the debug binary.
**NOTE**: if you encounter any issues compiling Vieter using the absolute
latest version of V, it might be because my mirror is missing a specific commit
that causes issues. For this reason, the `make v` command exists, which will
clone my compiler into the `v` directory & build it. Afterwards, you can use
this compiler with make by prepending all make commands with `V_PATH=v/v`. If
you do encounter this issue, please let me know so I can update my mirror & the
codebase to fix it!
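
For example, building with the mirrored compiler could look like this (a sketch
based on the `make v` instructions above & the Makefile targets in this diff):

```sh
# Clone & build the mirrored compiler into the v/ directory (only needed once)
make v

# Point all subsequent make invocations at the freshly built compiler
V_PATH=v/v make        # debug build
V_PATH=v/v make prod   # optimised production build
```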

@@ -0,0 +1,2 @@
.hugo_build.lock
/public/

@@ -0,0 +1,17 @@
platform: 'linux/amd64'
branches: 'main'

pipeline:
  release:
    image: 'klakegg/hugo:alpine'
    commands:
      - apk add git
      - hugo
      - 'cd public && tar czvf ../public.tar.gz *'

  deploy:
    image: 'curlimages/curl'
    secrets:
      - 'api_key'
    commands:
      - 'curl -XPOST --fail -s -H "Authorization: Bearer $API_KEY" -T public.tar.gz https://rustybever.be/api/deploy?dir=docs'

@@ -0,0 +1,9 @@
MIT License

Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,3 @@
# docs

Repository containing docs for various personal projects I've made.

@@ -0,0 +1,6 @@
---
title: "{{ replace .Name "-" " " | title }}"
date: {{ .Date }}
draft: true
---

@@ -0,0 +1,108 @@
# hugo server --minify --themesDir ... --baseURL=http://0.0.0.0:1313/theme/hugo-book/

baseURL = 'https://rustybever.be/docs/vieter/'
title = 'The Rusty Bever - Docs'
theme = 'hugo-book'

# Book configuration
disablePathToLower = true
enableGitInfo = true

# Needed for mermaid/katex shortcodes
[markup]
[markup.goldmark.renderer]
  unsafe = true

[markup.tableOfContents]
  startLevel = 1

# Multi-lingual mode config
# There are different options to translate files
# See https://gohugo.io/content-management/multilingual/#translation-by-filename
# And https://gohugo.io/content-management/multilingual/#translation-by-content-directory
[languages]
[languages.en]
  languageName = 'English'
  contentDir = 'content'
  weight = 1

[menu]
# [[menu.before]]
[[menu.after]]
  name = "Source"
  url = "https://git.rustybever.be/Chewing_Bever/docs"
  weight = 10

[[menu.after]]
  name = "Hugo Theme"
  url = "https://github.com/alex-shpak/hugo-book"
  weight = 20

[params]
  # (Optional, default light) Sets color theme: light, dark or auto.
  # Theme 'auto' switches between dark and light modes based on browser/os preferences
  BookTheme = 'auto'

  # (Optional, default true) Controls table of contents visibility on right side of pages.
  # Start and end levels can be controlled with markup.tableOfContents setting.
  # You can also specify this parameter per page in front matter.
  BookToC = true

  # (Optional, default none) Set the path to a logo for the book. If the logo is
  # /static/logo.png then the path would be logo.png
  # BookLogo = 'logo.png'

  # (Optional, default none) Set leaf bundle to render as side menu
  # When not specified file structure and weights will be used
  # BookMenuBundle = '/menu'

  # (Optional, default docs) Specify root page to render child pages as menu.
  # Page is resolved by the .GetPage function: https://gohugo.io/functions/getpage/
  # For backward compatibility you can set '*' to render all sections to menu. Acts same as '/'
  BookSection = '/'

  # Set source repository location.
  # Used for 'Last Modified' and 'Edit this page' links.
  BookRepo = 'https://git.rustybever.be/Chewing_Bever/docs'

  # (Optional, default 'commit') Specifies the commit portion of the link to the page's last modified
  # commit hash for 'doc' page type.
  # Requires 'BookRepo' param.
  # Value used to construct a URL consisting of BookRepo/BookCommitPath/<commit-hash>
  # Github uses 'commit', Bitbucket uses 'commits'
  # BookCommitPath = 'commit'

  # Enable "Edit this page" links for 'doc' page type.
  # Disabled by default. Uncomment to enable. Requires 'BookRepo' param.
  # Edit path must point to root directory of repo.
  # BookEditPath = 'edit/main/exampleSite'

  # Configure the date format used on the pages
  # - In git information
  # - In blog posts
  BookDateFormat = 'January 2, 2006'

  # (Optional, default true) Enables search function with flexsearch.
  # The index is built on the fly, so it might slow down your website.
  # Configuration for indexing can be adjusted in the i18n folder per language.
  BookSearch = true

  # (Optional, default true) Enables comments template on pages
  # By default partials/docs/comments.html includes the Disqus template
  # See https://gohugo.io/content-management/comments/#configure-disqus
  # Can be overwritten by the same param in page frontmatter
  BookComments = false

  # /!\ This is an experimental feature, might be removed or changed at any time
  # (Optional, experimental, default false) Enables portable links and link checks in markdown pages.
  # Portable links are meant to work with text editors and let you write markdown without the {{< relref >}} shortcode
  # The theme will print a warning if a page referenced in markdown does not exist.
  BookPortableLinks = true

  # /!\ This is an experimental feature, might be removed or changed at any time
  # (Optional, experimental, default false) Enables a service worker that caches visited pages and resources for offline use.
  BookServiceWorker = true

  # /!\ This is an experimental feature, might be removed or changed at any time
  # (Optional, experimental, default false) Enables a drop-down menu for translations only if a translation is present.
  BookTranslatedOnly = false

@@ -0,0 +1,27 @@
# Vieter CLI

I provide a simple CLI tool that currently only allows interacting with the
Git repository API. Its usage is quite simple.

First, you need to create a file in your home directory called `.vieterrc` with
the following content:

```toml
address = "https://example.com"
api_key = "your-api-key"
```

You can also use a different file or use environment variables, as described in
[Configuration](/configuration).

Now you're ready to use the CLI tool.

## Usage

* `vieter repos list` returns all repositories currently stored in the API.
* `vieter repos add url branch repo arch...` adds the repository with the given
  URL, branch, repo & arch to the API.
* `vieter repos remove id` removes the repository with the given ID prefix.

You can always check `vieter -help` or `vieter repos -help` for more
information about the commands.
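
As a concrete sketch of these commands (the URL, branch & ID values below are
made up):

```sh
# Add a repository to the API, list all repositories, then remove one by ID prefix
vieter repos add https://example.com/some-pkgbuild-repo.git main somerepo x86_64
vieter repos list
vieter repos remove 1f
```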

@@ -0,0 +1,62 @@
# Vieter

{{< hint warning >}}
**Important**
Because this project is still in heavy development, this documentation tries to
follow the development branch & not the latest release. This means that the
documentation might no longer be relevant for the latest release.
{{< /hint >}}

## Overview

Vieter has a few main features:

* It's a simple & lightweight implementation of an Arch repository server
* It allows for uploading of built package archives
* It supports a basic build system to periodically re-build packages & upload
  them to the server

{{< hint info >}}
**Note**
While I mention Vieter being an "Arch" repository server, it works with any
distribution that uses Pacman as the package manager. I do recommend using a
base Docker image for your distribution if you wish to use the build system as
well.
{{< /hint >}}

### Why?

Vieter is my personal solution to a problem I've been facing for months:
extremely long AUR package build times. I run EndeavourOS on both my laptops,
one of which is a rather old MacBook Air. I really like being a beta-tester
for projects & run development builds for multiple packages (nheko,
newsflash...). The issue with this is that I have to regularly re-build these
packages in order to stay up to date with development & these builds can take a
really long time on the old MacBook. This project is a solution to that
problem: instead of building the packages locally, I can build them
automatically in the cloud & just download them whenever I update my system!
Thanks to this solution, I'm able to shave 10-15 minutes off my update times,
just from not having to compile everything every time there's an update.

Besides this, it's also just really useful to have a repository server that you
control & can upload your own packages to. For example, I package my st
terminal using a CI pipeline & upload it to my repository!

### Why V?

I had been interested in learning V for a couple of months, ever since I
stumbled upon it by accident. It looked like a promising language & turned out
to be very fun to use! It's fast & easy to learn, & it's a nice contrast with
my usual Rust-based projects, which tend to get quite complex.

I recommend checking out their [homepage](https://vlang.io/)!

### What's with the name?

Before deciding to write this project in V, I wrote a prototype in Python,
called [Pieter](https://git.rustybever.be/Chewing_Bever/pieter). The name
Pieter came from Pieter Post, the Dutch name for [Postman
Pat](https://en.wikipedia.org/wiki/Postman_Pat). The idea was that the server
"delivered packages", & a good friend of mine suggested the name. When I
decided to switch over to V, I changed the P (for Python) to a V; it seemed
fitting.

@@ -0,0 +1,84 @@
# API Reference

All routes that return JSON use the following shape:

```json
{
  "message": "some message",
  "data": {}
}
```

Here, `data` can be any JSON object, so it's not guaranteed to be a struct.

### `GET /<repo>/<arch>/<filename>`

This route serves the contents of a specific architecture's repo.

If `<filename>` is one of `<repo>.db`, `<repo>.files`, `<repo>.db.tar.gz` or
`<repo>.files.tar.gz`, it will serve the respective archive file from the
repository.

If `<filename>` contains `.pkg`, it assumes the request to be for a package
archive & will serve that file from the specific arch-repo's package directory.

Finally, if none of the above are true, Vieter assumes it to be a request for a
package version's desc file & tries to serve this instead. This functionality
is very useful for the build system to check whether a package needs to be
rebuilt or not.

### `HEAD /<repo>/<arch>/<filename>`

Behaves the same as the above route, but instead of returning actual data, it
returns either 200 or 404, depending on whether the file exists. This route is
used by the build system to determine whether a package needs to be rebuilt.
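
For instance, the check the build containers perform could look like this (the
repo, architecture & package values are hypothetical):

```sh
# 200 if this exact package version already exists in the repo, 404 otherwise
curl --head --fail https://example.com/somerepo/x86_64/somepackage-1.0.0-1
```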

### `POST /<repo>/publish`

This route is used to upload packages to a repository. It requires the API
key to be provided using the `X-Api-Key` HTTP header. Vieter will parse the
package's contents & update the repository files accordingly. I find the
easiest way to use this route is with cURL:

```sh
curl -XPOST -T "path-to-package.pkg.tar.zst" -H "X-API-KEY: your-api-key" https://example.com/somerepo/publish
```

Packages are automatically added to the correct arch-repo. If a package's type
is `any`, the package is added to the configured `default_arch`, as well as all
already present arch-repos. To prevent unnecessary duplication of package
files, these packages are shared between arch-repos' package directories using
hard links.

{{< hint info >}}
**Note**
Vieter only supports uploading archives compressed using either gzip, zstd or
xz at the moment.
{{< /hint >}}

## API

All API routes require the API key to be provided using the `X-Api-Key` header.
Otherwise, they'll return status code 401.

### `GET /api/repos`

Returns the current list of Git repositories.

### `GET /api/repos/<id>`

Get the information for the Git repo with the given ID.

### `POST /api/repos?<url>&<branch>&<arch>&<repo>`

Adds a new Git repository with the provided URL, Git branch & comma-separated
list of architectures.

### `DELETE /api/repos/<id>`

Deletes the Git repository with the provided ID.

### `PATCH /api/repos/<id>?<url>&<branch>&<arch>&<repo>`

Updates the provided parameters for the repo with the given ID. All arguments
are optional.
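
A minimal sketch of driving these endpoints with cURL (the URL, key & parameter
values are placeholders):

```sh
# Add a new Git repository to be built, then list all repositories
curl -XPOST -H "X-Api-Key: your-api-key" \
    "https://example.com/api/repos?url=https://example.com/some-pkgbuild-repo.git&branch=main&arch=x86_64&repo=somerepo"
curl -H "X-Api-Key: your-api-key" https://example.com/api/repos
```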

@@ -0,0 +1,56 @@
# Builder

Vieter supports a basic build system that allows you to build the packages
defined using the Git repositories API by running `vieter build`. For
configuration, see [here](/configuration#builder).

## How it works

The build system works in two stages. First it pulls down the
`archlinux:latest` image from Docker Hub, runs `pacman -Syu` & configures a
non-root build user. It then creates a new Docker image from this container.
This prevents each build from having to fully update the container's
repositories. After the image has been created, each repository returned by
`/api/repos` is built sequentially by starting up a new container with the
previously created image as a base. Each container goes through the following
steps (sketched as a shell script below):

1. The repository is cloned
2. `makepkg --nobuild --nodeps` is run to update the `pkgver` variable inside
   the `PKGBUILD` file
3. A HEAD request is sent to the Vieter server to check whether the specific
   version of the package is already present. If it is, the container exits.
4. `makepkg` is run with `MAKEFLAGS="-j$(nproc)"`
5. Each produced package archive is uploaded to the Vieter instance's
   repository, as defined in the API for that specific Git repo.
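
Put together, the script each container executes is roughly the following; it
mirrors the command list in `src/build/build.v` further down in this diff, with
the uppercase variables as placeholders the builder fills in:

```sh
git clone --single-branch --depth 1 --branch $BRANCH $REPO_URL repo
cd repo
makepkg --nobuild --nodeps
source PKGBUILD
# Exit early if this exact package version is already on the server
curl --head --fail $ADDRESS/$REPO/$ARCH/$pkgname-$pkgver-$pkgrel && exit 0
# Build, then upload every produced package archive
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed
for pkg in $(ls -1 *.pkg*); do
    curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" $ADDRESS/$REPO/publish
done
```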

## Cron image

The Vieter Docker image contains crond & a cron config that runs `vieter build`
every night at 3AM. This value is currently hardcoded, but I wish to change
that down the line (work is in progress). There are also some other caveats you
should be aware of, namely that the image should be run as root & that the
healthcheck will always fail, so you might have to disable it. This boils down
to the following docker-compose file:

```yaml
version: '3'

services:
  cron:
    image: 'chewingbever/vieter:dev'
    command: crond -f
    user: root

    healthcheck:
      disable: true

    environment:
      - 'VIETER_API_KEY=some-key'
      - 'VIETER_ADDRESS=https://example.com'
    volumes:
      - '/var/run/docker.sock:/var/run/docker.sock'
```

Important to note is that the container also requires the host's Docker socket
to be mounted, as this is how it spawns the necessary containers, as well as a
change to the container's command.

@@ -0,0 +1,71 @@
---
weight: 20
---
# Configuration

All vieter operations by default try to read in the TOML file `~/.vieterrc` for
configuration. The location of this file can be changed by using the `-f` flag.

If the above file doesn't exist or you wish to override some of its settings,
configuration is also possible using environment variables. Every variable in
the config file has a respective environment variable of the following form:
say the variable is called `api_key`, then the respective environment variable
would be `VIETER_API_KEY`. In essence, it's the variable in uppercase prepended
with `VIETER_`.

If a variable is both present in the config file & as an environment variable,
the value in the environment variable is used.

{{< hint info >}}
**Note**
All environment variables can also be provided from a file by appending them
with `_FILE`. This for example allows you to provide the API key from a docker
secrets file.
{{< /hint >}}

## Modes

The vieter binary can run in several "modes", indicated by the first argument
passed to it. Each mode requires a different configuration.

### Server

* `log_level`: defines how verbose the logs should be. Valid values are one of
  `FATAL`, `ERROR`, `WARN`, `INFO` or `DEBUG`. Defaults to `WARN`.
* `log_file`: log file to write logs to. Defaults to `vieter.log` in the
  current directory.
* `pkg_dir`: where Vieter should store the actual package archives.
* `data_dir`: where Vieter stores the repositories, log file & database.
* `api_key`: the API key to use when authenticating requests.
* `default_arch`: architecture to always add packages of arch `any` to.

### Builder

* `api_key`: the API key to use when authenticating requests.
* `address`: base URL of your Vieter instance, e.g. https://example.com
* `base_image`: image to use when building a package. It should be an Archlinux
  image. The default if not configured is `archlinux:base-devel`, but this
  image only supports x86_64. If you require aarch64 support as well, consider
  using
  [`menci/archlinuxarm:base-devel`](https://hub.docker.com/r/menci/archlinuxarm)
  ([GH](https://github.com/Menci/docker-archlinuxarm))

### Repos

* `api_key`: the API key to use when authenticating requests.
* `address`: base URL of your Vieter instance, e.g. https://example.com

### Cron

* `log_level`: defines how verbose the logs should be. Valid values are one of
  `FATAL`, `ERROR`, `WARN`, `INFO` or `DEBUG`. Defaults to `WARN`.
* `api_key`: the API key to use when authenticating requests.
* `address`: base URL of your Vieter instance, e.g. https://example.com.
  This *must* be the publicly facing URL of your Vieter instance.
* `data_dir`: where Vieter stores the log file.
* `base_image`: Docker image from which to create the builder images.
* `max_concurrent_builds`: amount of builds to run at once.
* `api_update_frequency`: how frequently to check for changes in the repo list.
* `image_rebuild_frequency`: how frequently to rebuild the builder image.
* `global_schedule`: cron schedule to use for any repo without an individual
  schedule.

@@ -0,0 +1,78 @@
---
weight: 10
---
# Installation

## Docker

Docker is the recommended way to install vieter. The images can be pulled from
[`chewingbever/vieter`](https://hub.docker.com/r/chewingbever/vieter). You can
either pull a release tag (e.g. `chewingbever/vieter:0.1.0-rc1`), or pull the
`chewingbever/vieter:dev` tag. The latter is updated every time a new commit is
pushed to the development branch. This branch will be the most up to date, but
does not give any guarantees about stability, so beware!

The simplest way to run the Docker image is using a plain Docker command:

```sh
docker run \
    --rm \
    -d \
    -v /path/to/data:/data \
    -e VIETER_API_KEY=changeme \
    -e VIETER_DEFAULT_ARCH=x86_64 \
    -p 8000:8000 \
    chewingbever/vieter:dev
```

Here, you should change `/path/to/data` to the path on your host where you want
vieter to store its files.

The default configuration will store everything inside the `/data` directory.

Inside the container, the Vieter server runs on port 8000. This port should be
exposed to the public accordingly.

For an overview of how to configure vieter & which environment variables can be
used, see the [Configuration](/configuration) page.

## Binary

On the [releases](https://git.rustybever.be/Chewing_Bever/vieter/releases)
page, you can find statically compiled binaries for all released versions. You
can download the binary for your host's architecture & run it that way.

For more information about configuring the binary, check out the
[Configuration](/configuration) page.

## Building from source

Because the project is still in heavy development, it might be useful to build
from source instead. Luckily, this process is very easy. You'll need make,
libarchive & openssl, all of which should be present on an every-day Arch
install. Then, after cloning the repository, you can use the following
commands:

```sh
# Builds the compiler; should usually only be run once. Vieter compiles using
# the default compiler, but I maintain my own mirror to ensure nothing breaks
# without me knowing.
make v

# Build vieter
# Alternatively, use `make prod` to build the production build.
make
```

{{< hint info >}}
**Note**
My version of the V compiler is also available on my Vieter instance,
https://arch.r8r.be. It's in the `vieter` repository, with the package being
named `vieter-v`. The compiler is available for both x86_64 & aarch64.
{{< /hint >}}

## My Vieter instance

Besides uploading development Docker images, my CI also publishes x86_64 &
aarch64 packages to my personal Vieter instance, https://arch.r8r.be. If you'd
like, you can use this repository as well by adding it to your Pacman
configuration as described [here](/usage#configuring-pacman). Both the
repository & the package are called `vieter`.

@@ -0,0 +1,54 @@
---
weight: 30
---
# Usage

## Starting the server

To start a server, either install it using Docker (see
[Installation](/installation)) or run it locally by executing `vieter
server`. See [Configuration](/configuration) for more information about
configuring the binary.

## Multiple repositories

Vieter works with multiple repositories. This means that a single Vieter server
can serve multiple repositories in Pacman. It also automatically divides files
with specific architectures among arch-repos. Arch-repos are the actual
repositories you add to your `/etc/pacman.conf` file. See [Configuring
Pacman](/usage#configuring-pacman) below for more info.

## Adding packages

Using Vieter is currently very simple. If you wish to add a package to Vieter,
build it using makepkg & POST that file to the `/<repo>/publish` endpoint of
your server. This will add the package to the repository. Authentication
requires you to add the API key as the `X-Api-Key` header.

All of this can be combined into a simple cURL call:

```
curl -XPOST -H "X-API-KEY: your-key" -T some-package.pkg.tar.zst https://example.com/somerepo/publish
```

`somerepo` is automatically created if it doesn't exist yet.

## Configuring Pacman

Configuring Pacman to use a Vieter instance is very simple. In your
`/etc/pacman.conf` file, add the following lines:

```
[vieter]
Server = https://example.com/$repo/$arch
SigLevel = Optional
```

Here, you see two important placeholder variables. `$repo` is replaced by the
name within the square brackets, which in this case would be `vieter`. `$arch`
is replaced by the output of `uname -m`. Because Vieter supports multiple
repositories & architectures per repository, using this notation makes sure you
always use the correct endpoint for fetching files.

I recommend placing this below all other repository entries, as the order
decides which repository should be used if there's ever a naming conflict.

@@ -0,0 +1 @@
{"Target":"book.min.97cfda4f5e3c9fa49a2bf8d401f4ddc0eec576c99cdcf6afbec19173200c37db.css","MediaType":"text/css","Data":{"Integrity":"sha256-l8/aT148n6SaK/jUAfTdwO7Fdsmc3PavvsGRcyAMN9s="}}

@@ -0,0 +1 @@
Subproject commit 4ef38f3bbf5dae9a11a711d2ed1ced9294c6af5f

@@ -0,0 +1,3 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

@@ -5,12 +5,18 @@ import encoding.base64
import time
import git
import os
import db

const container_build_dir = '/build'

const build_image_repo = 'vieter-build'

fn create_build_image(base_image string) ?string {
// create_build_image creates a builder image given some base image which can
// then be used to build & package Arch images. It mostly just updates the
// system, installs some necessary packages & creates a non-root user to run
// makepkg with. The base image should be some Linux distribution that uses
// Pacman as its package manager.
pub fn create_build_image(base_image string) ?string {
	commands := [
		// Update repos & install required packages
		'pacman -Syu --needed --noconfirm base-devel git'
@@ -53,12 +59,13 @@ fn create_build_image(base_image string) ?string {
			break
		}

		// Wait for 5 seconds
		time.sleep(5000000000)
		time.sleep(1 * time.second)
	}

	// Finally, we create the image from the container
	// As the tag, we use the epoch value
	// TODO also add the base image's name into the image name to prevent
	// conflicts.
	tag := time.sys_mono_now().str()
	image := docker.create_image_from_container(id, 'vieter-build', tag) ?
	docker.remove_container(id) ?

@@ -66,15 +73,64 @@ fn create_build_image(base_image string) ?string {
	return image.id
}

// build_repo builds, packages & publishes a given Arch package based on the
// provided GitRepo. The base image ID should be of an image previously created
// by create_build_image.
pub fn build_repo(address string, api_key string, base_image_id string, repo &db.GitRepo) ? {
	build_arch := os.uname().machine

	// TODO what to do with PKGBUILDs that build multiple packages?
	commands := [
		'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
		'cd repo',
		'makepkg --nobuild --nodeps',
		'source PKGBUILD',
		// The build container checks whether the package is already
		// present on the server
		'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
	]

	// We convert the list of commands into a base64 string, which then gets
	// passed to the container as an env var
	cmds_str := base64.encode_str(commands.join('\n'))

	c := docker.NewContainer{
		image: '$base_image_id'
		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$api_key']
		entrypoint: ['/bin/sh', '-c']
		cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
		work_dir: '/build'
		user: 'builder:builder'
	}

	id := docker.create_container(c) ?
	docker.start_container(id) ?

	// This loop waits until the container has stopped, so we can remove it after
	for {
		data := docker.inspect_container(id) ?

		if !data.state.running {
			break
		}

		time.sleep(1 * time.second)
	}

	docker.remove_container(id) ?
}

// build builds every Git repo in the server's list.
fn build(conf Config) ? {
	build_arch := os.uname().machine

	// We get the repos map from the Vieter instance
	repos_map := git.get_repos(conf.address, conf.api_key) ?
	repos := git.get_repos(conf.address, conf.api_key) ?

	// We filter out any repos that aren't allowed to be built on this
	// architecture
	filtered_repos := repos_map.keys().map(repos_map[it]).filter(it.arch.contains(build_arch))
	filtered_repos := repos.filter(it.arch.map(it.value).contains(build_arch))

	// No point in doing work if there's no repos present
	if filtered_repos.len == 0 {

@@ -85,47 +141,7 @@ fn build(conf Config) ? {
	image_id := create_build_image(conf.base_image) ?

	for repo in filtered_repos {
		// TODO what to do with PKGBUILDs that build multiple packages?
		commands := [
			'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
			'cd repo',
			'makepkg --nobuild --nodeps',
			'source PKGBUILD',
			// The build container checks whether the package is already
			// present on the server
			'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
			'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
		]

		// We convert the list of commands into a base64 string, which then gets
		// passed to the container as an env var
		cmds_str := base64.encode_str(commands.join('\n'))

		c := docker.NewContainer{
			image: '$image_id'
			env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
			entrypoint: ['/bin/sh', '-c']
			cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
			work_dir: '/build'
			user: 'builder:builder'
		}

		id := docker.create_container(c) ?
		docker.start_container(id) ?

		// This loop waits until the container has stopped, so we can remove it after
		for {
			data := docker.inspect_container(id) ?

			if !data.state.running {
				break
			}

			// Wait for 5 seconds
			time.sleep(5000000000)
		}

		docker.remove_container(id) ?
		build_repo(conf.address, conf.api_key, image_id, repo) ?
	}

	// Finally, we remove the builder image

@@ -0,0 +1,32 @@
module cron

import cli
import env

struct Config {
pub:
	log_level  string = 'WARN'
	api_key    string
	address    string
	data_dir   string
	base_image string = 'archlinux:base-devel'
	max_concurrent_builds   int = 1
	api_update_frequency    int = 15
	image_rebuild_frequency int = 1440
	// Replicates the behavior of the original cron system
	global_schedule string = '0 3'
}

// cmd returns the cli module that handles the cron daemon.
pub fn cmd() cli.Command {
	return cli.Command{
		name: 'cron'
		description: 'Start the cron service that periodically runs builds.'
		execute: fn (cmd cli.Command) ? {
			config_file := cmd.flags.get_string('config-file') ?
			conf := env.load<Config>(config_file) ?

			cron(conf) ?
		}
	}
}

@@ -0,0 +1,33 @@
module cron

import log
import cron.daemon
import cron.expression
import os

const log_file_name = 'vieter.cron.log'

// cron starts a cron daemon & starts periodically scheduling builds.
pub fn cron(conf Config) ? {
	// Configure logger
	log_level := log.level_from_tag(conf.log_level) or {
		return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
	}

	mut logger := log.Log{
		level: log_level
	}

	log_file := os.join_path_single(conf.data_dir, cron.log_file_name)
	logger.set_full_logpath(log_file)
	logger.log_to_console_too()

	ce := expression.parse_expression(conf.global_schedule) or {
		return error('Error while parsing global cron expression: $err.msg()')
	}

	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?

	d.run()
}

@@ -0,0 +1,105 @@
module daemon

import time
import sync.stdatomic
import build

const (
	build_empty   = 0
	build_running = 1
	build_done    = 2
)

// clean_finished_builds removes finished builds from the build slots & returns
// them.
fn (mut d Daemon) clean_finished_builds() []ScheduledBuild {
	mut out := []ScheduledBuild{}

	for i in 0 .. d.atomics.len {
		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
			stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
			out << d.builds[i]
		}
	}

	return out
}

// start_new_builds starts as many builds as possible.
fn (mut d Daemon) start_new_builds() {
	now := time.now()

	for d.queue.len() > 0 {
		elem := d.queue.peek() or {
			d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")

			break
		}

		if elem.timestamp < now {
			sb := d.queue.pop() or {
				d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.")

				break
			}

			// If this build couldn't be scheduled, no more will be possible.
			if !d.start_build(sb) {
				d.queue.insert(sb)
				break
			}
		} else {
			break
		}
	}
}

// start_build starts a build for the given ScheduledBuild object.
fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
	for i in 0 .. d.atomics.len {
		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
			stdatomic.store_u64(&d.atomics[i], daemon.build_running)
			d.builds[i] = sb

			go d.run_build(i, sb)

			return true
		}
	}

	return false
}

// run_build actually starts the build process for a given repo.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
	d.linfo('started build: $sb.repo.url $sb.repo.branch')

	// 0 means success, 1 means failure
	mut status := 0

	build.build_repo(d.address, d.api_key, d.builder_images.last(), &sb.repo) or {
		d.ldebug('build_repo error: $err.msg()')
		status = 1
	}

	if status == 0 {
		d.linfo('finished build: $sb.repo.url $sb.repo.branch')
	} else {
		d.linfo('failed build: $sb.repo.url $sb.repo.branch')
	}

	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
}

// current_build_count returns how many builds are currently running.
fn (mut d Daemon) current_build_count() int {
	mut res := 0

	for i in 0 .. d.atomics.len {
		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
			res += 1
		}
	}

	return res
}

@ -0,0 +1,268 @@
|
|||
module daemon
|
||||
|
||||
import git
|
||||
import time
|
||||
import log
|
||||
import datatypes { MinHeap }
|
||||
import cron.expression { CronExpression, parse_expression }
|
||||
import math
|
||||
import build
|
||||
import docker
|
||||
import db
|
||||
import os
|
||||
|
||||
const (
|
||||
// How many seconds to wait before retrying to update API if failed
|
||||
api_update_retry_timeout = 5
|
||||
// How many seconds to wait before retrying to rebuild image if failed
|
||||
rebuild_base_image_retry_timout = 30
|
||||
)
|
||||
|
||||
struct ScheduledBuild {
|
||||
pub:
|
||||
repo db.GitRepo
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// Overloaded operator for comparing ScheduledBuild objects
|
||||
fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
|
||||
return r1.timestamp < r2.timestamp
|
||||
}

pub struct Daemon {
mut:
	address                 string
	api_key                 string
	base_image              string
	builder_images          []string
	global_schedule         CronExpression
	api_update_frequency    int
	image_rebuild_frequency int
	// Repos currently loaded from the API.
	repos []db.GitRepo
	// At what point to update the list of repositories.
	api_update_timestamp  time.Time
	image_build_timestamp time.Time
	queue                 MinHeap<ScheduledBuild>
	// Which builds are currently running
	builds []ScheduledBuild
	// Atomic variables used to detect when a build has finished; the length
	// is the same as that of builds
	atomics []u64
	logger  shared log.Log
}

// init_daemon initializes a new Daemon object. It renews the repositories &
// populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
	mut d := Daemon{
		address: address
		api_key: api_key
		base_image: base_image
		global_schedule: global_schedule
		api_update_frequency: api_update_frequency
		image_rebuild_frequency: image_rebuild_frequency
		atomics: []u64{len: max_concurrent_builds}
		builds: []ScheduledBuild{len: max_concurrent_builds}
		logger: logger
	}

	// Initialize the repos & queue
	d.renew_repos()
	d.renew_queue()
	if !d.rebuild_base_image() {
		return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
	}

	return d
}

// run starts the actual daemon process. It runs builds when possible &
// periodically refreshes the list of repositories to ensure we stay in sync.
pub fn (mut d Daemon) run() {
	for {
		finished_builds := d.clean_finished_builds()

		// Update the API's contents if needed & renew the queue
		if time.now() >= d.api_update_timestamp {
			d.renew_repos()
			d.renew_queue()
		}
		// The finished builds should only be rescheduled if the API contents
		// haven't been renewed.
		else {
			for sb in finished_builds {
				d.schedule_build(sb.repo)
			}
		}

		// TODO remove old builder images.
		// This issue is less trivial than it sounds, because a build could
		// still be running when the image has to be rebuilt. That would
		// prevent the image from being removed. Therefore, we will need to
		// keep track of a list or something & remove an image once we have
		// made sure it isn't being used anymore.
		if time.now() >= d.image_build_timestamp {
			d.rebuild_base_image()
			// In theory, executing this function here allows an old builder
			// image to exist for at most image_rebuild_frequency minutes.
			d.clean_old_base_images()
		}

		// Schedule new builds when possible
		d.start_new_builds()

		// If there are builds currently running, the daemon should refresh
		// every second to clean up any finished builds & start new ones.
		mut delay := time.Duration(1 * time.second)

		// Sleep either until we have to refresh the repos or until the next
		// build has to start, with a minimum of 1 second.
		if d.current_build_count() == 0 {
			now := time.now()
			delay = d.api_update_timestamp - now

			if d.queue.len() > 0 {
				elem := d.queue.peek() or {
					d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")

					// This is just a fallback option. In theory, queue.peek()
					// should *never* return an error or none, because we
					// check its length beforehand.
					time.sleep(1)
					continue
				}

				time_until_next_job := elem.timestamp - now

				delay = math.min(delay, time_until_next_job)
			}
		}

		// We sleep for at least one second. This prevents the program from
		// looping aggressively when a cron job can be scheduled, but there's
		// no spot free for it to be started in.
		delay = math.max(delay, 1 * time.second)

		d.ldebug('Sleeping for ${delay}...')

		time.sleep(delay)
	}
}

// schedule_build adds the next occurrence of the given repo build to the
// queue.
fn (mut d Daemon) schedule_build(repo db.GitRepo) {
	ce := if repo.schedule != '' {
		parse_expression(repo.schedule) or {
			// TODO This shouldn't return an error if the expression is empty.
			d.lerror("Error while parsing cron expression '$repo.schedule' (id $repo.id): $err.msg()")

			d.global_schedule
		}
	} else {
		d.global_schedule
	}

	// A repo that can't be scheduled will just be skipped for now
	timestamp := ce.next_from_now() or {
		d.lerror("Couldn't calculate next timestamp from '$repo.schedule'; skipping")
		return
	}

	d.queue.insert(ScheduledBuild{
		repo: repo
		timestamp: timestamp
	})
}

// renew_repos requests the newest list of Git repos from the server &
// replaces the old one.
fn (mut d Daemon) renew_repos() {
	d.linfo('Renewing repos...')

	mut new_repos := git.get_repos(d.address, d.api_key) or {
		d.lerror('Failed to renew repos. Retrying in ${daemon.api_update_retry_timeout}s...')
		d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)

		return
	}

	// Filter out any repos that shouldn't run on this architecture
	cur_arch := os.uname().machine
	new_repos = new_repos.filter(it.arch.any(it.value == cur_arch))

	d.repos = new_repos

	d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
}

// renew_queue replaces the old queue with a new one that reflects the newest
// values in repos.
fn (mut d Daemon) renew_queue() {
	d.linfo('Renewing queue...')
	mut new_queue := MinHeap<ScheduledBuild>{}

	// Move any jobs that should have already started from the old queue onto
	// the new one
	now := time.now()

	// For some reason, using
	// ```v
	// for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now {
	//```
	// here causes the function to just exit prematurely, without any error.
	// https://github.com/vlang/v/issues/14042
	for d.queue.len() > 0 {
		elem := d.queue.pop() or {
			d.lerror("queue.pop() returned an error. This shouldn't happen.")
			continue
		}

		if elem.timestamp < now {
			new_queue.insert(elem)
		} else {
			break
		}
	}

	d.queue = new_queue

	// For each repository in repos, parse its cron expression (or use the
	// default one if not present) & add it to the queue
	for repo in d.repos {
		d.schedule_build(repo)
	}
}

// rebuild_base_image recreates the builder image.
fn (mut d Daemon) rebuild_base_image() bool {
	d.linfo('Rebuilding builder image...')

	d.builder_images << build.create_build_image(d.base_image) or {
		d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timeout}s...')
		d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timeout)

		return false
	}

	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)

	return true
}

// clean_old_base_images tries to remove any old but still present builder
// images.
fn (mut d Daemon) clean_old_base_images() {
	mut i := 0

	for i < d.builder_images.len - 1 {
		// For each builder image, we try to remove it by calling the Docker
		// API. If the function returns an error or false, that means the
		// image wasn't deleted, so we move the index over. If it returns
		// true, the array's length has decreased by one, so we leave the
		// index as-is.
		if !docker.remove_image(d.builder_images[i]) or { false } {
			i += 1
		}
	}
}
@ -0,0 +1,35 @@
module daemon

import log

// log creates a log message with the given level
pub fn (mut d Daemon) log(msg &string, level log.Level) {
	lock d.logger {
		d.logger.send_output(msg, level)
	}
}

// lfatal creates a log message with the fatal level
pub fn (mut d Daemon) lfatal(msg &string) {
	d.log(msg, log.Level.fatal)
}

// lerror creates a log message with the error level
pub fn (mut d Daemon) lerror(msg &string) {
	d.log(msg, log.Level.error)
}

// lwarn creates a log message with the warn level
pub fn (mut d Daemon) lwarn(msg &string) {
	d.log(msg, log.Level.warn)
}

// linfo creates a log message with the info level
pub fn (mut d Daemon) linfo(msg &string) {
	d.log(msg, log.Level.info)
}

// ldebug creates a log message with the debug level
pub fn (mut d Daemon) ldebug(msg &string) {
	d.log(msg, log.Level.debug)
}
@ -0,0 +1,261 @@
module expression

import time

pub struct CronExpression {
	minutes []int
	hours   []int
	days    []int
	months  []int
}

// next calculates the earliest time this cron expression is valid. It will
// always pick a moment in the future, even if ref matches completely up to
// the minute. This function consciously does not take leap years into
// account.
pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
	// Because we always want to return a value in the future, the minute
	// comparison below is strict: even if ref matches the expression up to
	// the minute, we move on to the next matching minute. A granularity of
	// one minute is enough, because a cron expression can't be more accurate
	// than that.
	sref := ref

	// For all of these values, the rule is the following: if their value is
	// the length of their respective array in the CronExpression object, that
	// means we've looped back around. This means that the "bigger" value has
	// to be incremented by one. For example, if the minutes have looped
	// around, that means that the hour has to be incremented as well.
	mut minute_index := 0
	mut hour_index := 0
	mut day_index := 0
	mut month_index := 0

	// This chain repeats the same logic multiple times, namely that if a
	// "bigger" value loops around, then the smaller value will always reset
	// as well. For example, if we're going to a new day, the hour & minute
	// will always be their smallest value again.
	for month_index < ce.months.len && sref.month > ce.months[month_index] {
		month_index++
	}

	if month_index < ce.months.len && sref.month == ce.months[month_index] {
		for day_index < ce.days.len && sref.day > ce.days[day_index] {
			day_index++
		}

		if day_index < ce.days.len && ce.days[day_index] == sref.day {
			for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] {
				hour_index++
			}

			if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour {
				// Minute is the only value where we explicitly make sure we
				// can't match sref's value exactly. This is to ensure we only
				// return values in the future.
				for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] {
					minute_index++
				}
			}
		}
	}

	// Here, we increment the "bigger" values by one if the smaller ones loop
	// around. The order is important, as it allows a sort-of waterfall effect
	// to occur which updates all values if required.
	if minute_index == ce.minutes.len && hour_index < ce.hours.len {
		hour_index += 1
	}

	if hour_index == ce.hours.len && day_index < ce.days.len {
		day_index += 1
	}

	if day_index == ce.days.len && month_index < ce.months.len {
		month_index += 1
	}

	mut minute := ce.minutes[minute_index % ce.minutes.len]
	mut hour := ce.hours[hour_index % ce.hours.len]
	mut day := ce.days[day_index % ce.days.len]

	// Sometimes, we end up with a day that does not exist within the selected
	// month, e.g. day 30 in February. When this occurs, we reset day back to
	// the smallest value & loop over to the next month that does have this
	// day.
	if day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
		day = ce.days[0]
		month_index += 1

		for day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
			month_index += 1

			// If for whatever reason the day value ends up being something
			// that can't be scheduled in any month, we have to make sure we
			// don't create an infinite loop.
			if month_index == 2 * ce.months.len {
				return error('No schedulable moment.')
			}
		}
	}

	month := ce.months[month_index % ce.months.len]
	mut year := sref.year

	// If the month loops over, we need to increment the year.
	if month_index >= ce.months.len {
		year++
	}

	return time.new_time(time.Time{
		year: year
		month: month
		day: day
		minute: minute
		hour: hour
	})
}

// next_from_now returns the result of ce.next(ref) where ref is the result
// of time.now().
pub fn (ce &CronExpression) next_from_now() ?time.Time {
	return ce.next(time.now())
}

// parse_range parses a given string into a range of sorted integers, if
// possible.
fn parse_range(s string, min int, max int, mut bitv []bool) ? {
	mut start := min
	mut end := max
	mut interval := 1

	exps := s.split('/')

	if exps.len > 2 {
		return error('Invalid expression.')
	}

	if exps[0] != '*' {
		dash_parts := exps[0].split('-')

		if dash_parts.len > 2 {
			return error('Invalid expression.')
		}

		start = dash_parts[0].int()

		// The builtin parsing functions return zero if the string can't be
		// parsed into a number, so we have to explicitly check whether the
		// input actually was zero or just an invalid number.
		if start == 0 && dash_parts[0] != '0' {
			return error('Invalid number.')
		}

		// Check whether the start value is out of range
		if start < min || start > max {
			return error('Out of range.')
		}

		if dash_parts.len == 2 {
			end = dash_parts[1].int()

			if end == 0 && dash_parts[1] != '0' {
				return error('Invalid number.')
			}

			if end < start || end > max {
				return error('Out of range.')
			}
		}
	}

	if exps.len > 1 {
		interval = exps[1].int()

		// An interval of zero is always invalid, but we check why it's
		// invalid in order to give a better error message.
		if interval == 0 {
			if exps[1] != '0' {
				return error('Invalid number.')
			} else {
				return error('Step size zero not allowed.')
			}
		}

		if interval > max - min {
			return error('Step size too large.')
		}
	}
	// Here, s solely consists of a number, so that's the only value we
	// should return.
	else if exps[0] != '*' && !exps[0].contains('-') {
		bitv[start - min] = true
		return
	}

	for start <= end {
		bitv[start - min] = true
		start += interval
	}
}

// bitv_to_ints converts a bit vector into an array containing the
// corresponding values.
fn bitv_to_ints(bitv []bool, min int) []int {
	mut out := []int{}

	for i in 0 .. bitv.len {
		if bitv[i] {
			out << min + i
		}
	}

	return out
}

// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ?[]int {
	mut bitv := []bool{len: max - min + 1, init: false}

	for range in s.split(',') {
		parse_range(range, min, max, mut bitv) ?
	}

	return bitv_to_ints(bitv, min)
}

// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) ?CronExpression {
	// The filter allows for multiple spaces between parts
	mut parts := exp.split(' ').filter(it != '')

	if parts.len < 2 || parts.len > 4 {
		return error('Expression must contain between 2 and 4 space-separated parts.')
	}

	// For ease of use, we allow the user to only specify as many parts as
	// they need.
	for parts.len < 4 {
		parts << '*'
	}

	mut part_results := [][]int{}

	mins := [0, 0, 1, 1]
	maxs := [59, 23, 31, 12]

	// This for loop allows us to more clearly propagate the error to the
	// user.
	for i, min in mins {
		part_results << parse_part(parts[i], min, maxs[i]) or {
			return error('An error occurred with part $i: $err.msg()')
		}
	}

	return CronExpression{
		minutes: part_results[0]
		hours: part_results[1]
		days: part_results[2]
		months: part_results[3]
	}
}
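As a minimal usage sketch (not part of the diff; the expression is an example), parsing a two-part expression and computing its next occurrence:

import cron.expression { parse_expression }

fn example_next_run() ? {
	// '0 3' is padded to '0 3 * *': minute 0 of hour 3, i.e. daily at 03:00
	ce := parse_expression('0 3') ?
	next := ce.next_from_now() ?
	println('next run: $next')
}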
@ -0,0 +1,98 @@
module expression

// parse_range_error returns the error message produced by parse_range. If
// the result is '', the function didn't error.
fn parse_range_error(s string, min int, max int) string {
	mut bitv := []bool{len: max - min + 1, init: false}

	parse_range(s, min, max, mut bitv) or { return err.msg() }

	return ''
}

// =====parse_range=====
fn test_range_star_range() ? {
	mut bitv := []bool{len: 6, init: false}
	parse_range('*', 0, 5, mut bitv) ?

	assert bitv == [true, true, true, true, true, true]
}

fn test_range_number() ? {
	mut bitv := []bool{len: 6, init: false}
	parse_range('4', 0, 5, mut bitv) ?

	assert bitv_to_ints(bitv, 0) == [4]
}

fn test_range_number_too_large() ? {
	assert parse_range_error('10', 0, 6) == 'Out of range.'
}

fn test_range_number_too_small() ? {
	assert parse_range_error('0', 2, 6) == 'Out of range.'
}

fn test_range_number_invalid() ? {
	assert parse_range_error('x', 0, 6) == 'Invalid number.'
}

fn test_range_step_star_1() ? {
	mut bitv := []bool{len: 21, init: false}
	parse_range('*/4', 0, 20, mut bitv) ?

	assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20]
}

fn test_range_step_star_2() ? {
	mut bitv := []bool{len: 8, init: false}
	parse_range('*/3', 1, 8, mut bitv) ?

	assert bitv_to_ints(bitv, 1) == [1, 4, 7]
}

fn test_range_step_star_too_large() ? {
	assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
}

fn test_range_step_zero() ? {
	assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
}

fn test_range_step_number() ? {
	mut bitv := []bool{len: 21, init: false}
	parse_range('5/4', 2, 22, mut bitv) ?

	assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21]
}

fn test_range_step_number_too_large() ? {
	assert parse_range_error('10/4', 0, 5) == 'Out of range.'
}

fn test_range_step_number_too_small() ? {
	assert parse_range_error('2/4', 5, 10) == 'Out of range.'
}

fn test_range_dash() ? {
	mut bitv := []bool{len: 10, init: false}
	parse_range('4-8', 0, 9, mut bitv) ?

	assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8]
}

fn test_range_dash_step() ? {
	mut bitv := []bool{len: 10, init: false}
	parse_range('4-8/2', 0, 9, mut bitv) ?

	assert bitv_to_ints(bitv, 0) == [4, 6, 8]
}

// =====parse_part=====
fn test_part_single() ? {
	assert parse_part('*', 0, 5) ? == [0, 1, 2, 3, 4, 5]
}

fn test_part_multiple() ? {
	assert parse_part('*/2,2/3', 1, 8) ? == [1, 2, 3, 5, 7, 8]
}
@ -0,0 +1,34 @@
module expression

import time { parse }

// util_test_time asserts that the cron expression exp, starting from t1,
// next fires at t2.
fn util_test_time(exp string, t1_str string, t2_str string) ? {
	ce := parse_expression(exp) ?
	t1 := parse(t1_str) ?
	t2 := parse(t2_str) ?

	t3 := ce.next(t1) ?

	assert t2.year == t3.year
	assert t2.month == t3.month
	assert t2.day == t3.day
	assert t2.hour == t3.hour
	assert t2.minute == t3.minute
}

fn test_next_simple() ? {
	// Very simple
	util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00') ?

	// Overlap to next day
	util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00') ?
	util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00') ?

	util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00') ?

	// Overlap to next month
	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00') ?

	// Overlap to next year
	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00') ?
}
@ -0,0 +1,20 @@
module db

import sqlite

struct VieterDb {
	conn sqlite.DB
}

// init initializes a database & adds the correct tables.
pub fn init(db_path string) ?VieterDb {
	conn := sqlite.connect(db_path) ?

	sql conn {
		create table GitRepo
	}

	return VieterDb{
		conn: conn
	}
}
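A short usage sketch (the path is made up): opening the database creates the GitRepo table if it doesn't exist yet.

vdb := db.init('data/vieter.sqlite') or { panic(err.msg()) }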
@ -0,0 +1,154 @@
module db

pub struct GitRepoArch {
pub:
	id      int    [primary; sql: serial]
	repo_id int    [nonull]
	value   string [nonull]
}

// str returns a string representation.
pub fn (gra &GitRepoArch) str() string {
	return gra.value
}

pub struct GitRepo {
pub mut:
	id int [optional; primary; sql: serial]
	// URL of the Git repository
	url string [nonull]
	// Branch of the Git repository to use
	branch string [nonull]
	// Which repo the builder should publish packages to
	repo string [nonull]
	// Cron schedule describing how frequently to build the repo.
	schedule string [optional]
	// On which architectures the package is allowed to be built. In reality,
	// this controls which builders will periodically build the image.
	arch []GitRepoArch [fkey: 'repo_id']
}

// str returns a string representation.
pub fn (gr &GitRepo) str() string {
	mut parts := [
		'id: $gr.id',
		'url: $gr.url',
		'branch: $gr.branch',
		'repo: $gr.repo',
		'schedule: $gr.schedule',
		'arch: ${gr.arch.map(it.value).join(', ')}',
	]
	str := parts.join('\n')

	return str
}

// patch_from_params patches a GitRepo from a map[string]string, usually
// provided from a web.App's params.
pub fn (mut r GitRepo) patch_from_params(params map[string]string) {
	$for field in GitRepo.fields {
		if field.name in params {
			$if field.typ is string {
				r.$(field.name) = params[field.name]
				// This specific type check is needed for the compiler to
				// ensure our types are correct
			} $else $if field.typ is []GitRepoArch {
				r.$(field.name) = params[field.name].split(',').map(GitRepoArch{ value: it })
			}
		}
	}
}

// git_repo_from_params creates a GitRepo from a map[string]string, usually
// provided from a web.App's params.
pub fn git_repo_from_params(params map[string]string) ?GitRepo {
	mut repo := GitRepo{}

	// If we're creating a new GitRepo, we want all fields to be present
	// before "patching".
	$for field in GitRepo.fields {
		if field.name !in params && !field.attrs.contains('optional') {
			return error('Missing parameter: ${field.name}.')
		}
	}
	repo.patch_from_params(params)

	return repo
}
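For illustration (all values made up), building a repo from query-style params; the comma-separated arch string is split into GitRepoArch objects by patch_from_params:

params := {
	'url':    'https://example.com/pkg.git'
	'branch': 'main'
	'repo':   'vieter'
	'arch':   'x86_64,aarch64'
}
repo := git_repo_from_params(params) or { panic(err.msg()) }
assert repo.arch.map(it.value) == ['x86_64', 'aarch64']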

// get_git_repos returns all GitRepo's in the database.
pub fn (db &VieterDb) get_git_repos() []GitRepo {
	res := sql db.conn {
		select from GitRepo order by id
	}

	return res
}

// get_git_repo tries to return a specific GitRepo.
pub fn (db &VieterDb) get_git_repo(repo_id int) ?GitRepo {
	res := sql db.conn {
		select from GitRepo where id == repo_id
	}

	// If a select statement fails, it returns a zeroed object. By checking
	// one of the required fields, we can see whether the query returned a
	// result or not.
	if res.url == '' {
		return none
	}

	return res
}

// add_git_repo inserts the given GitRepo into the database.
pub fn (db &VieterDb) add_git_repo(repo GitRepo) {
	sql db.conn {
		insert repo into GitRepo
	}
}

// delete_git_repo deletes the repo with the given ID from the database.
pub fn (db &VieterDb) delete_git_repo(repo_id int) {
	sql db.conn {
		delete from GitRepo where id == repo_id
		delete from GitRepoArch where repo_id == repo_id
	}
}

// update_git_repo updates any non-array values for a given GitRepo.
pub fn (db &VieterDb) update_git_repo(repo_id int, params map[string]string) {
	mut values := []string{}

	// TODO does this allow for SQL injection?
	$for field in GitRepo.fields {
		if field.name in params {
			// Any fields that are array types require their own update method
			$if field.typ is string {
				values << "$field.name = '${params[field.name]}'"
			}
		}
	}
	values_str := values.join(', ')
	query := 'update GitRepo set $values_str where id == $repo_id'

	db.conn.exec_none(query)
}

// update_git_repo_archs updates a given GitRepo's arch value.
pub fn (db &VieterDb) update_git_repo_archs(repo_id int, archs []GitRepoArch) {
	archs_with_id := archs.map(GitRepoArch{
		...it
		repo_id: repo_id
	})

	sql db.conn {
		delete from GitRepoArch where repo_id == repo_id
	}

	for arch in archs_with_id {
		sql db.conn {
			insert arch into GitRepoArch
		}
	}
}
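A usage sketch (the ID is made up): from a caller's perspective, this replaces a repo's arch list wholesale.

vdb.update_git_repo_archs(1, [GitRepoArch{ value: 'x86_64' }, GitRepoArch{ value: 'aarch64' }])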
@ -9,6 +9,8 @@ const socket = '/var/run/docker.sock'

const buf_len = 1024

// send writes a request to the Docker socket, waits for a response & returns
// it.
fn send(req &string) ?http.Response {
	// Open a connection to the socket
	mut s := unix.connect_stream(docker.socket) or {
@ -28,8 +30,8 @@ fn send(req &string) ?http.Response {
	s.wait_for_write() ?

	mut c := 0
	mut buf := []byte{len: docker.buf_len}
	mut res := []byte{}
	mut buf := []u8{len: docker.buf_len}
	mut res := []u8{}

	for {
		c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') }
@ -52,7 +54,7 @@ fn send(req &string) ?http.Response {

	// We loop until we've encountered the end of the chunked response
	// A chunked HTTP response always ends with '0\r\n\r\n'.
	for res.len < 5 || res#[-5..] != [byte(`0`), `\r`, `\n`, `\r`, `\n`] {
	for res.len < 5 || res#[-5..] != [u8(`0`), `\r`, `\n`, `\r`, `\n`] {
		// Wait for the server to respond
		s.wait_for_write() ?

@ -72,12 +74,14 @@ fn send(req &string) ?http.Response {
	return http.parse_response(res.bytestr())
}

// request_with_body sends a request to the Docker socket with the given body.
fn request_with_body(method string, url urllib.URL, content_type string, body string) ?http.Response {
	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'

	return send(req)
}

// request sends a request to the Docker socket with an empty body.
fn request(method string, url urllib.URL) ?http.Response {
	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'

@ -36,7 +36,7 @@ fn get_env_var(field_name string) ?string {
	// Otherwise, we process the file
	return os.read_file(env_file) or {
		error('Failed to read file defined in $env_file_name: ${err.msg}.')
		error('Failed to read file defined in $env_file_name: ${err.msg()}.')
	}
}

@ -55,27 +55,41 @@ pub fn load<T>(path string) ?T {
	$for field in T.fields {
		s := doc.value(field.name)

		// We currently only support strings
		if s.type_name() == 'string' {
			res.$(field.name) = s.string()
		if s !is toml.Null {
			$if field.typ is string {
				res.$(field.name) = s.string()
			} $else $if field.typ is int {
				res.$(field.name) = s.int()
			}
		}
	}
}

	$for field in T.fields {
		$if field.typ is string {
			env_value := get_env_var(field.name) ?
		env_value := get_env_var(field.name) ?

			// The value of the env var will always be chosen over the config
			// file
			if env_value != '' {
		// The value of an env var will always take precedence over the toml
		// file.
		if env_value != '' {
			$if field.typ is string {
				res.$(field.name) = env_value
			} $else $if field.typ is int {
				res.$(field.name) = env_value.int()
			}
			// If there's no value from the toml file either, we try to find a
			// default value
			else if res.$(field.name) == '' {
				return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.")
			}
		}

		// Now, we check whether a value is present. If there isn't, that means
		// it isn't in the config file, nor is there a default or an env var.
		mut has_value := false

		$if field.typ is string {
			has_value = res.$(field.name) != ''
		} $else $if field.typ is int {
			has_value = res.$(field.name) != 0
		}

		if !has_value {
			return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.")
		}
	}
	return res
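To illustrate the precedence described above (a sketch with made-up field names):

struct ExampleConf {
	api_key string
	port    int
}

fn example_load() ? {
	// VIETER_API_KEY=secret in the environment overrides api_key from the
	// toml file; a field with neither a value nor a default makes load fail.
	conf := env.load<ExampleConf>('vieter.toml') ?
	println(conf.api_key)
}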
src/git/cli.v
@ -2,6 +2,7 @@ module git

import cli
import env
import cron.expression { parse_expression }

struct Config {
	address string [required]
@ -26,14 +27,14 @@ pub fn cmd() cli.Command {
		},
		cli.Command{
			name: 'add'
			required_args: 4
			usage: 'url branch repo arch...'
			required_args: 3
			usage: 'url branch repo'
			description: 'Add a new repository.'
			execute: fn (cmd cli.Command) ? {
				config_file := cmd.flags.get_string('config-file') ?
				conf := env.load<Config>(config_file) ?

				add(conf, cmd.args[0], cmd.args[1], cmd.args[2], cmd.args[3..]) ?
				add(conf, cmd.args[0], cmd.args[1], cmd.args[2]) ?
			}
		},
		cli.Command{
@ -48,6 +49,18 @@ pub fn cmd() cli.Command {
				remove(conf, cmd.args[0]) ?
			}
		},
		cli.Command{
			name: 'info'
			required_args: 1
			usage: 'id'
			description: 'Show detailed information for the repo matching the ID prefix.'
			execute: fn (cmd cli.Command) ? {
				config_file := cmd.flags.get_string('config-file') ?
				conf := env.load<Config>(config_file) ?

				info(conf, cmd.args[0]) ?
			}
		},
		cli.Command{
			name: 'edit'
			required_args: 1
@ -74,6 +87,11 @@ pub fn cmd() cli.Command {
					description: 'Comma-separated list of architectures to build on.'
					flag: cli.FlagType.string
				},
				cli.Flag{
					name: 'schedule'
					description: 'Cron schedule for repository.'
					flag: cli.FlagType.string
				},
			]
			execute: fn (cmd cli.Command) ? {
				config_file := cmd.flags.get_string('config-file') ?
@ -96,52 +114,62 @@ pub fn cmd() cli.Command {
	}
}

fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
	repos := get_repos(conf.address, conf.api_key) ?

	mut res := []string{}

	for id, _ in repos {
		if id.starts_with(id_prefix) {
			res << id
		}
	}

	if res.len == 0 {
		return error('No repo found for given prefix.')
	}

	if res.len > 1 {
		return error('Multiple repos found for given prefix.')
	}

	return res[0]
}
// get_repo_by_prefix tries to find the repo with the given prefix in its
// ID. If multiple or none are found, an error is raised.

// list prints out a list of all repositories.
fn list(conf Config) ? {
	repos := get_repos(conf.address, conf.api_key) ?

	for id, details in repos {
		println('${id[..8]}\t$details.url\t$details.branch\t$details.repo\t$details.arch')
	for repo in repos {
		println('$repo.id\t$repo.url\t$repo.branch\t$repo.repo')
	}
}

fn add(conf Config, url string, branch string, repo string, arch []string) ? {
	res := add_repo(conf.address, conf.api_key, url, branch, repo, arch) ?
// add adds a new repository to the server's list.
fn add(conf Config, url string, branch string, repo string) ? {
	res := add_repo(conf.address, conf.api_key, url, branch, repo, []) ?

	println(res.message)
}

fn remove(conf Config, id_prefix string) ? {
	id := get_repo_id_by_prefix(conf, id_prefix) ?
	res := remove_repo(conf.address, conf.api_key, id) ?
// remove removes a repository from the server's list.
fn remove(conf Config, id string) ? {
	// id, _ := get_repo_by_prefix(conf, id_prefix) ?
	id_int := id.int()

	println(res.message)
	if id_int != 0 {
		res := remove_repo(conf.address, conf.api_key, id_int) ?
		println(res.message)
	}
}

fn patch(conf Config, id_prefix string, params map[string]string) ? {
	id := get_repo_id_by_prefix(conf, id_prefix) ?
	res := patch_repo(conf.address, conf.api_key, id, params) ?
// patch patches a given repository with the provided params.
fn patch(conf Config, id string, params map[string]string) ? {
	// We check the cron expression first because it's useless to send an
	// invalid one to the server.
	if 'schedule' in params && params['schedule'] != '' {
		parse_expression(params['schedule']) or {
			return error('Invalid cron expression: $err.msg()')
		}
	}

	println(res.message)
	id_int := id.int()
	if id_int != 0 {
		res := patch_repo(conf.address, conf.api_key, id_int, params) ?

		println(res.message)
	}
}

// info shows detailed information for a given repo.
fn info(conf Config, id string) ? {
	id_int := id.int()

	if id_int == 0 {
		return
	}

	repo := get_repo(conf.address, conf.api_key, id_int) ?
	println(repo)
}
@ -3,7 +3,11 @@ module git
import json
import response { Response }
import net.http
import db

// send_request<T> is a convenience method for sending requests to the repos
// API. It mostly does string manipulation to create a query string containing
// the provided params.
fn send_request<T>(method http.Method, address string, url string, api_key string, params map[string]string) ?Response<T> {
	mut full_url := '$address$url'

@ -23,8 +27,16 @@ fn send_request<T>(method http.Method, address string, url string, api_key strin
}

// get_repos returns the current list of repos.
pub fn get_repos(address string, api_key string) ?map[string]GitRepo {
	data := send_request<map[string]GitRepo>(http.Method.get, address, '/api/repos', api_key,
pub fn get_repos(address string, api_key string) ?[]db.GitRepo {
	data := send_request<[]db.GitRepo>(http.Method.get, address, '/api/repos', api_key,
		{}) ?

	return data.data
}

// get_repo returns the repo for a specific ID.
pub fn get_repo(address string, api_key string, id int) ?db.GitRepo {
	data := send_request<db.GitRepo>(http.Method.get, address, '/api/repos/$id', api_key,
		{}) ?

	return data.data
@ -32,19 +44,23 @@ pub fn get_repos(address string, api_key string) ?map[string]GitRepo {
}

// add_repo adds a new repo to the server.
pub fn add_repo(address string, api_key string, url string, branch string, repo string, arch []string) ?Response<string> {
	params := {
	mut params := {
		'url':    url
		'branch': branch
		'repo':   repo
		'arch':   arch.join(',')
	}

	if arch.len > 0 {
		params['arch'] = arch.join(',')
	}

	data := send_request<string>(http.Method.post, address, '/api/repos', api_key, params) ?

	return data
}

// remove_repo removes the repo with the given ID from the server.
pub fn remove_repo(address string, api_key string, id string) ?Response<string> {
pub fn remove_repo(address string, api_key string, id int) ?Response<string> {
	data := send_request<string>(http.Method.delete, address, '/api/repos/$id', api_key,
		{}) ?

@ -53,7 +69,7 @@ pub fn remove_repo(address string, api_key string, id string) ?Response<string>

// patch_repo sends a PATCH request to the given repo with the params as
// payload.
pub fn patch_repo(address string, api_key string, id string, params map[string]string) ?Response<string> {
pub fn patch_repo(address string, api_key string, id int, params map[string]string) ?Response<string> {
	data := send_request<string>(http.Method.patch, address, '/api/repos/$id', api_key,
		params) ?

@ -1,82 +0,0 @@
module git

import os
import json

pub struct GitRepo {
pub mut:
	// URL of the Git repository
	url string
	// Branch of the Git repository to use
	branch string
	// On which architectures the package is allowed to be built. In reality,
	// this controls which builders will periodically build the image.
	arch []string
	// Which repo the builder should publish packages to
	repo string
}

// patch_from_params patches a GitRepo from a map[string]string, usually
// provided from a web.App's params.
pub fn (mut r GitRepo) patch_from_params(params map[string]string) {
	$for field in GitRepo.fields {
		if field.name in params {
			$if field.typ is string {
				r.$(field.name) = params[field.name]
				// This specific type check is needed for the compiler to
				// ensure our types are correct
			} $else $if field.typ is []string {
				r.$(field.name) = params[field.name].split(',')
			}
		}
	}
}

// read_repos reads the provided path & parses it into a map of GitRepo's.
pub fn read_repos(path string) ?map[string]GitRepo {
	if !os.exists(path) {
		mut f := os.create(path) ?

		defer {
			f.close()
		}

		f.write_string('{}') ?

		return {}
	}

	content := os.read_file(path) ?
	res := json.decode(map[string]GitRepo, content) ?

	return res
}

// write_repos writes a map of GitRepo's back to disk given the provided path.
pub fn write_repos(path string, repos &map[string]GitRepo) ? {
	mut f := os.create(path) ?

	defer {
		f.close()
	}

	value := json.encode(repos)
	f.write_string(value) ?
}

// repo_from_params creates a GitRepo from a map[string]string, usually
// provided from a web.App's params.
pub fn repo_from_params(params map[string]string) ?GitRepo {
	mut repo := GitRepo{}

	// If we're creating a new GitRepo, we want all fields to be present
	// before "patching".
	$for field in GitRepo.fields {
		if field.name !in params {
			return error('Missing parameter: ${field.name}.')
		}
	}
	repo.patch_from_params(params)

	return repo
}
@ -5,12 +5,13 @@ import server

import cli
import build
import git
import cron

fn main() {
	mut app := cli.Command{
		name: 'vieter'
		description: 'Vieter is a lightweight implementation of an Arch repository server.'
		version: '0.2.0'
		version: '0.3.0-alpha.1'
		flags: [
			cli.Flag{
				flag: cli.FlagType.string
@ -25,6 +26,7 @@ fn main() {
			server.cmd(),
			build.cmd(),
			git.cmd(),
			cron.cmd(),
		]
	}

@ -175,6 +175,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
	}
}

// format_entry returns a string properly formatted to be added to a desc file.
fn format_entry(key string, value string) string {
	return '\n%$key%\n$value\n'
}
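As a worked example (illustrative only): format_entry('NAME', 'vieter') yields the string '\n%NAME%\nvieter\n', i.e. a %KEY% header line followed by its value, matching the desc file layout.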
@ -30,11 +30,11 @@ pub:
// new creates a new RepoGroupManager & creates the directories as needed
pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
	if !os.is_dir(repos_dir) {
		os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg') }
		os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
	}

	if !os.is_dir(pkg_dir) {
		os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg') }
		os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') }
	}

	return RepoGroupManager{
@ -50,7 +50,7 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana
// the right subdirectories in r.pkg_dir if it was successfully added.
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
	pkg := package.read_pkg_archive(pkg_path) or {
		return error('Failed to read package file: $err.msg')
		return error('Failed to read package file: $err.msg()')
	}

	added := r.add_pkg_in_repo(repo, pkg) ?

@ -2,6 +2,8 @@ module repo

import os

// archive_add_entry writes a file to an archive, given its path & inner path
// inside the archive.
fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &string, inner_path &string) {
	st := C.stat{}

@ -19,7 +21,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
	}

	// Write the file to the archive
	buf := [8192]byte{}
	buf := [8192]u8{}
	mut len := C.read(fd, &buf, sizeof(buf))

	for len > 0 {
@ -29,7 +31,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
		}
	}

// Re-generate the repo archive files
// sync regenerates the repository archive files.
fn (r &RepoGroupManager) sync(repo string, arch string) ? {
	subrepo_path := os.join_path(r.repos_dir, repo, arch)

@ -2,6 +2,7 @@ module server

import net.http

// is_authorized checks whether the provided API key is correct.
fn (mut app App) is_authorized() bool {
	x_header := app.req.header.get_custom('X-Api-Key', http.HeaderQueryConfig{ exact: true }) or {
		return false
@ -6,12 +6,9 @@ import env
struct Config {
pub:
	log_level    string = 'WARN'
	log_file     string = 'vieter.log'
	pkg_dir      string
	download_dir string
	data_dir     string
	api_key      string
	repos_dir    string
	repos_file   string
	default_arch string
}

src/server/git.v
@ -1,140 +1,83 @@
module server

import web
import git
import net.http
import rand
import response { new_data_response, new_response }
import db

const repos_file = 'repos.json'

// get_repos returns the current list of repos.
['/api/repos'; get]
fn (mut app App) get_repos() web.Result {
	if !app.is_authorized() {
		return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
	}

	repos := rlock app.git_mutex {
		git.read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file: $err.msg')

			return app.status(http.Status.internal_server_error)
		}
	}
	repos := app.db.get_git_repos()

	return app.json(http.Status.ok, new_data_response(repos))
}

// get_single_repo returns the information for a single repo.
['/api/repos/:id'; get]
fn (mut app App) get_single_repo(id string) web.Result {
fn (mut app App) get_single_repo(id int) web.Result {
	if !app.is_authorized() {
		return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
	}

	repos := rlock app.git_mutex {
		git.read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.status(http.Status.internal_server_error)
		}
	}

	if id !in repos {
		return app.not_found()
	}

	repo := repos[id]
	repo := app.db.get_git_repo(id) or { return app.not_found() }

	return app.json(http.Status.ok, new_data_response(repo))
}

// post_repo creates a new repo from the provided query string.
['/api/repos'; post]
fn (mut app App) post_repo() web.Result {
	if !app.is_authorized() {
		return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
	}

	new_repo := git.repo_from_params(app.query) or {
		return app.json(http.Status.bad_request, new_response(err.msg))
	mut params := app.query.clone()

	// If a repo is created without specifying the arch, we assume it's meant
	// for the default architecture.
	if 'arch' !in params {
		params['arch'] = app.conf.default_arch
	}

	id := rand.uuid_v4()

	mut repos := rlock app.git_mutex {
		git.read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.status(http.Status.internal_server_error)
		}
	new_repo := db.git_repo_from_params(params) or {
		return app.json(http.Status.bad_request, new_response(err.msg()))
	}

	// We need to check for duplicates
	for _, repo in repos {
		if repo == new_repo {
			return app.json(http.Status.bad_request, new_response('Duplicate repository.'))
		}
	}

	repos[id] = new_repo

	lock app.git_mutex {
		git.write_repos(app.conf.repos_file, &repos) or {
			return app.status(http.Status.internal_server_error)
		}
	}
	app.db.add_git_repo(new_repo)

	return app.json(http.Status.ok, new_response('Repo added successfully.'))
}

// delete_repo removes a given repo from the server's list.
['/api/repos/:id'; delete]
fn (mut app App) delete_repo(id string) web.Result {
fn (mut app App) delete_repo(id int) web.Result {
	if !app.is_authorized() {
		return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
	}

	mut repos := rlock app.git_mutex {
		git.read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.status(http.Status.internal_server_error)
		}
	}

	if id !in repos {
		return app.not_found()
	}

	repos.delete(id)

	lock app.git_mutex {
		git.write_repos(app.conf.repos_file, &repos) or { return app.server_error(500) }
	}
	app.db.delete_git_repo(id)

	return app.json(http.Status.ok, new_response('Repo removed successfully.'))
}

// patch_repo updates a repo's data with the given query params.
['/api/repos/:id'; patch]
fn (mut app App) patch_repo(id string) web.Result {
fn (mut app App) patch_repo(id int) web.Result {
	if !app.is_authorized() {
		return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
	}

	mut repos := rlock app.git_mutex {
		git.read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')
	app.db.update_git_repo(id, app.query)

			return app.status(http.Status.internal_server_error)
		}
	}
	if 'arch' in app.query {
		arch_objs := app.query['arch'].split(',').map(db.GitRepoArch{ value: it })

	if id !in repos {
		return app.not_found()
	}

	repos[id].patch_from_params(app.query)

	lock app.git_mutex {
		git.write_repos(app.conf.repos_file, &repos) or { return app.server_error(500) }
		app.db.update_git_repo_archs(id, arch_objs)
	}

	return app.json(http.Status.ok, new_response('Repo updated successfully.'))
@ -16,6 +16,9 @@ pub fn (mut app App) healthcheck() web.Result {
	return app.json(http.Status.ok, new_response('Healthy.'))
}

// get_repo_file handles all Pacman-related routes. It returns the
// repository's archives, as well as package archives and the contents of a
// package's desc file.
['/:repo/:arch/:filename'; get; head]
fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
	mut full_path := ''

@ -54,6 +57,7 @@ fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Re
	return app.file(full_path)
}

// put_package handles publishing a package to a repository.
['/:repo/publish'; post]
fn (mut app App) put_package(repo string) web.Result {
	if !app.is_authorized() {
@ -64,7 +68,7 @@ fn (mut app App) put_package(repo string) web.Result {

	if length := app.req.header.get(.content_length) {
		// Generate a random filename for the temp file
		pkg_path = os.join_path_single(app.conf.download_dir, rand.uuid_v4())
		pkg_path = os.join_path_single(app.repo.pkg_dir, rand.uuid_v4())

		app.ldebug("Uploading $length bytes (${util.pretty_bytes(length.int())}) to '$pkg_path'.")

@ -87,15 +91,15 @@ fn (mut app App) put_package(repo string) web.Result {
	}

	res := app.repo.add_pkg_from_path(repo, pkg_path) or {
		app.lerror('Error while adding package: $err.msg')
		app.lerror('Error while adding package: $err.msg()')

		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }

		return app.json(http.Status.internal_server_error, new_response('Failed to add package.'))
	}

	if !res.added {
		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }

		app.lwarn("Duplicate package '$res.pkg.full_name()' in repo '$repo'.")

@ -5,8 +5,14 @@ import os
import log
import repo
import util
import db

const port = 8000
const (
	port          = 8000
	log_file_name = 'vieter.log'
	repo_dir_name = 'repos'
	db_file_name  = 'vieter.sqlite'
)

struct App {
	web.Context
@ -14,8 +20,7 @@ pub:
	conf Config [required; web_global]
pub mut:
	repo repo.RepoGroupManager [required; web_global]
	// This is used to claim the file lock on the repos file
	git_mutex shared util.Dummy
	db db.VieterDb
}

// server starts the web server & starts listening for requests
@ -30,11 +35,14 @@ pub fn server(conf Config) ? {
		util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
	}

	os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') }

	mut logger := log.Log{
		level: log_level
	}

	logger.set_full_logpath(conf.log_file)
	log_file := os.join_path_single(conf.data_dir, server.log_file_name)
	logger.set_full_logpath(log_file)
	logger.log_to_console_too()

	defer {
@ -43,19 +51,20 @@ pub fn server(conf Config) ? {
		logger.close()
	}

	repo_dir := os.join_path_single(conf.data_dir, server.repo_dir_name)
	// This also creates the directories if needed
	repo := repo.new(conf.repos_dir, conf.pkg_dir, conf.default_arch) or {
		logger.error(err.msg)
	repo := repo.new(repo_dir, conf.pkg_dir, conf.default_arch) or {
		logger.error(err.msg())
		exit(1)
	}

	os.mkdir_all(conf.download_dir) or {
		util.exit_with_message(1, 'Failed to create download directory.')
	}
	db_file := os.join_path_single(conf.data_dir, server.db_file_name)
	db := db.init(db_file) or { util.exit_with_message(1, 'Failed to initialize database.') }

	web.run(&App{
		logger: logger
		conf: conf
		repo: repo
		db: db
	}, server.port)
}

@ -30,7 +30,7 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
		file.close()
	}

	mut buf := []byte{len: util.reader_buf_size}
	mut buf := []u8{len: util.reader_buf_size}
	mut bytes_left := length

	// Repeat as long as the stream still has data
@ -44,6 +44,7 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
		for to_write > 0 {
			// TODO don't just loop infinitely here
			bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }
			// file.flush()

			to_write = to_write - bytes_written
		}
@ -59,7 +60,7 @@ pub fn hash_file(path &string) ?(string, string) {
	mut sha256sum := sha256.new()

	buf_size := int(1_000_000)
	mut buf := []byte{len: buf_size}
	mut buf := []u8{len: buf_size}
	mut bytes_left := os.file_size(path)

	for bytes_left > 0 {
@ -47,6 +47,7 @@ fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
	return methods, path.to_lower()
}

// Extracts query parameters from a URL.
fn parse_query_from_url(url urllib.URL) map[string]string {
	mut query := map[string]string{}
	for v in url.query().data {
@ -55,6 +56,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
	return query
}

// Extract form data from an HTTP request.
fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) {
	mut form := map[string]string{}
	mut files := map[string][]http.FileData{}

@ -249,7 +249,7 @@ pub fn (mut ctx Context) file(f_path string) Result {

	// ext := os.file_ext(f_path)
	// data := os.read_file(f_path) or {
	//	eprint(err.msg)
	//	eprint(err.msg())
	//	ctx.server_error(500)
	//	return Result{}
	// }
@ -267,7 +267,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
	file_size := os.file_size(f_path)

	file := os.open(f_path) or {
		eprintln(err.msg)
		eprintln(err.msg())
		ctx.server_error(500)
		return Result{}
	}
@ -285,7 +285,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
	resp.set_status(ctx.status)
	send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }

	mut buf := []byte{len: 1_000_000}
	mut buf := []u8{len: 1_000_000}
	mut bytes_left := file_size

	// Repeat as long as the stream still has data
@ -361,7 +361,7 @@ interface DbInterface {
// run runs the app
[manualfree]
pub fn run<T>(global_app &T, port int) {
	mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code $err') }
	mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code() $err') }

	// Parsing methods attributes
	mut routes := map[string]Route{}
@ -393,7 +393,7 @@ pub fn run<T>(global_app &T, port int) {
		request_app.Context = global_app.Context // copy the context ref that contains static files map etc
		mut conn := l.accept() or {
			// failures should not panic
			eprintln('accept() failed with error: $err.msg')
			eprintln('accept() failed with error: $err.msg()')
			continue
		}
		go handle_conn<T>(mut conn, mut request_app, routes)
vieter.toml
@ -1,10 +1,14 @@
# This file contains settings used during development
api_key = "test"
download_dir = "data/downloads"
repos_dir = "data/repos"
data_dir = "data"
pkg_dir = "data/pkgs"
log_level = "DEBUG"
repos_file = "data/repos.json"
default_arch = "x86_64"

address = "http://localhost:8000"

global_schedule = '* *'
api_update_frequency = 2
image_rebuild_frequency = 1
max_concurrent_builds = 3