Compare commits


No commits in common. "dev" and "dev" have entirely different histories.

119 changed files with 2250 additions and 3531 deletions


@@ -1,4 +0,0 @@
# To stay consistent with the V formatting style, we use tabs
UseTab: Always
IndentWidth: 4
TabWidth: 4


@@ -5,5 +5,6 @@ root = true
end_of_line = lf
insert_final_newline = true
[*.{v,c,h}]
[*.v]
# vfmt wants it :(
indent_style = tab

.gitignore

@@ -1,4 +1,4 @@
vieter.c
*.c
/data/
# Build artifacts
@@ -26,8 +26,4 @@ gdb.txt
# Generated docs
_docs/
docs/resources/_gen/
/man/
# VLS logs
vls.log

.gitmodules

@@ -1,6 +1,3 @@
[submodule "docs/themes/hugo-book"]
path = docs/themes/hugo-book
url = https://github.com/alex-shpak/hugo-book
[submodule "src/libvieter"]
path = src/libvieter
url = https://git.rustybever.be/vieter-v/libvieter


@@ -9,8 +9,7 @@ skip_clone: true
pipeline:
build:
image: 'git.rustybever.be/vieter-v/vieter-builder'
pull: true
image: 'menci/archlinuxarm:base-devel'
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf


@@ -9,8 +9,7 @@ skip_clone: true
pipeline:
build:
image: 'git.rustybever.be/vieter-v/vieter-builder'
pull: true
image: 'menci/archlinuxarm:base-devel'
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf


@@ -1,6 +1,3 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
matrix:
PLATFORM:
- 'linux/amd64'
@@ -10,7 +7,7 @@ platform: ${PLATFORM}
pipeline:
install-modules:
image: *vlang_image
image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
@@ -19,7 +16,7 @@ pipeline:
event: [push, pull_request]
debug:
image: *vlang_image
image: 'chewingbever/vlang:latest'
commands:
- export VMODULES=$PWD/.vmodules
- make
@@ -29,7 +26,7 @@ pipeline:
exclude: [main]
prod:
image: *vlang_image
image: 'chewingbever/vlang:latest'
environment:
- LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static
commands:
@@ -47,7 +44,7 @@ pipeline:
event: [push, pull_request]
upload:
image: *vlang_image
image: 'chewingbever/vlang:latest'
secrets: [ s3_username, s3_password ]
commands:
# https://gist.github.com/JustinTimperio/7c7115f87b775618637d67ac911e595f
@@ -57,7 +54,7 @@ pipeline:
- export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')"
- export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH"
- export SIGNATURE="$(echo -en $SIG_STRING | openssl dgst -sha1 -hmac $S3_PASSWORD -binary | base64)"
- export SIGNATURE="$(echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64)"
- >
curl
--silent


@@ -1,6 +1,3 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
platform: 'linux/amd64'
branches:
exclude: [ main ]
@@ -14,16 +11,15 @@ pipeline:
- make docs
api-docs:
image: *vlang_image
image: 'chewingbever/vlang:latest'
pull: true
group: 'generate'
commands:
- make api-docs
slate-docs:
image: 'slatedocs/slate:v2.13.0'
image: 'slatedocs/slate'
group: 'generate'
# Slate requires a specific directory to run in
commands:
- cd docs/api
- bundle exec middleman build --clean


@@ -1,6 +1,3 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
platform: 'linux/amd64'
branches: [ 'main' ]
depends_on:
@@ -11,7 +8,7 @@ skip_clone: true
pipeline:
prepare:
image: *vlang_image
image: 'chewingbever/vlang:latest'
pull: true
secrets: [ s3_username, s3_password ]
commands:


@@ -0,0 +1,13 @@
# These checks already get performed on the feature branches
branches:
exclude: [ main ]
platform: 'linux/amd64'
pipeline:
lint:
image: 'chewingbever/vlang:latest'
pull: true
commands:
- make lint
when:
event: [ pull_request ]


@@ -1,6 +1,3 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
platform: 'linux/amd64'
branches:
exclude: [ main ]
@@ -8,21 +5,15 @@ branches:
depends_on:
- build
skip_clone: true
pipeline:
install-modules:
image: *vlang_image
generate:
image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
- 'cd src && v install'
generate:
image: *vlang_image
commands:
# - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
# - chmod +x vieter
- export VMODULES=$PWD/.vmodules
- make
- curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
- chmod +x vieter
- ./vieter man man
- cd man


@@ -1,6 +1,3 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
matrix:
PLATFORM:
- 'linux/amd64'
@@ -12,7 +9,7 @@ platform: ${PLATFORM}
pipeline:
install-modules:
image: *vlang_image
image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
@@ -21,7 +18,7 @@ pipeline:
event: [pull_request]
test:
image: *vlang_image
image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules


@@ -1,27 +0,0 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
# These checks already get performed on the feature branches
branches:
exclude: [ main ]
platform: 'linux/amd64'
pipeline:
# vfmt seems to get confused if these aren't present
install-modules:
image: *vlang_image
pull: true
commands:
- export VMODULES=$PWD/.vmodules
- 'cd src && v install'
when:
event: [pull_request]
lint:
image: *vlang_image
pull: true
commands:
- export VMODULES=$PWD/.vmodules
- make lint
when:
event: [pull_request]


@@ -7,78 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
## [0.6.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.6.0)
### Added
* Metrics endpoint for Prometheus integration
* Search in list of targets using API & CLI
* Allow filtering targets by arch value
* Configurable global timeout for builds
### Changed
* Rewrote cron expression logic in C
* Updated codebase to V commit after 0.3.3
* Agents now use worker threads and no longer spawn a new thread for every
build
### Fixed
* Package upload now fails if TCP connection is closed before all bytes have
been received
### Removed
* Deprecated cron daemon
## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0)
### Added
* CLI commands for removing packages, arch-repos & repositories
## [0.5.0-rc.2](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.2)
### Added
* API route for removing logs & accompanying CLI command
* Daemon for periodically removing old logs
* CLI flag to filter logs by specific exit codes
### Changed
* Use `--long-option` instead of `-long-option` for CLI
## [0.5.0-rc.1](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.1)
### Added
* Allow specifying subdirectory inside Git repository
* Added option to deploy using agent-server architecture instead of cron daemon
* Allow scheduling builds on the server from the CLI tool instead of building
them locally
* Allow force-building packages, meaning the build won't check if the
repository is already up to date
### Changed
* Migrated codebase to V 0.3.2
* Cron expression parser now uses bitfields instead of bool arrays
### Fixed
* Arch value for target is now properly set if not provided
* Allow NULL values for branch in database
* Endpoint for adding targets now returns the correct id
* CLI now correctly reports errors when sending requests
* Fixed possible infinite loop when removing old build images
* Check whether build image still exists before starting build
* Don't run makepkg `prepare()` function twice
* Don't buffer stdout in Docker containers
## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0)
### Added
* Server port can now be configured
@@ -86,15 +14,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
URL to a PKGBUILD
* Targets with kind 'url' can provide a direct URL to a PKGBUILD instead of
providing a Git repository
* CLI commands for searching the AUR & directly adding packages
* HTTP routes for removing packages, arch-repos & repos
* All endpoints serving files now support HTTP byte range requests
* Better CLI UX
* When adding targets, the ID of the created target is returned
* The `-r` flag only shows raw data of action
* When adding a target, only ID is shown and not surrounding text
* Tabled output returns a tab-separated list (easy to script using
`cut`)
### Changed
@@ -104,11 +23,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Branch name for 'git' targets is now optional; if not provided, the
repository will be cloned with the default branch
* Build containers now explicitly set the PATH variable
* Refactor of web framework
* API endpoints now return id of newly created entries
* Repo POST requests now return information on published package
* `api` can no longer be used as a repository name
* CLI client now allows setting values to an empty value
### Removed


@@ -1,4 +1,4 @@
FROM git.rustybever.be/chewing_bever/vlang:0.3.2 AS builder
FROM chewingbever/vlang:latest AS builder
ARG TARGETPLATFORM
ARG CI_COMMIT_SHA
@@ -23,7 +23,6 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
"https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
chmod +x vieter ; \
else \
cd src && v install && cd .. && \
LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static' make prod && \
mv pvieter vieter ; \
fi


@@ -1,20 +1,16 @@
# =====CONFIG=====
SRC_DIR := src
SRCS != find '$(SRC_DIR)' -iname '*.v'
SOURCES != find '$(SRC_DIR)' -iname '*.v'
V_PATH ?= v
V := $(V_PATH) -showcc -gc boehm -d use_openssl -skip-unused
V := $(V_PATH) -showcc -gc boehm
all: vieter
# =====COMPILATION=====
.PHONY: libvieter
libvieter:
make -C '$(SRC_DIR)/libvieter' CFLAGS='-O3'
# Regular binary
vieter: $(SOURCES) libvieter
vieter: $(SOURCES)
$(V) -g -o vieter $(SRC_DIR)
# Debug build using gcc
@@ -22,7 +18,7 @@ vieter: $(SOURCES) libvieter
# multi-threaded and causes issues when running vieter inside gdb.
.PHONY: debug
debug: dvieter
dvieter: $(SOURCES) libvieter
dvieter: $(SOURCES)
$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)
# Run the debug build inside gdb
@@ -33,12 +29,12 @@ gdb: dvieter
# Optimised production build
.PHONY: prod
prod: pvieter
pvieter: $(SOURCES) libvieter
pvieter: $(SOURCES)
$(V) -o pvieter -prod $(SRC_DIR)
# Only generate C code
.PHONY: c
c: $(SOURCES) libvieter
c: $(SOURCES)
$(V) -o vieter.c $(SRC_DIR)
@@ -71,7 +67,6 @@ man: vieter
# =====OTHER=====
# Linting
.PHONY: lint
lint:
$(V) fmt -verify $(SRC_DIR)
@@ -79,33 +74,34 @@ lint:
$(V_PATH) missdoc -p $(SRC_DIR)
@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
# Formatting
# Format the V codebase
.PHONY: fmt
fmt:
$(V) fmt -w $(SRC_DIR)
# Testing
.PHONY: test
test: libvieter
$(V) -g test $(SRC_DIR)
test:
$(V) test $(SRC_DIR)
# Build & patch the V compiler
.PHONY: v
v: v/v
v/v:
git clone --single-branch https://git.rustybever.be/vieter-v/v v
make -C v
# Cleaning
.PHONY: clean
clean:
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public'
make -C '$(SRC_DIR)/libvieter' clean
# =====EXPERIMENTAL=====
.PHONY: autofree
autofree: afvieter
afvieter: $(SOURCES)
$(V) -showcc -autofree -o afvieter $(SRC_DIR)
$(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR)
.PHONY: skip-unused
skip-unused: suvieter
suvieter: $(SOURCES)
$(V) -skip-unused -o suvieter $(SRC_DIR)
$(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR)


@@ -3,31 +3,21 @@
pkgbase='vieter'
pkgname='vieter'
pkgver='0.6.0'
pkgver='0.3.0'
pkgrel=1
pkgdesc="Lightweight Arch repository server & package build system"
pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
makedepends=('git' 'vieter-vlang')
makedepends=('git' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
source=(
"$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}"
"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
)
md5sums=('SKIP' 'SKIP')
source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}")
md5sums=('SKIP')
prepare() {
cd "${pkgname}"
export VMODULES="$srcdir/.vmodules"
# Add the libvieter submodule
git submodule init
git config submodules.src/libvieter.url "${srcdir}/libvieter"
git -c protocol.file.allow=always submodule update
export VMODULES="${srcdir}/.vmodules"
cd src && v install
cd "$pkgname/src" && v install
}
build() {


@@ -5,43 +5,33 @@ pkgbase='vieter-git'
pkgname='vieter-git'
pkgver=0.2.0.r25.g20112b8
pkgrel=1
pkgdesc="Lightweight Arch repository server & package build system (development version)"
pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
makedepends=('git' 'vieter-vlang')
makedepends=('git' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
source=(
"${pkgname}::git+https://git.rustybever.be/vieter-v/vieter#branch=dev"
"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
)
md5sums=('SKIP' 'SKIP')
source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#branch=dev")
md5sums=('SKIP')
provides=('vieter')
conflicts=('vieter')
pkgver() {
cd "${pkgname}"
cd "$pkgname"
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
cd "${pkgname}"
export VMODULES="$srcdir/.vmodules"
# Add the libvieter submodule
git submodule init
git config submodules.src/libvieter.url "${srcdir}/libvieter"
git -c protocol.file.allow=always submodule update
export VMODULES="${srcdir}/.vmodules"
cd src && v install
cd "$pkgname/src" && v install
}
build() {
export VMODULES="${srcdir}/.vmodules"
export VMODULES="$srcdir/.vmodules"
cd "${pkgname}"
cd "$pkgname"
make prod
@@ -52,9 +42,9 @@ build() {
}
package() {
install -dm755 "${pkgdir}/usr/bin"
install -Dm755 "${pkgname}/pvieter" "${pkgdir}/usr/bin/vieter"
install -dm755 "$pkgdir/usr/bin"
install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
install -dm755 "${pkgdir}/usr/share/man/man1"
install -Dm644 "${pkgname}/man"/*.1 "${pkgdir}/usr/share/man/man1"
install -dm755 "$pkgdir/usr/share/man/man1"
install -Dm644 "$pkgname/man"/*.1 "$pkgdir/usr/share/man/man1"
}


@@ -21,8 +21,7 @@ quicker.
I chose [V](https://vlang.io/) as I've been very intrigued by this language for
a while now. I wanted a fast language that I could code while relaxing, without
having to exert too much mental effort & V seemed like the right choice for
that. Sadly, this didn't quite turn out the way I expected, but I'm sticking
with it anyways ;p
that.
## Features
@@ -38,6 +37,7 @@ with it anyways ;p
Besides a V installer, Vieter also requires the following libraries to work:
* gc
* libarchive
* openssl
* sqlite3
@@ -48,9 +48,15 @@ update`.
### Compiler
V is developed using a specific compiler commit that is usually updated
whenever a new version is released. Information on this can be found in the
[tools](https://git.rustybever.be/vieter-v/tools) repository.
Vieter compiles with the standard Vlang compiler. However, I do maintain a
[mirror](https://git.rustybever.be/vieter-v/v). This is to ensure my CI does
not break without reason, as I control when & how frequently the mirror is
updated to reflect the official repository.
If you encounter issues using the latest V compiler, try using my mirror
instead. `make v` will clone the repository & build the mirror. Afterwards,
prepending any make command with `V_PATH=v/v` tells make to use the locally
compiled mirror instead.
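For example, building against the mirror could look like this (a sketch following the steps above):

```sh
# Clone & build the compiler mirror; the resulting binary is v/v
make v

# Prefix any make target with V_PATH to use the locally built compiler
V_PATH=v/v make
V_PATH=v/v make test
```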
## Contributing


@@ -1,78 +0,0 @@
# Jobs
<aside class="notice">
All routes in this section require authentication.
</aside>
## Manually schedule a job
```shell
curl \
-H 'X-Api-Key: secret' \
https://example.com/api/v1/jobs/queue?target=10&force&arch=x86_64
```
Manually schedule a job on the server.
### HTTP Request
`POST /api/v1/jobs/queue`
### Query Parameters
Parameter | Description
--------- | -----------
target | Id of target to schedule build for
arch | Architecture to build on
force | Whether it's a forced build (true if present)
## Poll for new jobs
<aside class="warning">
This endpoint is used by the agents and should not be used manually. It's just
here for completeness. Requests to this endpoint modify the build queue,
meaning manual requests can cause builds to be skipped.
</aside>
```shell
curl \
-H 'X-Api-Key: secret' \
https://example.com/api/v1/jobs/poll?arch=x86_64&max=2
```
> JSON output format
```json
{
"message": "",
"data": [
{
"target_id": 1,
"kind": "git",
"url": "https://aur.archlinux.org/discord-ptb.git",
"branch": "master",
"path": "",
"repo": "bur",
"base_image": "archlinux:base-devel",
"force": true
}
]
}
```
Poll the server for new builds.
### HTTP Request
`GET /api/v1/jobs/poll`
### Query Parameters
Parameter | Description
--------- | -----------
arch | For which architecture to receive jobs
max | How many jobs to receive at most


@@ -112,21 +112,10 @@ id | ID of requested log
## Publish build log
> JSON output format
```json
{
"message": "",
"data": {
"id": 15
}
}
```
<aside class="warning">
This endpoint is used by the agents and should not be used manually unless you
know what you're doing. It's just here for completeness.
You should probably not use this endpoint, as it's used by the build system to
publish its logs.
</aside>
@@ -149,24 +138,3 @@ target | id of target this build is for
### Request body
Plaintext contents of the build log.
## Remove a build log
```shell
curl \
-XDELETE \
-H 'X-Api-Key: secret' \
https://example.com/api/v1/logs/1
```
Remove a build log from the server.
### HTTP Request
`DELETE /api/v1/logs/:id`
### URL Parameters
Parameter | Description
--------- | -----------
id | id of log to remove


@@ -93,87 +93,3 @@ other already present arch-repos.
Parameter | Description
--------- | -----------
repo | Repository to publish package to
## Remove package from arch-repo
<aside class="notice">
This endpoint requires authentication.
</aside>
```shell
curl \
-H 'X-Api-Key: secret' \
-XDELETE \
https://example.com/vieter/x86_64/mike
```
This endpoint allows you to remove a package from a given arch-repo.
### HTTP Request
`DELETE /:repo/:arch/:pkg`
### URL Parameters
Parameter | Description
--------- | -----------
repo | Repository to delete package from
arch | Specific arch-repo to remove package from
pkg | Name of package to remove (without any version information)
## Remove arch-repo
<aside class="notice">
This endpoint requires authentication.
</aside>
```shell
curl \
-H 'X-Api-Key: secret' \
-XDELETE \
https://example.com/vieter/x86_64
```
This endpoint allows removing an entire arch-repo.
### HTTP Request
`DELETE /:repo/:arch`
### URL Parameters
Parameter | Description
--------- | -----------
repo | Repository to delete arch-repo from
arch | Specific architecture to remove
## Remove repo
<aside class="notice">
This endpoint requires authentication.
</aside>
```shell
curl \
-H 'X-Api-Key: secret' \
-XDELETE \
https://example.com/vieter
```
This endpoint allows removing an entire repo.
### HTTP Request
`DELETE /:repo`
### URL Parameters
Parameter | Description
--------- | -----------
repo | Repository to delete


@@ -27,7 +27,6 @@ curl \
"kind": "git",
"url": "https://aur.archlinux.org/discord-ptb.git",
"branch": "master",
"path" : "",
"repo": "bur",
"schedule": "",
"arch": [
@@ -55,8 +54,6 @@ Parameter | Description
limit | Maximum amount of results to return.
offset | Offset of results.
repo | Limit results to targets that publish to the given repo.
query | Only return targets that have this substring in their URL, path or branch.
arch | Only return targets that publish to this arch.
## Get specific target
@@ -76,9 +73,8 @@ curl \
"kind": "git",
"url": "https://aur.archlinux.org/discord-ptb.git",
"branch": "master",
"path": "",
"repo": "bur",
"schedule": "0 2",
"schedule": "0 3",
"arch": [
{
"id": 1,
@@ -104,17 +100,6 @@ id | id of requested target
## Create a new target
> JSON output format
```json
{
"message": "",
"data": {
"id": 15
}
}
```
Create a new target with the given data.
### HTTP Request
@@ -128,7 +113,6 @@ Parameter | Description
kind | Kind of target to add; one of 'git', 'url'.
url | URL of the Git repository.
branch | Branch of the Git repository.
path | Subdirectory inside Git repository to use.
repo | Vieter repository to publish built packages to.
schedule | Cron build schedule (syntax explained [here](https://rustybever.be/docs/vieter/usage/builds/schedule/))
arch | Comma-separated list of architectures to build package on.
@@ -154,20 +138,12 @@ Parameter | Description
kind | Kind of target; one of 'git', 'url'.
url | URL of the Git repository.
branch | Branch of the Git repository.
path | Subdirectory inside Git repository to use.
repo | Vieter repository to publish built packages to.
schedule | Cron build schedule
arch | Comma-separated list of architectures to build package on.
## Remove a target
```shell
curl \
-XDELETE \
-H 'X-Api-Key: secret' \
https://example.com/api/v1/targets/1
```
Remove a target from the server.
### HTTP Request


@@ -11,7 +11,6 @@ includes:
- repository
- targets
- logs
- jobs
search: true


@@ -17,7 +17,7 @@ If a variable is both present in the config file & as an environment variable,
the value in the environment variable is used.
{{< hint info >}}
**Note**
**Note**
All environment variables can also be provided from a file by appending them
with `_FILE`. This for example allows you to provide the API key from a Docker
secrets file.
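For example, assuming the server is configured through `VIETER_*` environment variables, the API key could be supplied through a Docker secret (the file path is illustrative):

```sh
# Pass the API key directly...
export VIETER_API_KEY=secret

# ...or read it from a file, e.g. a mounted Docker secret
export VIETER_API_KEY_FILE=/run/secrets/vieter_api_key

vieter server
```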
@@ -32,11 +32,11 @@ configuration variable required for each command.
### `vieter server`
* `port`: HTTP port to run on
* Default: `8000`
* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
`WARN`, `INFO` or `DEBUG`.
* Default: `WARN`
* `log_file`: log file to write logs to.
* Default: `vieter.log` (in the current directory)
* `pkg_dir`: where Vieter should store the actual package archives.
* `data_dir`: where Vieter stores the repositories, log file & database.
* `api_key`: the API key to use when authenticating requests.
@@ -44,26 +44,9 @@ configuration variable required for each command.
* Packages with architecture `any` are always added to this architecture.
This prevents the server from being confused when an `any` package is
published as the very first package for a repository.
* Targets added without an `arch` value use this value instead.
* `global_schedule`: build schedule for any target that does not have a
schedule defined. For information about this syntax, see
[here](/usage/builds/schedule).
* Default: `0 3` (3AM every night)
* `base_image`: Docker image to use when building a package. Any Pacman-based
distro image should work, as long as `/etc/pacman.conf` is used &
`base-devel` exists in the repositories. Make sure that the image supports
the architecture of your cron daemon.
* Default: `archlinux:base-devel` (only works on `x86_64`). If you require
`aarch64` support, consider using
[`menci/archlinuxarm:base-devel`](https://hub.docker.com/r/menci/archlinuxarm)
([GitHub](https://github.com/Menci/docker-archlinuxarm)). This is the
image used for the Vieter CI builds.
* `max_log_age`: maximum age of logs (in days). Logs older than this will get
cleaned by the log removal daemon. If set to zero, no logs are ever removed.
The age of logs is determined by the time the build was started.
* Default: `0`
* `log_removal_schedule`: cron schedule defining when to clean old logs.
* Default: `0 0` (every day at midnight)
* Git repositories added without an `arch` value use this value instead.
* `port`: HTTP port to run on
* Default: `8000`
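Putting the variables above together, a minimal server setup might look like the following sketch; every value is a placeholder:

```sh
# Minimal server configuration via environment variables
export VIETER_API_KEY=secret
export VIETER_DATA_DIR=/var/lib/vieter
export VIETER_PKG_DIR=/var/lib/vieter/pkgs
export VIETER_DEFAULT_ARCH=x86_64

vieter server
```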
### `vieter cron`
@@ -114,25 +97,3 @@ configuration variable required for each command.
build`.
* Default: `archlinux:base-devel`
### `vieter agent`
* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
`WARN`, `INFO` or `DEBUG`.
* Default: `WARN`
* `address`: *public* URL of the Vieter repository server to build for. From
this server jobs are retrieved. All built packages are published to this
server.
* `api_key`: API key of the above server.
* `data_dir`: directory to store log file in.
* `max_concurrent_builds`: how many builds to run at the same time.
* Default: `1`
* `polling_frequency`: how often (in seconds) to poll the server for new
builds. Note that the agent might poll more frequently when it's actively
processing builds.
* `image_rebuild_frequency`: Vieter periodically builds images that are then
used as a basis for running build containers. This is to prevent each build
from downloading an entire repository worth of dependencies. This setting
defines how frequently (in minutes) to rebuild these images.
* Default: `1440` (every 24 hours)
* `arch`: architecture for which this agent should pull down builds (e.g.
`x86_64`)
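As a sketch, an agent configured entirely through environment variables might be started like this (all values are placeholders):

```sh
# Repository server to poll for jobs & publish packages to
export VIETER_ADDRESS=https://example.com
export VIETER_API_KEY=secret
export VIETER_DATA_DIR=/var/lib/vieter-agent
export VIETER_ARCH=x86_64
export VIETER_MAX_CONCURRENT_BUILDS=2

vieter agent
```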


@@ -21,17 +21,17 @@ branch. This branch will be the most up to date, but does not give any
guarantees about stability, so beware!
Thanks to the single-binary design of Vieter, this image can be used both for
the repository server, the cron daemon and the agent.
the repository server & the cron daemon.
Below is a minimal compose file to set up both the repository server & a build
agent:
Below is an example compose file to set up both the repository server & the
cron daemon:
```yaml
version: '3'
services:
server:
image: 'chewingbever/vieter:0.5.0-rc.1'
image: 'chewingbever/vieter:dev'
restart: 'always'
environment:
@@ -41,19 +41,18 @@ services:
- 'data:/data'
cron:
image: 'chewingbever/vieter:0.5.0-rc.1'
image: 'chewingbever/vieter:dev'
restart: 'always'
# Required to connect to the Docker daemon
user: root
command: 'vieter agent'
command: 'vieter cron'
environment:
- 'VIETER_API_KEY=secret'
# MUST be public URL of Vieter repository
- 'VIETER_ADDRESS=https://example.com'
# Architecture for which the agent builds
- 'VIETER_ARCH=x86_64'
- 'VIETER_DEFAULT_ARCH=x86_64'
- 'VIETER_MAX_CONCURRENT_BUILDS=2'
- 'VIETER_GLOBAL_SCHEDULE=0 3'
volumes:
- '/var/run/docker.sock:/var/run/docker.sock'
@@ -64,23 +63,20 @@ volumes:
If you do not require the build system, the repository server can be used
independently as well.
Of course, Vieter allows a lot more configuration than this. This compose file
is meant as a starting point for setting up your installation.
{{< hint info >}}
**Note**
Builds are executed on the agent's system using the host's Docker daemon. An
agent for a specific `arch` will only build packages for that specific
architecture. Therefore, if you wish to build packages for both `x86_64` &
`aarch64`, you'll have to deploy two agents, one on each architecture.
Afterwards, any Git repositories enabled for those two architectures will build
on both.
Builds are executed on the cron daemon's system using the host's Docker daemon.
A cron daemon on a specific architecture will only build packages for that
specific architecture. Therefore, if you wish to build packages for both
`x86_64` & `aarch64`, you'll have to deploy two cron daemons, one on each
architecture. Afterwards, any Git repositories enabled for those two
architectures will build on both.
{{< /hint >}}
## Binary
On the
[releases](https://git.rustybever.be/vieter-v/vieter/releases)
[releases](https://git.rustybever.be/vieter/vieter/releases)
page, you can find statically compiled binaries for all
released versions. This is the same binary as used inside
the Docker images.
@@ -103,12 +99,12 @@ latest official release or `vieter-git` for the latest development release.
### AUR
If you prefer building the packages locally (or on your own Vieter instance),
there's the [`vieter`](https://aur.archlinux.org/packages/vieter) &
[`vieter-git`](https://aur.archlinux.org/packages/vieter-git) packages on the
AUR. These packages build using the `vlang` compiler package, so I can't
there's the `[vieter](https://aur.archlinux.org/packages/vieter)` &
`[vieter-git](https://aur.archlinux.org/packages/vieter-git)` packages on the
AUR. These packages build using the `vlang-git` compiler package, so I can't
guarantee that a compiler update won't temporarily break them.
## Building from source
The project [README](https://git.rustybever.be/vieter-v/vieter#building)
contains instructions for building Vieter from source.
The project [README](https://git.rustybever.be/vieter/vieter#building) contains
instructions for building Vieter from source.


@@ -0,0 +1,3 @@
---
weight: 100
---


@@ -0,0 +1,81 @@
# Builds In-depth
For those interested, this page describes how the build system works
internally.
## Builder image
Every cron daemon periodically creates a builder image that is then used as a
base for all builds. This is done to prevent build containers from having to
pull down a bunch of updates when they update their system.
The build container is created by running the following commands inside a
container started from the image defined in `base_image`:
```sh
# Update repos & install required packages
pacman -Syu --needed --noconfirm base-devel git
# Add a non-root user to run makepkg
groupadd -g 1000 builder
useradd -mg builder builder
# Make sure they can use sudo without a password
echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
# Create the directory for the builds & make it writeable for the
# build user
mkdir /build
chown -R builder:builder /build
```
This script updates the packages to their latest versions & creates a non-root
user to use when running `makepkg`.
This script is base64-encoded & passed to the container as an environment
variable. The container's entrypoint is set to `/bin/sh -c` & its command
argument to `echo $BUILD_SCRIPT | base64 -d | /bin/sh -e`, with the
`BUILD_SCRIPT` environment variable containing the base64-encoded script.
Once the container exits, a new Docker image is created from it. This image is
then used as the base for any builds.
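The following `docker run` invocation sketches the same mechanism using the Docker CLI instead of the API; the script filename is illustrative and `archlinux:base-devel` stands in for whatever `base_image` is configured:

```sh
# Encode the build script & hand it to the container as an env var
BUILD_SCRIPT="$(base64 -w0 build_script.sh)"

# The container decodes the script & pipes it into a shell
docker run --rm \
  -e "BUILD_SCRIPT=${BUILD_SCRIPT}" \
  --entrypoint /bin/sh \
  archlinux:base-devel \
  -c 'echo $BUILD_SCRIPT | base64 -d | /bin/sh -e'
```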
## Running builds
Each build has its own Docker container, using the builder image as its base.
The same base64-based technique as above is used, just with a different script.
To make the build logs clearer, each command is preceded by an echo command
that prints the command about to be executed to stdout.
Given the Git repository URL is `https://examplerepo.com` with branch `main`,
the URL of the Vieter server is `https://example.com` and `vieter` is the
repository we wish to publish to, we get the following script:
```sh
echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
echo -e '+ pacman -Syu --needed --noconfirm'
pacman -Syu --needed --noconfirm
echo -e '+ su builder'
su builder
echo -e '+ git clone --single-branch --depth 1 --branch main https://examplerepo.com repo'
git clone --single-branch --depth 1 --branch main https://examplerepo.com repo
echo -e '+ cd repo'
cd repo
echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
makepkg --nobuild --syncdeps --needed --noconfirm
echo -e '+ source PKGBUILD'
source PKGBUILD
echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
```
This script:
1. Adds the target repository as a repository in the build container
2. Updates mirrors & packages
3. Clones the Git repository
4. Runs `makepkg` without building to calculate `pkgver`
5. Checks whether the package version is already present on the server
6. If not, run `makepkg` & publish any generated package archives to the server


@@ -1,23 +0,0 @@
---
weight: 20
---
# Cleanup
Vieter stores the logs of every single package build. While this is great for
debugging why builds fail, it also causes an active or long-running Vieter
instance to accumulate thousands of logs.
To combat this, a log removal daemon can be enabled that periodically removes
old build logs. By starting your server with the `max_log_age` variable (see
[Configuration](/configuration#vieter-server)), a daemon will get enabled that
periodically removes logs older than this setting. By default, this will happen
every day at midnight, but this behavior can be changed using the
`log_removal_schedule` variable.
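For example, to keep a month of logs and clean up every night (a sketch using the environment variable form of these settings):

```sh
# Remove logs for builds older than 30 days, checking daily at midnight
export VIETER_MAX_LOG_AGE=30
export VIETER_LOG_REMOVAL_SCHEDULE='0 0'

vieter server
```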
{{< hint info >}}
**Note**
The daemon will always run a removal of logs on startup. Therefore, it's
possible the daemon will be *very* active when first enabling this setting.
After the initial surge of logs to remove, it'll calm down again.
{{< /hint >}}


@@ -1,7 +1,3 @@
---
weight: 10
---
# Cron schedule syntax
The Vieter cron daemon uses a subset of the cron expression syntax to schedule
@@ -41,6 +37,6 @@ Each section can consist of as many of these parts as necessary.
## CLI tool
The Vieter binary contains a command that shows you the next matching times for
a given expression. This can be useful for understanding the syntax. For more
a given expression. This can be useful to understand the syntax. For more
information, see
[vieter-schedule(1)](https://rustybever.be/man/vieter/vieter-schedule.1.html).
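For example, assuming the subcommand name matches the man page:

```sh
# Show the next times a build would run for the default '0 3' schedule
vieter schedule '0 3'
```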


@@ -1,27 +0,0 @@
module agent
import log
import os
import util
const log_file_name = 'vieter.agent.log'
// agent starts an agent service
pub fn agent(conf Config) ! {
log_level := log.level_from_tag(conf.log_level) or {
return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
mut logger := log.Log{
level: log_level
}
os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') }
log_file := os.join_path_single(conf.data_dir, agent.log_file_name)
logger.set_full_logpath(log_file)
logger.log_to_console_too()
mut d := agent_init(logger, conf)
d.run()
}


@@ -1,31 +0,0 @@
module agent
import cli
import conf as vconf
struct Config {
pub:
log_level string = 'WARN'
// Architecture that the agent represents
arch string
api_key string
address string
data_dir string
max_concurrent_builds int = 1
polling_frequency int = 30
image_rebuild_frequency int = 1440
}
// cmd returns the cli module that handles the agent daemon.
pub fn cmd() cli.Command {
return cli.Command{
name: 'agent'
description: 'Start an agent daemon.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
agent(conf_)!
}
}
}


@@ -1,197 +0,0 @@
module agent
import log
import sync.stdatomic
import build
import models { BuildConfig }
import client
import time
import os
const (
build_empty = 0
build_running = 1
build_done = 2
)
struct AgentDaemon {
logger shared log.Log
conf Config
client client.Client
mut:
images ImageManager
// Atomic variables used to detect when a build has finished; length is
// conf.max_concurrent_builds. This approach is used as the difference
// between a recently finished build and an empty build slot is important
// for knowing whether the agent is currently "active".
atomics []u64
// Channel used to send builds to worker threads
build_channel chan BuildConfig
}
// agent_init initializes a new agent
fn agent_init(logger log.Log, conf Config) AgentDaemon {
mut d := AgentDaemon{
logger: logger
client: client.new(conf.address, conf.api_key)
conf: conf
images: new_image_manager(conf.image_rebuild_frequency * 60)
atomics: []u64{len: conf.max_concurrent_builds}
build_channel: chan BuildConfig{cap: conf.max_concurrent_builds}
}
return d
}
// run starts the actual agent daemon. This function will run forever.
pub fn (mut d AgentDaemon) run() {
// Spawn worker threads
for builder_index in 0 .. d.conf.max_concurrent_builds {
spawn d.builder_thread(d.build_channel, builder_index)
}
// This is just so that the very first time the loop is run, the jobs are
// always polled
mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
mut sleep_time := 0 * time.second
mut finished, mut empty, mut running := 0, 0, 0
for {
if sleep_time > 0 {
d.ldebug('Sleeping for ${sleep_time}')
time.sleep(sleep_time)
}
finished, empty = d.update_atomics()
running = d.conf.max_concurrent_builds - finished - empty
// No new finished builds and no free slots, so there's nothing to be
// done
if finished + empty == 0 {
sleep_time = 1 * time.second
continue
}
// Builds have finished, so old builder images might have freed up.
// TODO this might query the docker daemon too frequently.
if finished > 0 {
d.images.clean_old_images()
}
// The agent will always poll for new jobs after at most
// `polling_frequency` seconds. However, when jobs have finished, the
// agent will also poll for new jobs. This is because jobs are often
// clustered together (especially when mostly using the global cron
// schedule), so there's a much higher chance jobs are available.
if finished > 0 || time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) {
d.ldebug('Polling for new jobs')
new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or {
d.lerror('Failed to poll jobs: ${err.msg()}')
// TODO pick a better delay here
sleep_time = 5 * time.second
continue
}
d.ldebug('Received ${new_configs.len} jobs')
last_poll_time = time.now()
for config in new_configs {
// Make sure a recent build base image is available for
// building the config
if !d.images.up_to_date(config.base_image) {
d.linfo('Building builder image from base image ${config.base_image}')
// TODO handle this better than to just skip the config
d.images.refresh_image(config.base_image) or {
d.lerror(err.msg())
continue
}
}
// It's technically still possible that the build image is
// removed in the very short period between building the
// builder image and starting a build container with it. If
// this happens, fate really just didn't want you to do this
// build.
d.build_channel <- config
running++
}
}
// The agent is not doing anything, so we just wait until the next poll
// time
if running == 0 {
sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now()
} else {
sleep_time = 1 * time.second
}
}
}
// update_atomics checks for each build whether it's completed, and sets it to
// empty again if so. The return value is a tuple `(finished, empty)` where
// `finished` is how many builds were just finished and thus set to empty, and
// `empty` is how many build slots were already empty. The amount of running
// builds can then be calculated by subtracting these two values from the
// total allowed concurrent builds.
fn (mut d AgentDaemon) update_atomics() (int, int) {
mut finished := 0
mut empty := 0
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_done {
stdatomic.store_u64(&d.atomics[i], agent.build_empty)
finished++
} else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
empty++
}
}
return finished, empty
}
// run_build actually starts the build process for a given target.
fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
d.linfo('started build: ${config}')
// 0 means success, 1 means failure
mut status := 0
new_config := BuildConfig{
...config
base_image: d.images.get(config.base_image)
}
res := build.build_config(d.client.address, d.client.api_key, new_config) or {
d.ldebug('build_config error: ${err.msg()}')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('Uploading build logs for ${config}')
// TODO use the arch value here
build_arch := os.uname().machine
d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or { d.lerror('Failed to upload logs for ${config}') }
} else {
d.lwarn('an error occurred during build: ${config}')
}
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
}
// builder_thread is a thread that constantly listens for builds to process
fn (mut d AgentDaemon) builder_thread(ch chan BuildConfig, builder_index int) {
for {
build_config := <-ch or { break }
d.run_build(builder_index, build_config)
}
}


@@ -1,119 +0,0 @@
module agent
import time
import docker
import build
// An ImageManager is a utility that creates builder images from given base
// images, updating these builder images if they've become too old. This
// structure can manage images from any number of base images, paving the way
// for configurable base images per target/repository.
struct ImageManager {
max_image_age int [required]
mut:
// For each base image, one or more builder images can exist at the same
// time
images map[string][]string [required]
// For each base image, we track when its newest image was built
timestamps map[string]time.Time [required]
}
// new_image_manager initializes a new image manager.
fn new_image_manager(max_image_age int) ImageManager {
return ImageManager{
max_image_age: max_image_age
images: map[string][]string{}
timestamps: map[string]time.Time{}
}
}
// get returns the name of the newest image for the given base image. Note that
// this function should only be called *after* a first call to `refresh_image`.
pub fn (m &ImageManager) get(base_image string) string {
return m.images[base_image].last()
}
// up_to_date returns true if the last known builder image exists and is up to
// date. If this function returns true, the last builder image may be used to
// perform a build.
pub fn (mut m ImageManager) up_to_date(base_image string) bool {
if base_image !in m.timestamps
|| m.timestamps[base_image].add_seconds(m.max_image_age) <= time.now() {
return false
}
// It's possible the image has been removed by some external event, so we
// check whether it actually exists as well.
mut dd := docker.new_conn() or { return false }
defer {
dd.close() or {}
}
dd.image_inspect(m.images[base_image].last()) or {
// Image doesn't exist, so we stop tracking it
if err.code() == 404 {
m.images[base_image].delete_last()
m.timestamps.delete(base_image)
}
// If the inspect fails, it's either because the image doesn't exist or
// because of some other error. Either way, we can't know *for certain*
// that the image exists, so we return false.
return false
}
return true
}
// refresh_image builds a new builder image from the given base image. This
// function should only be called if `up_to_date` returned false.
fn (mut m ImageManager) refresh_image(base_image string) ! {
// TODO use better image tags for built images
new_image := build.create_build_image(base_image) or {
return error('Failed to build builder image from base image ${base_image}')
}
m.images[base_image] << new_image
m.timestamps[base_image] = time.now()
}
// clean_old_images removes all older builder images that are no longer in use.
// The function will always leave at least one builder image, namely the newest
// one.
fn (mut m ImageManager) clean_old_images() {
mut dd := docker.new_conn() or { return }
defer {
dd.close() or {}
}
mut i := 0
for image in m.images.keys() {
i = 0
for i < m.images[image].len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.image_remove(m.images[image][i]) or {
// The image was removed by an external event
if err.code() == 404 {
m.images[image].delete(i)
}
// The image couldn't be removed, so we need to keep track of
// it
else {
i += 1
}
continue
}
m.images[image].delete(i)
}
}
}


@@ -1,36 +0,0 @@
module agent
// lfatal create a log message with the fatal level
pub fn (mut d AgentDaemon) lfatal(msg string) {
lock d.logger {
d.logger.fatal(msg)
}
}
// lerror create a log message with the error level
pub fn (mut d AgentDaemon) lerror(msg string) {
lock d.logger {
d.logger.error(msg)
}
}
// lwarn create a log message with the warn level
pub fn (mut d AgentDaemon) lwarn(msg string) {
lock d.logger {
d.logger.warn(msg)
}
}
// linfo create a log message with the info level
pub fn (mut d AgentDaemon) linfo(msg string) {
lock d.logger {
d.logger.info(msg)
}
}
// ldebug create a log message with the debug level
pub fn (mut d AgentDaemon) ldebug(msg string) {
lock d.logger {
d.logger.debug(msg)
}
}


@@ -1,12 +1,12 @@
module build
import docker
import vieter_v.docker
import encoding.base64
import time
import os
import strings
import util
import models { BuildConfig, Target }
import models { Target }
const (
container_build_dir = '/build'
@@ -21,8 +21,8 @@ const (
// system, installs some necessary packages & creates a non-root user to run
// makepkg with. The base image should be some Linux distribution that uses
// Pacman as its package manager.
pub fn create_build_image(base_image string) !string {
mut dd := docker.new_conn()!
pub fn create_build_image(base_image string) ?string {
mut dd := docker.new_conn()?
defer {
dd.close() or {}
@@ -45,7 +45,7 @@ pub fn create_build_image(base_image string) !string {
c := docker.NewContainer{
image: base_image
env: ['BUILD_SCRIPT=${cmds_str}']
env: ['BUILD_SCRIPT=$cmds_str']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/sh -e']
}
@@ -57,15 +57,15 @@ pub fn create_build_image(base_image string) !string {
image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
// We pull the provided image
dd.image_pull(image_name, image_tag)!
dd.pull_image(image_name, image_tag)?
id := dd.container_create(c)!.id
// id := docker.create_container(c)!
dd.container_start(id)!
id := dd.container_create(c)?.id
// id := docker.create_container(c)?
dd.container_start(id)?
// This loop waits until the container has stopped, so we can remove it after
for {
data := dd.container_inspect(id)!
data := dd.container_inspect(id)?
if !data.state.running {
break
@@ -79,8 +79,8 @@ pub fn create_build_image(base_image string) !string {
// TODO also add the base image's name into the image name to prevent
// conflicts.
tag := time.sys_mono_now().str()
image := dd.image_from_container(id, 'vieter-build', tag)!
dd.container_remove(id)!
image := dd.create_image_from_container(id, 'vieter-build', tag)?
dd.container_remove(id)?
return image.id
}
@@ -93,35 +93,28 @@ pub:
logs string
}
// build_target builds the given target. Internally it calls `build_config`.
pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool, timeout int) !BuildResult {
config := target.as_build_config(base_image_id, force, timeout)
return build_config(address, api_key, config)
}
// build_config builds, packages & publishes a given Arch package based on the
// build_target builds, packages & publishes a given Arch package based on the
// provided target. The base image ID should be of an image previously created
// by create_build_image. It returns the logs of the container.
pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
mut dd := docker.new_conn()!
pub fn build_target(address string, api_key string, base_image_id string, target &Target) ?BuildResult {
mut dd := docker.new_conn()?
defer {
dd.close() or {}
}
build_arch := os.uname().machine
build_script := create_build_script(address, config, build_arch)
build_script := create_build_script(address, target, build_arch)
// We convert the build script into a base64 string, which then gets passed
// to the container as an env var
base64_script := base64.encode_str(build_script)
c := docker.NewContainer{
image: '${config.base_image}'
image: '$base_image_id'
env: [
'BUILD_SCRIPT=${base64_script}',
'API_KEY=${api_key}',
'BUILD_SCRIPT=$base64_script',
'API_KEY=$api_key',
// `archlinux:base-devel` does not correctly set the path variable,
// causing certain builds to fail. This fixes it.
'PATH=${build.path_dirs.join(':')}',
@@ -132,33 +125,25 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
user: '0:0'
}
id := dd.container_create(c)!.id
dd.container_start(id)!
id := dd.container_create(c)?.id
dd.container_start(id)?
mut data := dd.container_inspect(id)!
start_time := time.now()
mut data := dd.container_inspect(id)?
// This loop waits until the container has stopped, so we can remove it after
for data.state.running {
if time.now() - start_time > config.timeout * time.second {
dd.container_kill(id)!
dd.container_remove(id)!
return error('Build killed due to timeout (${config.timeout}s)')
}
time.sleep(1 * time.second)
data = dd.container_inspect(id)!
data = dd.container_inspect(id)?
}
mut logs_stream := dd.container_get_logs(id)!
mut logs_stream := dd.container_get_logs(id)?
// Read in the entire stream
mut logs_builder := strings.new_builder(10 * 1024)
util.reader_to_writer(mut logs_stream, mut logs_builder)!
util.reader_to_writer(mut logs_stream, mut logs_builder)?
dd.container_remove(id)!
dd.container_remove(id)?
return BuildResult{
start_time: data.state.start_time


@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done


@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done


@@ -18,5 +18,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done


@@ -1,219 +0,0 @@
module build
import models { BuildConfig, Target }
import cron
import time
import datatypes { MinHeap }
import util
struct BuildJob {
pub mut:
// Time at which this build job was created/queued
created time.Time
// Next timestamp from which point this job is allowed to be executed
timestamp time.Time
// Required for calculating next timestamp after having pop'ed a job
ce &cron.Expression = unsafe { nil }
// Actual build config sent to the agent
config BuildConfig
// Whether this is a one-time job
single bool
}
// Allows BuildJob structs to be sorted according to their timestamp in
// MinHeaps
fn (r1 BuildJob) < (r2 BuildJob) bool {
return r1.timestamp < r2.timestamp
}
// The build job queue is responsible for managing the list of scheduled builds
// for each architecture. Agents receive jobs from this queue.
pub struct BuildJobQueue {
// Schedule to use for targets without an explicitly defined cron expression
default_schedule &cron.Expression
// Base image to use for targets without defined base image
default_base_image string
// After how many minutes a build should be forcefully cancelled
default_build_timeout int
mut:
mutex shared util.Dummy
// For each architecture, a priority queue is tracked
queues map[string]MinHeap[BuildJob]
// When a target is removed from the server or edited, its previous build
// configs will be invalid. This map allows for those to be simply skipped
// by ignoring any build configs created before this timestamp.
invalidated map[int]time.Time
}
// new_job_queue initializes a new job queue
pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string, default_build_timeout int) BuildJobQueue {
return BuildJobQueue{
default_schedule: unsafe { default_schedule }
default_base_image: default_base_image
default_build_timeout: default_build_timeout
invalidated: map[int]time.Time{}
}
}
// insert_all executes insert for each architecture of the given Target.
pub fn (mut q BuildJobQueue) insert_all(target Target) ! {
for arch in target.arch {
q.insert(target: target, arch: arch.value)!
}
}
[params]
pub struct InsertConfig {
target Target [required]
arch string [required]
single bool
force bool
now bool
}
// insert a new target's job into the queue for the given architecture. This
// job will then be endlessly rescheduled after being pop'ed, unless removed
// explicitly.
pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
lock q.mutex {
if input.arch !in q.queues {
q.queues[input.arch] = MinHeap[BuildJob]{}
}
mut job := BuildJob{
created: time.now()
single: input.single
config: input.target.as_build_config(q.default_base_image, input.force, q.default_build_timeout)
}
if !input.now {
ce := if input.target.schedule != '' {
cron.parse_expression(input.target.schedule) or {
return error("Error while parsing cron expression '${input.target.schedule}' (id ${input.target.id}): ${err.msg()}")
}
} else {
q.default_schedule
}
job.timestamp = ce.next_from_now()
job.ce = ce
} else {
job.timestamp = time.now()
}
q.queues[input.arch].insert(job)
}
}
// reschedule the given job by calculating the next timestamp and re-adding it
// to its respective queue. This function is called by the pop functions
// *after* having pop'ed the job.
fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) {
new_timestamp := job.ce.next_from_now()
new_job := BuildJob{
...job
created: time.now()
timestamp: new_timestamp
}
q.queues[arch].insert(new_job)
}
// pop_invalid pops all invalid jobs.
fn (mut q BuildJobQueue) pop_invalid(arch string) {
for {
job := q.queues[arch].peek() or { return }
if job.config.target_id in q.invalidated
&& job.created < q.invalidated[job.config.target_id] {
// This pop *should* never fail according to the source code
q.queues[arch].pop() or {}
} else {
break
}
}
}
// peek shows the first job for the given architecture that's ready to be
// executed, if present.
pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob {
// Even peek requires a write lock, because pop_invalid can modify the data
// structure
lock q.mutex {
if arch !in q.queues {
return none
}
q.pop_invalid(arch)
job := q.queues[arch].peek() or { return none }
if job.timestamp < time.now() {
return job
}
}
return none
}
// pop removes the first job for the given architecture that's ready to be
// executed from the queue and returns it, if present.
pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
lock q.mutex {
if arch !in q.queues {
return none
}
q.pop_invalid(arch)
mut job := q.queues[arch].peek() or { return none }
if job.timestamp < time.now() {
job = q.queues[arch].pop() or { return none }
if !job.single {
q.reschedule(job, arch)
}
return job
}
}
return none
}
// pop_n tries to pop at most n available jobs for the given architecture.
pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
lock q.mutex {
if arch !in q.queues {
return []
}
mut out := []BuildJob{}
for out.len < n {
q.pop_invalid(arch)
mut job := q.queues[arch].peek() or { break }
if job.timestamp < time.now() {
job = q.queues[arch].pop() or { break }
if !job.single {
q.reschedule(job, arch)
}
out << job
} else {
break
}
}
return out
}
return []
}
// invalidate a target's old build jobs.
pub fn (mut q BuildJobQueue) invalidate(target_id int) {
q.invalidated[target_id] = time.now()
}
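
To make the queue's lifecycle concrete, here is a minimal, hedged sketch of how server code could drive it; it assumes it lives in the same module as BuildJobQueue, and the schedule string, base image, timeout and architecture are illustrative.

import cron
import models { Target }

fn example_queue_usage(target Target) ! {
	default_ce := cron.parse_expression('0 3')!
	mut queue := new_job_queue(default_ce, 'archlinux:base-devel', 3600)
	// Schedule the target once for each architecture it builds for.
	queue.insert_all(target)!
	// An agent for x86_64 asks for at most two jobs that are ready to run.
	for job in queue.pop_n('x86_64', 2) {
		println('would build ${job.config.url}')
	}
	// Editing or removing the target invalidates its older pending jobs.
	queue.invalidate(target.id)
}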

View File

@ -1,20 +0,0 @@
echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
echo -e '+ pacman -Syu --needed --noconfirm'
pacman -Syu --needed --noconfirm
echo -e '+ su builder'
su builder
echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
git clone --single-branch --depth 1 'https://examplerepo.com' repo
echo -e '+ cd '\''repo/example/path'\'''
cd 'repo/example/path'
echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
makepkg --nobuild --syncdeps --needed --noconfirm
echo -e '+ source PKGBUILD'
source PKGBUILD
echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@ -1,20 +0,0 @@
echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
echo -e '+ pacman -Syu --needed --noconfirm'
pacman -Syu --needed --noconfirm
echo -e '+ su builder'
su builder
echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
git clone --single-branch --depth 1 'https://examplerepo.com' repo
echo -e '+ cd '\''repo/example/path with spaces'\'''
cd 'repo/example/path with spaces'
echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
makepkg --nobuild --syncdeps --needed --noconfirm
echo -e '+ source PKGBUILD'
source PKGBUILD
echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@ -1,6 +1,6 @@
module build
import models { BuildConfig }
import models { Target }
// escape_shell_string escapes any characters that could be interpreted
// incorrectly by a shell. The resulting value should be safe to use inside an
@ -23,13 +23,13 @@ pub fn echo_commands(cmds []string) []string {
}
// create_build_script generates a shell script that builds a given Target.
fn create_build_script(address string, config BuildConfig, build_arch string) string {
repo_url := '${address}/${config.repo}'
fn create_build_script(address string, target &Target, build_arch string) string {
repo_url := '$address/$target.repo'
mut commands := [
// This will later be replaced by a proper setting for changing the
// mirrorlist
"echo -e '[${config.repo}]\\nServer = ${address}/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
"echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
// We need to update the package list of the repo we just added above.
// This should however not pull in a lot of packages as long as the
// builder image is rebuilt frequently.
@ -38,22 +38,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
'su builder',
]
commands << match config.kind {
commands << match target.kind {
'git' {
if config.branch == '' {
if target.branch == '' {
[
"git clone --single-branch --depth 1 '${config.url}' repo",
"git clone --single-branch --depth 1 '$target.url' repo",
]
} else {
[
"git clone --single-branch --depth 1 --branch ${config.branch} '${config.url}' repo",
"git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo",
]
}
}
'url' {
[
'mkdir repo',
"curl -o repo/PKGBUILD -L '${config.url}'",
"curl -o repo/PKGBUILD -L '$target.url'",
]
}
else {
@ -61,32 +61,19 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
}
}
commands << if config.path != '' {
"cd 'repo/${config.path}'"
} else {
'cd repo'
}
commands << [
'cd repo',
'makepkg --nobuild --syncdeps --needed --noconfirm',
'source PKGBUILD',
]
if !config.force {
// The build container checks whether the package is already present on
// the server.
commands << [
'curl -s --head --fail ${repo_url}/${build_arch}/\$pkgname-\$pkgver-\$pkgrel && exit 0',
// If the above curl command succeeds, we don't need to rebuild the
// package. However, because we're in a su shell, the exit command will
// drop us back into the root shell. Therefore, we must check whether
// we're in root so we don't proceed.
'[ "\$(id -u)" == 0 ] && exit 0',
]
}
commands << [
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" ${repo_url}/publish; done',
'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
// If the above curl command succeeds, we don't need to rebuild the
// package. However, because we're in a su shell, the exit command will
// drop us back into the root shell. Therefore, we must check whether
// we're in root so we don't proceed.
'[ "\$(id -u)" == 0 ] && exit 0',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
]
return echo_commands(commands).join('\n')
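
Judging from the fixture scripts above, echo_commands pairs every command with an echo of itself, so the generated build script traces each step before running it. A hypothetical call:

commands := ['pacman -Syu --needed --noconfirm', 'su builder']
script := echo_commands(commands).join('\n')
// script now contains, per command, a trace line followed by the command:
// echo -e '+ pacman -Syu --needed --noconfirm'
// pacman -Syu --needed --noconfirm
// echo -e '+ su builder'
// su builder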

View File

@ -1,75 +1,43 @@
module build
import models { BuildConfig }
fn test_create_build_script_git() {
config := BuildConfig{
target_id: 1
kind: 'git'
url: 'https://examplerepo.com'
repo: 'vieter'
base_image: 'not-used:latest'
}
build_script := create_build_script('https://example.com', config, 'x86_64')
expected := $embed_file('scripts/git.sh')
assert build_script == expected.to_string().trim_space()
}
fn test_create_build_script_git_path() {
mut config := BuildConfig{
target_id: 1
kind: 'git'
url: 'https://examplerepo.com'
repo: 'vieter'
path: 'example/path'
base_image: 'not-used:latest'
}
mut build_script := create_build_script('https://example.com', config, 'x86_64')
mut expected := $embed_file('scripts/git_path.sh')
assert build_script == expected.to_string().trim_space()
config = BuildConfig{
...config
path: 'example/path with spaces'
}
build_script = create_build_script('https://example.com', config, 'x86_64')
expected = $embed_file('scripts/git_path_spaces.sh')
assert build_script == expected.to_string().trim_space()
}
import models { Target }
fn test_create_build_script_git_branch() {
config := BuildConfig{
target_id: 1
target := Target{
id: 1
kind: 'git'
url: 'https://examplerepo.com'
branch: 'main'
repo: 'vieter'
base_image: 'not-used:latest'
}
build_script := create_build_script('https://example.com', target, 'x86_64')
expected := $embed_file('build_script_git_branch.sh')
build_script := create_build_script('https://example.com', config, 'x86_64')
expected := $embed_file('scripts/git_branch.sh')
assert build_script == expected.to_string().trim_space()
}
fn test_create_build_script_git() {
target := Target{
id: 1
kind: 'git'
url: 'https://examplerepo.com'
repo: 'vieter'
}
build_script := create_build_script('https://example.com', target, 'x86_64')
expected := $embed_file('build_script_git.sh')
assert build_script == expected.to_string().trim_space()
}
fn test_create_build_script_url() {
config := BuildConfig{
target_id: 1
target := Target{
id: 1
kind: 'url'
url: 'https://examplerepo.com'
repo: 'vieter'
base_image: 'not-used:latest'
}
build_script := create_build_script('https://example.com', config, 'x86_64')
expected := $embed_file('scripts/url.sh')
build_script := create_build_script('https://example.com', target, 'x86_64')
expected := $embed_file('build_script_url.sh')
assert build_script == expected.to_string().trim_space()
}

View File

@ -2,7 +2,7 @@ module client
import net.http { Method }
import net.urllib
import web.response { Response, new_data_response }
import response { Response }
import json
pub struct Client {
@ -21,8 +21,8 @@ pub fn new(address string, api_key string) Client {
// send_request_raw sends an HTTP request, returning the http.Response object.
// It encodes the params so that they're safe to pass as HTTP query parameters.
fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) !http.Response {
mut full_url := '${c.address}${url}'
fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) ?http.Response {
mut full_url := '$c.address$url'
if params.len > 0 {
mut params_escaped := map[string]string{}
@ -30,61 +30,41 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str
// Escape each query param
for k, v in params {
// An empty parameter should be the same as not providing it at all
params_escaped[k] = urllib.query_escape(v)
if v != '' {
params_escaped[k] = urllib.query_escape(v)
}
}
params_str := params_escaped.keys().map('${it}=${params_escaped[it]}').join('&')
params_str := params_escaped.keys().map('$it=${params[it]}').join('&')
full_url = '${full_url}?${params_str}'
full_url = '$full_url?$params_str'
}
// Looking at the source code, this function doesn't actually fail, so I'm
// not sure why it returns an optional
mut req := http.new_request(method, full_url, body) or { return error('') }
req.add_custom_header('X-Api-Key', c.api_key)!
mut req := http.new_request(method, full_url, body)?
req.add_custom_header('X-Api-Key', c.api_key)?
res := req.do()!
res := req.do()?
return res
}
// send_request<T> just calls send_request_with_body<T> with an empty body.
fn (c &Client) send_request[T](method Method, url string, params map[string]string) !Response[T] {
return c.send_request_with_body[T](method, url, params, '')
fn (c &Client) send_request<T>(method Method, url string, params map[string]string) ?Response<T> {
return c.send_request_with_body<T>(method, url, params, '')
}
// send_request_with_body<T> calls send_request_raw_response & parses its
// output as a Response<T> object.
fn (c &Client) send_request_with_body[T](method Method, url string, params map[string]string, body string) !Response[T] {
res := c.send_request_raw(method, url, params, body)!
status := res.status()
// Non-successful requests are expected to return either an empty body or
// Response<string>
if status.is_error() {
// A non-successful status call will have an empty body
if res.body == '' {
return error('Error ${res.status_code} (${status.str()}): (empty response)')
}
data := json.decode(Response[string], res.body)!
return error('Status ${res.status_code} (${status.str()}): ${data.message}')
}
// Just return an empty successful response
if res.body == '' {
return new_data_response(T{})
}
data := json.decode(Response[T], res.body)!
fn (c &Client) send_request_with_body<T>(method Method, url string, params map[string]string, body string) ?Response<T> {
res_text := c.send_request_raw_response(method, url, params, body)?
data := json.decode(Response<T>, res_text)?
return data
}
// send_request_raw_response returns the raw text response for an HTTP request.
fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) !string {
res := c.send_request_raw(method, url, params, body)!
fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) ?string {
res := c.send_request_raw(method, url, params, body)?
return res.body
}
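
As a hedged sketch of how endpoint wrappers compose these helpers, here is a hypothetical route; the /health path and string payload are not part of the actual API, and the <T>/? generics style matches this side of the diff.

// Hypothetical endpoint built on top of the helpers above.
pub fn (c &Client) get_health() ?Response<string> {
	data := c.send_request<string>(Method.get, '/health', {})?
	return data
}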

View File

@ -1,23 +0,0 @@
module client
import models { BuildConfig }
// poll_jobs requests a list of new build jobs from the server.
pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
data := c.send_request[[]BuildConfig](.get, '/api/v1/jobs/poll', {
'arch': arch
'max': max.str()
})!
return data.data
}
// queue_job adds a new one-time build job for the given target to the job
// queue.
pub fn (c &Client) queue_job(target_id int, arch string, force bool) ! {
c.send_request[string](.post, '/api/v1/jobs/queue', {
'target': target_id.str()
'arch': arch
'force': force.str()
})!
}
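
These two endpoints imply the agent's main loop: poll for build configs, run them, sleep. A minimal sketch, assuming a five-second poll interval and single-job batches:

import os
import time

fn agent_loop(c &Client) ! {
	arch := os.uname().machine
	for {
		for config in c.poll_jobs(arch, 1)! {
			// A real agent would start a build container here.
			println('received job for ${config.url}')
		}
		time.sleep(5 * time.second)
	}
}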

View File

@ -1,33 +1,45 @@
module client
import models { BuildLog, BuildLogFilter }
import web.response { Response }
import net.http { Method }
import response { Response }
import time
// get_build_logs returns all build logs.
pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog {
pub fn (c &Client) get_build_logs(filter BuildLogFilter) ?Response<[]BuildLog> {
params := models.params_from(filter)
data := c.send_request[[]BuildLog](.get, '/api/v1/logs', params)!
data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
return data.data
return data
}
// get_build_logs_for_target returns all build logs for a given target.
pub fn (c &Client) get_build_logs_for_target(target_id int) ?Response<[]BuildLog> {
params := {
'repo': target_id.str()
}
data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
return data
}
// get_build_log returns a specific build log.
pub fn (c &Client) get_build_log(id int) !BuildLog {
data := c.send_request[BuildLog](.get, '/api/v1/logs/${id}', {})!
pub fn (c &Client) get_build_log(id int) ?Response<BuildLog> {
data := c.send_request<BuildLog>(Method.get, '/api/v1/logs/$id', {})?
return data.data
return data
}
// get_build_log_content returns the contents of the build log file.
pub fn (c &Client) get_build_log_content(id int) !string {
data := c.send_request_raw_response(.get, '/api/v1/logs/${id}/content', {}, '')!
pub fn (c &Client) get_build_log_content(id int) ?string {
data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')?
return data
}
// add_build_log adds a new build log to the server.
pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response[int] {
pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) ?Response<string> {
params := {
'target': target_id.str()
'startTime': start_time.unix_time().str()
@ -36,12 +48,7 @@ pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time t
'exitCode': exit_code.str()
}
data := c.send_request_with_body[int](.post, '/api/v1/logs', params, content)!
data := c.send_request_with_body<string>(Method.post, '/api/v1/logs', params, content)?
return data
}
// remove_build_log removes the build log with the given id from the server.
pub fn (c &Client) remove_build_log(id int) ! {
c.send_request[string](.delete, '/api/v1/logs/${id}', {})!
}

View File

@ -1,16 +0,0 @@
module client
// remove_repo removes an entire repository.
pub fn (c &Client) remove_repo(repo string) ! {
c.send_request[string](.delete, '/${repo}', {})!
}
// remove_arch_repo removes an entire arch-repo.
pub fn (c &Client) remove_arch_repo(repo string, arch string) ! {
c.send_request[string](.delete, '/${repo}/${arch}', {})!
}
// remove_package removes a single package from the given arch-repo.
pub fn (c &Client) remove_package(repo string, arch string, pkgname string) ! {
c.send_request[string](.delete, '/${repo}/${arch}/${pkgname}', {})!
}

View File

@ -1,23 +1,25 @@
module client
import models { Target, TargetFilter }
import net.http { Method }
import response { Response }
// get_targets returns a list of targets, given a filter object.
pub fn (c &Client) get_targets(filter TargetFilter) ![]Target {
pub fn (c &Client) get_targets(filter TargetFilter) ?[]Target {
params := models.params_from(filter)
data := c.send_request[[]Target](.get, '/api/v1/targets', params)!
data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)?
return data.data
}
// get_all_targets retrieves *all* targets from the API using the default
// limit.
pub fn (c &Client) get_all_targets() ![]Target {
pub fn (c &Client) get_all_targets() ?[]Target {
mut targets := []Target{}
mut offset := u64(0)
for {
sub_targets := c.get_targets(offset: offset)!
sub_targets := c.get_targets(offset: offset)?
if sub_targets.len == 0 {
break
@ -32,8 +34,8 @@ pub fn (c &Client) get_all_targets() ![]Target {
}
// get_target returns the target for a specific id.
pub fn (c &Client) get_target(id int) !Target {
data := c.send_request[Target](.get, '/api/v1/targets/${id}', {})!
pub fn (c &Client) get_target(id int) ?Target {
data := c.send_request<Target>(Method.get, '/api/v1/targets/$id', {})?
return data.data
}
@ -43,29 +45,28 @@ pub struct NewTarget {
url string
branch string
repo string
path string
arch []string
}
// add_target adds a new target to the server.
pub fn (c &Client) add_target(t NewTarget) !int {
params := models.params_from[NewTarget](t)
data := c.send_request[int](.post, '/api/v1/targets', params)!
pub fn (c &Client) add_target(t NewTarget) ?Response<string> {
params := models.params_from<NewTarget>(t)
data := c.send_request<string>(Method.post, '/api/v1/targets', params)?
return data.data
return data
}
// remove_target removes the target with the given id from the server.
pub fn (c &Client) remove_target(id int) !string {
data := c.send_request[string](.delete, '/api/v1/targets/${id}', {})!
pub fn (c &Client) remove_target(id int) ?Response<string> {
data := c.send_request<string>(Method.delete, '/api/v1/targets/$id', {})?
return data.data
return data
}
// patch_target sends a PATCH request to the given target with the params as
// payload.
pub fn (c &Client) patch_target(id int, params map[string]string) !string {
data := c.send_request[string](.patch, '/api/v1/targets/${id}', params)!
pub fn (c &Client) patch_target(id int, params map[string]string) ?Response<string> {
data := c.send_request<string>(Method.patch, '/api/v1/targets/$id', params)?
return data.data
return data
}

View File

@ -1,62 +0,0 @@
module aur
import cli
import console
import client
import aur
import conf as vconf
struct Config {
address string [required]
api_key string [required]
}
// cmd returns the cli module for interacting with the AUR API.
pub fn cmd() cli.Command {
return cli.Command{
name: 'aur'
description: 'Interact with the AUR.'
commands: [
cli.Command{
name: 'search'
description: 'Search for packages.'
required_args: 1
execute: fn (cmd cli.Command) ! {
c := aur.new()
pkgs := c.search(cmd.args[0])!
data := pkgs.map([it.name, it.description])
println(console.pretty_table(['name', 'description'], data)!)
}
},
cli.Command{
name: 'add'
usage: 'repo pkg-name [pkg-name...]'
description: 'Add the given AUR package(s) to Vieter. Non-existent packages will be silently ignored.'
required_args: 2
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
c := aur.new()
pkgs := c.info(cmd.args[1..])!
vc := client.new(conf_.address, conf_.api_key)
for pkg in pkgs {
vc.add_target(
kind: 'git'
url: 'https://aur.archlinux.org/${pkg.package_base}' + '.git'
repo: cmd.args[0]
) or {
println('Failed to add ${pkg.name}: ${err.msg()}')
continue
}
println('Added ${pkg.name}' + '.')
}
}
},
]
}
}

View File

@ -5,15 +5,10 @@ import strings
import cli
import os
// tabbed_table returns a simple textual table, with tabs as separators.
pub fn tabbed_table(data [][]string) string {
return data.map(it.join('\t')).join('\n')
}
// pretty_table converts a list of string data into a pretty table. Many thanks
// to @hungrybluedev in the Vlang Discord for providing this code!
// https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192
pub fn pretty_table(header []string, data [][]string) !string {
pub fn pretty_table(header []string, data [][]string) ?string {
column_count := header.len
mut column_widths := []int{len: column_count, init: header[it].len}
@ -26,7 +21,7 @@ pub fn pretty_table(header []string, data [][]string) !string {
}
}
single_line_length := arrays.sum(column_widths)! + (column_count + 1) * 3 - 4
single_line_length := arrays.sum(column_widths)? + (column_count + 1) * 3 - 4
horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+'
mut buffer := strings.new_builder(data.len * single_line_length)
@ -64,12 +59,12 @@ pub fn pretty_table(header []string, data [][]string) !string {
// export_man_pages recursively generates all man pages for the given
// cli.Command & writes them to the given directory.
pub fn export_man_pages(cmd cli.Command, path string) ! {
pub fn export_man_pages(cmd cli.Command, path string) ? {
man := cmd.manpage()
os.write_file(os.join_path_single(path, cmd.full_name().replace(' ', '-') + '.1'),
man)!
man)?
for sub_cmd in cmd.commands {
export_man_pages(sub_cmd, path)!
export_man_pages(sub_cmd, path)?
}
}

View File

@ -1,7 +1,7 @@
module logs
import cli
import conf as vconf
import vieter_v.conf as vconf
import client
import console
import time
@ -24,13 +24,11 @@ pub fn cmd() cli.Command {
flags: [
cli.Flag{
name: 'limit'
abbrev: 'l'
description: 'How many results to return.'
flag: cli.FlagType.int
},
cli.Flag{
name: 'offset'
abbrev: 'o'
description: 'Minimum index to return.'
flag: cli.FlagType.int
},
@ -41,18 +39,16 @@ pub fn cmd() cli.Command {
},
cli.Flag{
name: 'today'
abbrev: 't'
description: 'Only list logs started today. This flag overrides any other date-related flag.'
description: 'Only list logs started today.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'failed'
description: 'Only list logs with non-zero exit codes. This flag overrides the --code flag.'
description: 'Only list logs with non-zero exit codes.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'day'
abbrev: 'd'
description: 'Only list logs started on this day. (format: YYYY-MM-DD)'
flag: cli.FlagType.string
},
@ -66,36 +62,31 @@ pub fn cmd() cli.Command {
description: 'Only list logs started after this timestamp. (format: YYYY-MM-DD HH:mm:ss)'
flag: cli.FlagType.string
},
cli.Flag{
name: 'code'
description: 'Only return logs with the given exit code. Prepend with `!` to exclude instead of include. Can be specified multiple times.'
flag: cli.FlagType.string_array
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
mut filter := BuildLogFilter{}
limit := cmd.flags.get_int('limit')!
limit := cmd.flags.get_int('limit')?
if limit != 0 {
filter.limit = u64(limit)
}
offset := cmd.flags.get_int('offset')!
offset := cmd.flags.get_int('offset')?
if offset != 0 {
filter.offset = u64(offset)
}
target_id := cmd.flags.get_int('target')!
target_id := cmd.flags.get_int('target')?
if target_id != 0 {
filter.target = target_id
}
tz_offset := time.offset()
if cmd.flags.get_bool('today')! {
if cmd.flags.get_bool('today')? {
today := time.now()
filter.after = time.new_time(time.Time{
@ -107,12 +98,12 @@ pub fn cmd() cli.Command {
}
// The -today flag overrides any of the other date flags.
else {
day_str := cmd.flags.get_string('day')!
before_str := cmd.flags.get_string('before')!
after_str := cmd.flags.get_string('after')!
day_str := cmd.flags.get_string('day')?
before_str := cmd.flags.get_string('before')?
after_str := cmd.flags.get_string('after')?
if day_str != '' {
day := time.parse_rfc3339(day_str)!
day := time.parse_rfc3339(day_str)?
day_utc := time.new_time(time.Time{
year: day.year
month: day.month
@ -127,38 +118,22 @@ pub fn cmd() cli.Command {
filter.before = day_utc.add_days(1)
} else {
if before_str != '' {
filter.before = time.parse(before_str)!.add_seconds(-tz_offset)
filter.before = time.parse(before_str)?.add_seconds(-tz_offset)
}
if after_str != '' {
filter.after = time.parse(after_str)!.add_seconds(-tz_offset)
filter.after = time.parse(after_str)?.add_seconds(-tz_offset)
}
}
}
if cmd.flags.get_bool('failed')! {
if cmd.flags.get_bool('failed')? {
filter.exit_codes = [
'!0',
]
} else {
filter.exit_codes = cmd.flags.get_strings('code')!
}
raw := cmd.flags.get_bool('raw')!
list(conf_, filter, raw)!
}
},
cli.Command{
name: 'remove'
required_args: 1
usage: 'id'
description: 'Remove a build log that matches the given id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
remove(conf_, cmd.args[0])!
list(conf, filter)?
}
},
cli.Command{
@ -166,12 +141,12 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Show all info for a specific build log.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
id := cmd.args[0].int()
info(conf_, id)!
info(conf, id)?
}
},
cli.Command{
@ -179,12 +154,12 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Output the content of a build log to stdout.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
id := cmd.args[0].int()
content(conf_, id)!
content(conf, id)?
}
},
]
@ -192,44 +167,42 @@ pub fn cmd() cli.Command {
}
// print_log_list prints a list of logs.
fn print_log_list(logs []BuildLog, raw bool) ! {
fn print_log_list(logs []BuildLog) ? {
data := logs.map([it.id.str(), it.target_id.str(), it.start_time.local().str(),
it.exit_code.str()])
if raw {
println(console.tabbed_table(data))
} else {
println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)!)
}
println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)?)
}
// list prints a list of all build logs.
fn list(conf_ Config, filter BuildLogFilter, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
logs := c.get_build_logs(filter)!
fn list(conf Config, filter BuildLogFilter) ? {
c := client.new(conf.address, conf.api_key)
logs := c.get_build_logs(filter)?.data
print_log_list(logs, raw)!
print_log_list(logs)?
}
// list_for_target prints a list of all build logs for a given target.
fn list_for_target(conf Config, target_id int) ? {
c := client.new(conf.address, conf.api_key)
logs := c.get_build_logs_for_target(target_id)?.data
print_log_list(logs)?
}
// info print the detailed info for a given build log.
fn info(conf_ Config, id int) ! {
c := client.new(conf_.address, conf_.api_key)
log := c.get_build_log(id)!
fn info(conf Config, id int) ? {
c := client.new(conf.address, conf.api_key)
log := c.get_build_log(id)?.data
print(log)
}
// content outputs the contents of the log file for a given build log to
// stdout.
fn content(conf_ Config, id int) ! {
c := client.new(conf_.address, conf_.api_key)
content := c.get_build_log_content(id)!
fn content(conf Config, id int) ? {
c := client.new(conf.address, conf.api_key)
content := c.get_build_log_content(id)?
println(content)
}
// remove removes a build log from the server's list.
fn remove(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
c.remove_build_log(id.int())!
}

View File

@ -11,11 +11,11 @@ pub fn cmd() cli.Command {
description: 'Generate all man pages & save them in the given directory.'
usage: 'dir'
required_args: 1
execute: fn (cmd cli.Command) ! {
execute: fn (cmd cli.Command) ? {
root := cmd.root()
os.mkdir_all(cmd.args[0])!
os.mkdir_all(cmd.args[0])?
console.export_man_pages(root, cmd.args[0])!
console.export_man_pages(root, cmd.args[0])?
}
}
}

View File

@ -1,52 +0,0 @@
module repos
import cli
import conf as vconf
import client
struct Config {
address string [required]
api_key string [required]
}
// cmd returns the cli module that handles modifying the repository contents.
pub fn cmd() cli.Command {
return cli.Command{
name: 'repos'
description: 'Interact with the repositories & packages stored on the server.'
commands: [
cli.Command{
name: 'remove'
required_args: 1
usage: 'repo [arch [pkgname]]'
description: 'Remove a repo, arch-repo, or package from the server.'
flags: [
cli.Flag{
name: 'force'
flag: cli.FlagType.bool
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
if cmd.args.len < 3 {
if !cmd.flags.get_bool('force')! {
return error('Removing an arch-repo or repository is a very destructive command. If you really do wish to perform this operation, explicitly add the --force flag.')
}
}
client_ := client.new(conf_.address, conf_.api_key)
if cmd.args.len == 1 {
client_.remove_repo(cmd.args[0])!
} else if cmd.args.len == 2 {
client_.remove_arch_repo(cmd.args[0], cmd.args[1])!
} else {
client_.remove_package(cmd.args[0], cmd.args[1], cmd.args[2])!
}
}
},
]
}
}

View File

@ -1,7 +1,7 @@
module schedule
import cli
import cron
import cron.expression { parse_expression }
import time
// cmd returns the cli submodule for previewing a cron schedule.
@ -18,11 +18,11 @@ pub fn cmd() cli.Command {
default_value: ['5']
},
]
execute: fn (cmd cli.Command) ! {
ce := cron.parse_expression(cmd.args.join(' '))!
count := cmd.flags.get_int('count')!
execute: fn (cmd cli.Command) ? {
ce := parse_expression(cmd.args.join(' '))?
count := cmd.flags.get_int('count')?
for t in ce.next_n(time.now(), count) {
for t in ce.next_n(time.now(), count)? {
println(t)
}
}

View File

@ -1,34 +1,34 @@
module targets
import client
import docker
import vieter_v.docker
import os
import build
// build locally builds the target with the given id.
fn build_target(conf Config, target_id int, force bool, timeout int) ! {
fn build(conf Config, target_id int) ? {
c := client.new(conf.address, conf.api_key)
target := c.get_target(target_id)!
target := c.get_target(target_id)?
build_arch := os.uname().machine
println('Creating base image...')
image_id := build.create_build_image(conf.base_image)!
image_id := build.create_build_image(conf.base_image)?
println('Running build...')
res := build.build_target(conf.address, conf.api_key, image_id, target, force, timeout)!
res := build.build_target(conf.address, conf.api_key, image_id, target)?
println('Removing build image...')
mut dd := docker.new_conn()!
mut dd := docker.new_conn()?
defer {
dd.close() or {}
}
dd.image_remove(image_id)!
dd.remove_image(image_id)?
println('Uploading logs to Vieter...')
c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,
res.logs)!
res.logs)?
}

View File

@ -1,8 +1,8 @@
module targets
import cli
import conf as vconf
import cron
import vieter_v.conf as vconf
import cron.expression { parse_expression }
import client { NewTarget }
import console
import models { TargetFilter }
@ -13,7 +13,7 @@ struct Config {
base_image string = 'archlinux:base-devel'
}
// cmd returns the cli submodule that handles the targets API interaction
// cmd returns the cli submodule that handles the repos API interaction
pub fn cmd() cli.Command {
return cli.Command{
name: 'targets'
@ -25,13 +25,11 @@ pub fn cmd() cli.Command {
flags: [
cli.Flag{
name: 'limit'
abbrev: 'l'
description: 'How many results to return.'
flag: cli.FlagType.int
},
cli.Flag{
name: 'offset'
abbrev: 'o'
description: 'Minimum index to return.'
flag: cli.FlagType.int
},
@ -40,52 +38,29 @@ pub fn cmd() cli.Command {
description: 'Only return targets that publish to this repo.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'query'
abbrev: 'q'
description: 'Search string to filter targets by.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'arch'
description: 'Only list targets that build for this arch.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
mut filter := TargetFilter{}
limit := cmd.flags.get_int('limit')!
limit := cmd.flags.get_int('limit')?
if limit != 0 {
filter.limit = u64(limit)
}
offset := cmd.flags.get_int('offset')!
offset := cmd.flags.get_int('offset')?
if offset != 0 {
filter.offset = u64(offset)
}
repo := cmd.flags.get_string('repo')!
repo := cmd.flags.get_string('repo')?
if repo != '' {
filter.repo = repo
}
query := cmd.flags.get_string('query')!
if query != '' {
filter.query = query
}
arch := cmd.flags.get_string('arch')!
if arch != '' {
filter.arch = arch
}
raw := cmd.flags.get_bool('raw')!
list(conf_, filter, raw)!
list(conf, filter)?
}
},
cli.Command{
@ -105,27 +80,19 @@ pub fn cmd() cli.Command {
description: "Which branch to clone; only applies to kind 'git'."
flag: cli.FlagType.string
},
cli.Flag{
name: 'path'
description: 'Subdirectory inside Git repository to use.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
t := NewTarget{
kind: cmd.flags.get_string('kind')!
kind: cmd.flags.get_string('kind')?
url: cmd.args[0]
repo: cmd.args[1]
branch: cmd.flags.get_string('branch') or { '' }
path: cmd.flags.get_string('path') or { '' }
}
raw := cmd.flags.get_bool('raw')!
add(conf_, t, raw)!
add(conf, t)?
}
},
cli.Command{
@ -133,11 +100,11 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Remove a target that matches the given id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
remove(conf_, cmd.args[0])!
remove(conf, cmd.args[0])?
}
},
cli.Command{
@ -145,11 +112,11 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Show detailed information for the target matching the id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
info(conf_, cmd.args[0])!
info(conf, cmd.args[0])?
}
},
cli.Command{
@ -188,15 +155,10 @@ pub fn cmd() cli.Command {
description: 'Kind of target.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'path'
description: 'Subdirectory inside Git repository to use.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
found := cmd.flags.get_all_found()
@ -204,11 +166,11 @@ pub fn cmd() cli.Command {
for f in found {
if f.name != 'config-file' {
params[f.name] = f.get_string()!
params[f.name] = f.get_string()?
}
}
patch(conf_, cmd.args[0], params)!
patch(conf, cmd.args[0], params)?
}
},
cli.Command{
@ -216,104 +178,76 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Build the target with the given id & publish it.'
flags: [
cli.Flag{
name: 'force'
description: 'Build the target without checking whether it needs to be renewed.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'remote'
description: 'Schedule the build on the server instead of running it locally.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'arch'
description: 'Architecture to schedule build for. Required when using -remote.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'timeout'
description: 'After how many minutes to cancel the build. Only applies to local builds.'
flag: cli.FlagType.int
default_value: ['3600']
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
remote := cmd.flags.get_bool('remote')!
force := cmd.flags.get_bool('force')!
timeout := cmd.flags.get_int('timeout')!
target_id := cmd.args[0].int()
if remote {
arch := cmd.flags.get_string('arch')!
if arch == '' {
return error('When scheduling the build remotely, you have to specify an architecture.')
}
c := client.new(conf_.address, conf_.api_key)
c.queue_job(target_id, arch, force)!
} else {
build_target(conf_, target_id, force, timeout)!
}
build(conf, cmd.args[0].int())?
}
},
]
}
}
// get_repo_by_prefix tries to find the repo with the given prefix in its
// ID. If multiple or none are found, an error is raised.
// list prints out a list of all repositories.
fn list(conf_ Config, filter TargetFilter, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
targets := c.get_targets(filter)!
data := targets.map([it.id.str(), it.kind, it.url, it.repo])
fn list(conf Config, filter TargetFilter) ? {
c := client.new(conf.address, conf.api_key)
repos := c.get_targets(filter)?
data := repos.map([it.id.str(), it.kind, it.url, it.repo])
if raw {
println(console.tabbed_table(data))
} else {
println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)!)
println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)?)
}
// add adds a new repository to the server's list.
fn add(conf Config, t &NewTarget) ? {
c := client.new(conf.address, conf.api_key)
res := c.add_target(t)?
println(res.message)
}
// remove removes a repository from the server's list.
fn remove(conf Config, id string) ? {
id_int := id.int()
if id_int != 0 {
c := client.new(conf.address, conf.api_key)
res := c.remove_target(id_int)?
println(res.message)
}
}
// add adds a new target to the server's list.
fn add(conf_ Config, t &NewTarget, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
target_id := c.add_target(t)!
if raw {
println(target_id)
} else {
println('Target added with id ${target_id}')
}
}
// remove removes a target from the server's list.
fn remove(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
c.remove_target(id.int())!
}
// patch patches a given target with the provided params.
fn patch(conf_ Config, id string, params map[string]string) ! {
// patch patches a given repository with the provided params.
fn patch(conf Config, id string, params map[string]string) ? {
// We check the cron expression first because it's useless to send an
// invalid one to the server.
if 'schedule' in params && params['schedule'] != '' {
cron.parse_expression(params['schedule']) or {
return error('Invalid cron expression: ${err.msg()}')
parse_expression(params['schedule']) or {
return error('Invalid cron expression: $err.msg()')
}
}
c := client.new(conf_.address, conf_.api_key)
c.patch_target(id.int(), params)!
id_int := id.int()
if id_int != 0 {
c := client.new(conf.address, conf.api_key)
res := c.patch_target(id_int, params)?
println(res.message)
}
}
// info shows detailed information for a given target.
fn info(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
target := c.get_target(id.int())!
println(target)
// info shows detailed information for a given repo.
fn info(conf Config, id string) ? {
id_int := id.int()
if id_int == 0 {
return
}
c := client.new(conf.address, conf.api_key)
repo := c.get_target(id_int)?
println(repo)
}

32
src/cron/cli.v 100644
View File

@ -0,0 +1,32 @@
module cron
import cli
import vieter_v.conf as vconf
struct Config {
pub:
log_level string = 'WARN'
api_key string
address string
data_dir string
base_image string = 'archlinux:base-devel'
max_concurrent_builds int = 1
api_update_frequency int = 15
image_rebuild_frequency int = 1440
// Replicates the behavior of the original cron system
global_schedule string = '0 3'
}
// cmd returns the cli module that handles the cron daemon.
pub fn cmd() cli.Command {
return cli.Command{
name: 'cron'
description: 'Start the cron service that periodically runs builds.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file')?
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
cron(conf)?
}
}
}
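
Since the config is loaded with the 'VIETER_' prefix, each Config field plausibly maps to an upper-cased environment variable (or a config file entry). A hypothetical invocation, with every value illustrative:

VIETER_API_KEY=secret \
VIETER_ADDRESS=https://arch.example.com \
VIETER_DATA_DIR=/var/lib/vieter \
VIETER_GLOBAL_SCHEDULE='0 3' \
VIETER_MAX_CONCURRENT_BUILDS=2 \
vieter cron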

33
src/cron/cron.v 100644
View File

@ -0,0 +1,33 @@
module cron
import log
import cron.daemon
import cron.expression
import os
const log_file_name = 'vieter.cron.log'
// cron starts a cron daemon & periodically schedules builds.
pub fn cron(conf Config) ? {
// Configure logger
log_level := log.level_from_tag(conf.log_level) or {
return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
mut logger := log.Log{
level: log_level
}
log_file := os.join_path_single(conf.data_dir, cron.log_file_name)
logger.set_full_logpath(log_file)
logger.log_to_console_too()
ce := expression.parse_expression(conf.global_schedule) or {
return error('Error while parsing global cron expression: $err.msg()')
}
mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)?
d.run()
}

View File

@ -0,0 +1,115 @@
module daemon
import time
import sync.stdatomic
import build
import os
const (
build_empty = 0
build_running = 1
build_done = 2
)
// clean_finished_builds removes finished builds from the build slots & returns
// them.
fn (mut d Daemon) clean_finished_builds() []ScheduledBuild {
mut out := []ScheduledBuild{}
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
out << d.builds[i]
}
}
return out
}
// start_new_builds starts as many builds as possible.
fn (mut d Daemon) start_new_builds() {
now := time.now()
for d.queue.len() > 0 {
elem := d.queue.peek() or {
d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
break
}
if elem.timestamp < now {
sb := d.queue.pop() or {
d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.")
break
}
// If this build couldn't be scheduled, no more will be possible.
if !d.start_build(sb) {
d.queue.insert(sb)
break
}
} else {
break
}
}
}
// start_build starts a build for the given ScheduledBuild object.
fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
stdatomic.store_u64(&d.atomics[i], daemon.build_running)
d.builds[i] = sb
go d.run_build(i, sb)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
d.linfo('started build: $sb.target.url -> $sb.target.repo')
// 0 means success, 1 means failure
mut status := 0
res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.target) or {
d.ldebug('build_target error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
build_arch := os.uname().machine
d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or {
d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
}
} else {
d.linfo('an error occurred during build: $sb.target.url -> $sb.target.repo')
}
stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
}
// current_build_count returns how many builds are currently running.
fn (mut d Daemon) current_build_count() int {
mut res := 0
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
res += 1
}
}
return res
}
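
The three constants implement a small per-slot state machine (empty -> running -> done -> empty). A self-contained sketch of the claim step, mirroring start_build above; the constant names and slot count here are illustrative:

import sync.stdatomic

const (
	slot_empty   = 0
	slot_running = 1
	slot_done    = 2
)

// claim_slot returns the index of the first free slot it managed to mark as
// running, or -1 if every slot is taken.
fn claim_slot(mut slots []u64) int {
	for i in 0 .. slots.len {
		if stdatomic.load_u64(&slots[i]) == slot_empty {
			stdatomic.store_u64(&slots[i], slot_running)
			return i
		}
	}
	return -1
}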

View File

@ -0,0 +1,274 @@
module daemon
import time
import log
import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression }
import math
import build
import vieter_v.docker
import os
import client
import models { Target }
const (
// How many seconds to wait before retrying to update API if failed
api_update_retry_timeout = 5
// How many seconds to wait before retrying to rebuild image if failed
rebuild_base_image_retry_timeout = 30
)
struct ScheduledBuild {
pub:
target Target
timestamp time.Time
}
// Overloaded operator for comparing ScheduledBuild objects
fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
return r1.timestamp < r2.timestamp
}
pub struct Daemon {
mut:
client client.Client
base_image string
builder_images []string
global_schedule CronExpression
api_update_frequency int
image_rebuild_frequency int
// Targets currently loaded from API.
targets []Target
// At what point to update the list of targets.
api_update_timestamp time.Time
image_build_timestamp time.Time
queue MinHeap<ScheduledBuild>
// Which builds are currently running
builds []ScheduledBuild
// Atomic variables used to detect when a build has finished; length is the
// same as builds
atomics []u64
logger shared log.Log
}
// init_daemon initializes a new Daemon object. It renews the targets &
// populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
mut d := Daemon{
client: client.new(address, api_key)
base_image: base_image
global_schedule: global_schedule
api_update_frequency: api_update_frequency
image_rebuild_frequency: image_rebuild_frequency
atomics: []u64{len: max_concurrent_builds}
builds: []ScheduledBuild{len: max_concurrent_builds}
logger: logger
}
// Initialize the targets & queue
d.renew_targets()
d.renew_queue()
if !d.rebuild_base_image() {
return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
}
return d
}
// run starts the actual daemon process. It runs builds when possible &
// periodically refreshes the list of targets to ensure we stay in sync.
pub fn (mut d Daemon) run() {
for {
finished_builds := d.clean_finished_builds()
// Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp {
d.renew_targets()
d.renew_queue()
}
// The finished builds should only be rescheduled if the API contents
// haven't been renewed.
else {
for sb in finished_builds {
d.schedule_build(sb.target)
}
}
// TODO remove old builder images.
// This issue is less trivial than it sounds, because a build could
// still be running when the image has to be rebuilt. That would
// prevent the image from being removed. Therefore, we will need to
// keep track of a list or something & remove an image once we have
// made sure it isn't being used anymore.
if time.now() >= d.image_build_timestamp {
d.rebuild_base_image()
// In theory, executing this function here allows an old builder
// image to exist for at most image_rebuild_frequency minutes.
d.clean_old_base_images()
}
// Schedules new builds when possible
d.start_new_builds()
// If there are builds currently running, the daemon should refresh
// every second to clean up any finished builds & start new ones.
mut delay := time.Duration(1 * time.second)
// Sleep either until we have to refresh the targets or when the next
// build has to start, with a minimum of 1 second.
if d.current_build_count() == 0 {
now := time.now()
delay = d.api_update_timestamp - now
if d.queue.len() > 0 {
elem := d.queue.peek() or {
d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
// This is just a fallback option. In theory, queue.peek()
// should *never* return an error or none, because we check
// its len beforehand.
time.sleep(1 * time.second)
continue
}
time_until_next_job := elem.timestamp - now
delay = math.min(delay, time_until_next_job)
}
}
// We sleep for at least one second. This is to prevent the program
// from looping aggressively when a cronjob can be scheduled, but
// there are no spots free for it to be started.
delay = math.max(delay, 1 * time.second)
d.ldebug('Sleeping for ${delay}...')
time.sleep(delay)
}
}
// schedule_build adds the next occurrence of the given target's build to the
// queue.
fn (mut d Daemon) schedule_build(target Target) {
ce := if target.schedule != '' {
parse_expression(target.schedule) or {
// TODO This shouldn't return an error if the expression is empty.
d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
d.global_schedule
}
} else {
d.global_schedule
}
// A target that can't be scheduled will just be skipped for now
timestamp := ce.next_from_now() or {
d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping")
return
}
d.queue.insert(ScheduledBuild{
target: target
timestamp: timestamp
})
}
// renew_targets requests the newest list of targets from the server & replaces
// the old one.
fn (mut d Daemon) renew_targets() {
d.linfo('Renewing targets...')
mut new_targets := d.client.get_all_targets() or {
d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...')
d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)
return
}
// Filter out any targets that shouldn't run on this architecture
cur_arch := os.uname().machine
new_targets = new_targets.filter(it.arch.any(it.value == cur_arch))
d.targets = new_targets
d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
}
// renew_queue replaces the old queue with a new one that reflects the newest
// values in targets.
fn (mut d Daemon) renew_queue() {
d.linfo('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{}
// Move any jobs that should have already started from the old queue onto
// the new one
now := time.now()
// For some reason, using
// ```v
// for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now {
//```
// here causes the function to prematurely just exit, without any errors or anything, very weird
// https://github.com/vlang/v/issues/14042
for d.queue.len() > 0 {
elem := d.queue.pop() or {
d.lerror("queue.pop() returned an error. This shouldn't happen.")
continue
}
if elem.timestamp < now {
new_queue.insert(elem)
} else {
break
}
}
d.queue = new_queue
// For each target in targets, parse their cron expression (or use the
// default one if not present) & add them to the queue
for target in d.targets {
d.schedule_build(target)
}
}
// rebuild_base_image recreates the builder image.
fn (mut d Daemon) rebuild_base_image() bool {
d.linfo('Rebuilding builder image...')
d.builder_images << build.create_build_image(d.base_image) or {
d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timeout}s...')
d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timeout)
return false
}
d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
return true
}
// clean_old_base_images tries to remove any old but still present builder
// images.
fn (mut d Daemon) clean_old_base_images() {
mut i := 0
mut dd := docker.new_conn() or {
d.lerror('Failed to connect to Docker socket.')
return
}
defer {
dd.close() or {}
}
for i < d.builder_images.len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(d.builder_images[i]) or { i += 1 }
}
}

View File

@ -0,0 +1,35 @@
module daemon
import log
// log creates a log message with the given level
pub fn (mut d Daemon) log(msg &string, level log.Level) {
lock d.logger {
d.logger.send_output(msg, level)
}
}
// lfatal creates a log message with the fatal level
pub fn (mut d Daemon) lfatal(msg &string) {
d.log(msg, log.Level.fatal)
}
// lerror creates a log message with the error level
pub fn (mut d Daemon) lerror(msg &string) {
d.log(msg, log.Level.error)
}
// lwarn creates a log message with the warn level
pub fn (mut d Daemon) lwarn(msg &string) {
d.log(msg, log.Level.warn)
}
// linfo creates a log message with the info level
pub fn (mut d Daemon) linfo(msg &string) {
d.log(msg, log.Level.info)
}
// ldebug creates a log message with the debug level
pub fn (mut d Daemon) ldebug(msg &string) {
d.log(msg, log.Level.debug)
}

View File

@ -1,101 +0,0 @@
module cron
#flag -I @VMODROOT/libvieter/include
#flag -L @VMODROOT/libvieter/build
#flag -lvieter
#include "vieter_cron.h"
[typedef]
pub struct C.vieter_cron_expression {
minutes &u8
hours &u8
days &u8
months &u8
minute_count u8
hour_count u8
day_count u8
month_count u8
}
pub type Expression = C.vieter_cron_expression
// == returns whether the two expressions are equal by value.
fn (ce1 Expression) == (ce2 Expression) bool {
if ce1.month_count != ce2.month_count || ce1.day_count != ce2.day_count
|| ce1.hour_count != ce2.hour_count || ce1.minute_count != ce2.minute_count {
return false
}
for i in 0 .. ce1.month_count {
unsafe {
if ce1.months[i] != ce2.months[i] {
return false
}
}
}
for i in 0 .. ce1.day_count {
unsafe {
if ce1.days[i] != ce2.days[i] {
return false
}
}
}
for i in 0 .. ce1.hour_count {
unsafe {
if ce1.hours[i] != ce2.hours[i] {
return false
}
}
}
for i in 0 .. ce1.minute_count {
unsafe {
if ce1.minutes[i] != ce2.minutes[i] {
return false
}
}
}
return true
}
[typedef]
struct C.vieter_cron_simple_time {
year int
month int
day int
hour int
minute int
}
type SimpleTime = C.vieter_cron_simple_time
enum ParseError as u8 {
ok = 0
invalid_expression = 1
invalid_number = 2
out_of_range = 3
too_many_parts = 4
not_enough_parts = 5
}
// str returns the string representation of a ParseError.
fn (e ParseError) str() string {
return match e {
.ok { '' }
.invalid_expression { 'Invalid expression' }
.invalid_number { 'Invalid number' }
.out_of_range { 'Out of range' }
.too_many_parts { 'Too many parts' }
.not_enough_parts { 'Not enough parts' }
}
}
fn C.vieter_cron_expr_init() &C.vieter_cron_expression
fn C.vieter_cron_expr_free(ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_next(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression, ref &C.vieter_cron_simple_time)
fn C.vieter_cron_expr_next_from_now(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_parse(out &C.vieter_cron_expression, s &char) ParseError

View File

@ -1,73 +0,0 @@
module cron
import time
// free the memory associated with the Expression.
[unsafe]
pub fn (ce &Expression) free() {
C.vieter_cron_expr_free(ce)
}
// parse_expression parses a string into an Expression.
pub fn parse_expression(exp string) !&Expression {
out := C.vieter_cron_expr_init()
res := C.vieter_cron_expr_parse(out, exp.str)
if res != .ok {
return error(res.str())
}
return out
}
// next calculates the next occurrence of the cron schedule, given a reference
// point.
pub fn (ce &Expression) next(ref time.Time) time.Time {
st := SimpleTime{
year: ref.year
month: ref.month
day: ref.day
hour: ref.hour
minute: ref.minute
}
out := SimpleTime{}
C.vieter_cron_expr_next(&out, ce, &st)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_from_now calculates the next occurrence of the cron schedule with the
// current time as reference.
pub fn (ce &Expression) next_from_now() time.Time {
out := SimpleTime{}
C.vieter_cron_expr_next_from_now(&out, ce)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &Expression) next_n(ref time.Time, n int) []time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)
for i in 1 .. n {
times << ce.next(times[i - 1])
}
return times
}
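
A hedged usage sketch of this wrapper (the expression and count are illustrative):

import time

fn example() ! {
	ce := parse_expression('0 3')!
	println(ce.next_from_now())
	for t in ce.next_n(time.now(), 3) {
		println(t)
	}
}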

View File

@ -0,0 +1,275 @@
module expression
import time
pub struct CronExpression {
minutes []int
hours []int
days []int
months []int
}
// next calculates the earliest time this cron expression is valid. It will
// always pick a moment in the future, even if ref matches completely up to the
// minute. This function consciously does not take leap years into account.
pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
// If the given ref matches the next cron occurrence up to the minute, it
// will return that value. Because we always want to return a value in the
// future, the minute comparison below artificially skips an exact match on
// ref's minute. A margin of one minute is enough because the cron
// expression does not allow for accuracy smaller than one minute.
sref := ref
// For all of these values, the rule is the following: if their value is
// the length of their respective array in the CronExpression object, that
// means we've looped back around. This means that the "bigger" value has
// to be incremented by one. For example, if the minutes have looped
// around, that means that the hour has to be incremented as well.
mut minute_index := 0
mut hour_index := 0
mut day_index := 0
mut month_index := 0
// This chain is the same logic multiple times, namely that if a "bigger"
// value loops around, then the smaller value will always reset as well.
// For example, if we're going to a new day, the hour & minute will always
// be their smallest value again.
for month_index < ce.months.len && sref.month > ce.months[month_index] {
month_index++
}
if month_index < ce.months.len && sref.month == ce.months[month_index] {
for day_index < ce.days.len && sref.day > ce.days[day_index] {
day_index++
}
if day_index < ce.days.len && ce.days[day_index] == sref.day {
for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] {
hour_index++
}
if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour {
// Minute is the only value where we explicitly make sure we
// can't match sref's value exactly. This is to ensure we only
// return values in the future.
for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] {
minute_index++
}
}
}
}
// Here, we increment the "bigger" values by one if the smaller ones loop
// around. The order is important, as it allows a sort-of waterfall effect
// to occur which updates all values if required.
if minute_index == ce.minutes.len && hour_index < ce.hours.len {
hour_index += 1
}
if hour_index == ce.hours.len && day_index < ce.days.len {
day_index += 1
}
if day_index == ce.days.len && month_index < ce.months.len {
month_index += 1
}
mut minute := ce.minutes[minute_index % ce.minutes.len]
mut hour := ce.hours[hour_index % ce.hours.len]
mut day := ce.days[day_index % ce.days.len]
// Sometimes, we end up with a day that does not exist within the selected
// month, e.g. day 30 in February. When this occurs, we reset day back to
// the smallest value & loop over to the next month that does have this
// day.
if day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
day = ce.days[0]
month_index += 1
for day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
month_index += 1
// If for whatever reason the day value ends up being something
// that can't be scheduled in any month, we have to make sure we
// don't create an infinite loop.
if month_index == 2 * ce.months.len {
return error('No schedulable moment.')
}
}
}
month := ce.months[month_index % ce.months.len]
mut year := sref.year
// If the month loops over, we need to increment the year.
if month_index >= ce.months.len {
year++
}
return time.new_time(time.Time{
year: year
month: month
day: day
minute: minute
hour: hour
})
}
// next_from_now returns the result of ce.next(ref) where ref is the result of
// time.now().
pub fn (ce &CronExpression) next_from_now() ?time.Time {
return ce.next(time.now())
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &CronExpression) next_n(ref time.Time, n int) ?[]time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)?
for i in 1 .. n {
times << ce.next(times[i - 1])?
}
return times
}
// parse_range parses a given string into a range of sorted integers, if
// possible, marking each value in the given bit vector.
fn parse_range(s string, min int, max int, mut bitv []bool) ? {
mut start := min
mut end := max
mut interval := 1
exps := s.split('/')
if exps.len > 2 {
return error('Invalid expression.')
}
if exps[0] != '*' {
dash_parts := exps[0].split('-')
if dash_parts.len > 2 {
return error('Invalid expression.')
}
start = dash_parts[0].int()
// The builtin parsing functions return zero if the string can't be
// parsed into a number, so we have to explicitly check whether they
// actually entered zero or if it's an invalid number.
if start == 0 && dash_parts[0] != '0' {
return error('Invalid number.')
}
// Check whether the start value is out of range
if start < min || start > max {
return error('Out of range.')
}
if dash_parts.len == 2 {
end = dash_parts[1].int()
if end == 0 && dash_parts[1] != '0' {
return error('Invalid number.')
}
if end < start || end > max {
return error('Out of range.')
}
}
}
if exps.len > 1 {
interval = exps[1].int()
// interval being zero is always invalid, but we want to check why
// it's invalid for better error messages.
if interval == 0 {
if exps[1] != '0' {
return error('Invalid number.')
} else {
return error('Step size zero not allowed.')
}
}
if interval > max - min {
return error('Step size too large.')
}
}
// Here, s solely consists of a number, so that's the only value we
// should return.
else if exps[0] != '*' && !exps[0].contains('-') {
bitv[start - min] = true
return
}
for start <= end {
bitv[start - min] = true
start += interval
}
}
// bitv_to_ints converts a bit vector into an array containing the
// corresponding values.
fn bitv_to_ints(bitv []bool, min int) []int {
mut out := []int{}
for i in 0 .. bitv.len {
if bitv[i] {
out << min + i
}
}
return out
}
// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ?[]int {
mut bitv := []bool{len: max - min + 1, init: false}
for range in s.split(',') {
parse_range(range, min, max, mut bitv)?
}
return bitv_to_ints(bitv, min)
}
// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) ?CronExpression {
// The filter allows for multiple spaces between parts
mut parts := exp.split(' ').filter(it != '')
if parts.len < 2 || parts.len > 4 {
return error('Expression must contain between 2 and 4 space-separated parts.')
}
// For ease of use, we allow the user to only specify as many parts as they
// need.
for parts.len < 4 {
parts << '*'
}
mut part_results := [][]int{}
mins := [0, 0, 1, 1]
maxs := [59, 23, 31, 12]
// This for loop allows us to more clearly propagate the error to the user.
for i, min in mins {
part_results << parse_part(parts[i], min, maxs[i]) or {
return error('An error occurred with part $i: $err.msg()')
}
}
return CronExpression{
minutes: part_results[0]
hours: part_results[1]
days: part_results[2]
months: part_results[3]
}
}
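
A hedged end-to-end sketch, mirroring the test cases further down (times illustrative):

import time

fn example() ? {
	ce := parse_expression('0 3')?
	next := ce.next(time.parse('2002-01-01 00:00:00')?)?
	assert next.day == 1 && next.hour == 3 && next.minute == 0
}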

View File

@ -0,0 +1,98 @@
module expression
// parse_range_error returns the error message returned by parse_range. If the
// result is '', the function didn't error.
fn parse_range_error(s string, min int, max int) string {
mut bitv := []bool{len: max - min + 1, init: false}
parse_range(s, min, max, mut bitv) or { return err.msg() }
return ''
}
// =====parse_range=====
fn test_range_star_range() ? {
mut bitv := []bool{len: 6, init: false}
parse_range('*', 0, 5, mut bitv)?
assert bitv == [true, true, true, true, true, true]
}
fn test_range_number() ? {
mut bitv := []bool{len: 6, init: false}
parse_range('4', 0, 5, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4]
}
fn test_range_number_too_large() ? {
assert parse_range_error('10', 0, 6) == 'Out of range.'
}
fn test_range_number_too_small() ? {
assert parse_range_error('0', 2, 6) == 'Out of range.'
}
fn test_range_number_invalid() ? {
assert parse_range_error('x', 0, 6) == 'Invalid number.'
}
fn test_range_step_star_1() ? {
mut bitv := []bool{len: 21, init: false}
parse_range('*/4', 0, 20, mut bitv)?
assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20]
}
fn test_range_step_star_2() ? {
mut bitv := []bool{len: 8, init: false}
parse_range('*/3', 1, 8, mut bitv)?
assert bitv_to_ints(bitv, 1) == [1, 4, 7]
}
fn test_range_step_star_too_large() ? {
assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
}
fn test_range_step_zero() ? {
assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
}
fn test_range_step_number() ? {
mut bitv := []bool{len: 21, init: false}
parse_range('5/4', 2, 22, mut bitv)?
assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21]
}
fn test_range_step_number_too_large() ? {
assert parse_range_error('10/4', 0, 5) == 'Out of range.'
}
fn test_range_step_number_too_small() ? {
assert parse_range_error('2/4', 5, 10) == 'Out of range.'
}
fn test_range_dash() ? {
mut bitv := []bool{len: 10, init: false}
parse_range('4-8', 0, 9, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8]
}
fn test_range_dash_step() ? {
mut bitv := []bool{len: 10, init: false}
parse_range('4-8/2', 0, 9, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4, 6, 8]
}
// =====parse_part=====
fn test_part_single() ? {
assert parse_part('*', 0, 5)? == [0, 1, 2, 3, 4, 5]
}
fn test_part_multiple() ? {
assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8]
}

View File

@ -0,0 +1,34 @@
module expression
import time { parse }
fn util_test_time(exp string, t1_str string, t2_str string) ? {
ce := parse_expression(exp)?
t1 := parse(t1_str)?
t2 := parse(t2_str)?
t3 := ce.next(t1)?
assert t2.year == t3.year
assert t2.month == t3.month
assert t2.day == t3.day
assert t2.hour == t3.hour
assert t2.minute == t3.minute
}
fn test_next_simple() ? {
// Very simple
util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')?
// Overlap to next day
util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')?
util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')?
util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')?
// Overlap to next month
util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')?
// Overlap to next year
util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')?
}

View File

@ -1,35 +0,0 @@
module cron
import time { parse }
fn util_test_time(exp string, t1_str string, t2_str string) ! {
ce := parse_expression(exp)!
t1 := parse(t1_str)!
t2 := parse(t2_str)!
t3 := ce.next(t1)
assert t2.year == t3.year
assert t2.month == t3.month
assert t2.day == t3.day
assert t2.hour == t3.hour
assert t2.minute == t3.minute
}
fn test_next_simple() ! {
// Very simple
// util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
// Overlap to next day
mut exp := '0 3 '
util_test_time(exp, '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
util_test_time(exp, '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3-7/4,7-19', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
//// Overlap to next month
util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')!
//// Overlap to next year
util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')!
}

View File

@ -1,42 +0,0 @@
module cron
fn test_not_allowed() {
illegal_expressions := [
'4 *-7',
'4 *-7/4',
'4 7/*',
'0 0 30 2',
'0 /5',
'0 ',
'0',
' 0',
' 0 ',
'1 2 3 4~9',
'1 1-3-5',
'0 5/2-5',
'',
'1 1/2/3',
'*5 8',
'x 8',
]
mut res := false
for exp in illegal_expressions {
res = false
parse_expression(exp) or { res = true }
assert res, "'${exp}' should produce an error"
}
}
fn test_auto_extend() ! {
ce1 := parse_expression('5 5')!
ce2 := parse_expression('5 5 *')!
ce3 := parse_expression('5 5 * *')!
assert ce1 == ce2 && ce2 == ce3
}
fn test_four() {
parse_expression('0 1 2 3 ') or { assert false }
}

View File

@ -1,6 +1,6 @@
module dbms
module db
import db.sqlite
import sqlite
import time
pub struct VieterDb {
@ -17,21 +17,17 @@ const (
$embed_file('migrations/001-initial/up.sql'),
$embed_file('migrations/002-rename-to-targets/up.sql'),
$embed_file('migrations/003-target-url-type/up.sql'),
$embed_file('migrations/004-nullable-branch/up.sql'),
$embed_file('migrations/005-repo-path/up.sql'),
]
migrations_down = [
$embed_file('migrations/001-initial/down.sql'),
$embed_file('migrations/002-rename-to-targets/down.sql'),
$embed_file('migrations/003-target-url-type/down.sql'),
$embed_file('migrations/004-nullable-branch/down.sql'),
$embed_file('migrations/005-repo-path/down.sql'),
]
)
// init initializes a database & adds the correct tables.
pub fn init(db_path string) !VieterDb {
conn := sqlite.connect(db_path)!
pub fn init(db_path string) ?VieterDb {
conn := sqlite.connect(db_path)?
sql conn {
create table MigrationVersion
@ -49,13 +45,13 @@ pub fn init(db_path string) !VieterDb {
}
// Apply each migration in order
for i in cur_version.version .. dbms.migrations_up.len {
migration := dbms.migrations_up[i].to_string()
for i in cur_version.version .. db.migrations_up.len {
migration := db.migrations_up[i].to_string()
version_num := i + 1
// vfmt does not like these dots
println('Applying migration ${version_num}' + '...')
println('Applying migration $version_num' + '...')
// The sqlite library seems to not like it when multiple statements are
// passed in a single exec. Therefore, we split them & run them all
@ -64,7 +60,7 @@ pub fn init(db_path string) !VieterDb {
res := conn.exec_none(part)
if res != sqlite.sqlite_done {
return error('An error occurred while applying migration ${version_num}: SQLite error code ${res}')
return error('An error occurred while applying migration $version_num')
}
}
@ -80,9 +76,9 @@ pub fn init(db_path string) !VieterDb {
}
}
// row_into[T] converts an sqlite.Row into a given type T by parsing each field
// row_into<T> converts an sqlite.Row into a given type T by parsing each field
// from a string according to its type.
pub fn row_into[T](row sqlite.Row) T {
pub fn row_into<T>(row sqlite.Row) T {
mut i := 0
mut out := T{}
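
A hedged sketch of how row_into might be consumed, mirroring its use in the build log queries (conn stands in for the VieterDb connection; BuildLog comes from the models module):

rows, _ := conn.exec('select * from BuildLog')
logs := rows.map(row_into<BuildLog>(it))
println(logs.len)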

View File

@ -1,4 +1,4 @@
module dbms
module db
import models { BuildLog, BuildLogFilter }
import time
@ -8,20 +8,20 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
mut where_parts := []string{}
if filter.target != 0 {
where_parts << 'target_id == ${filter.target}'
where_parts << 'target_id == $filter.target'
}
if filter.before != time.Time{} {
where_parts << 'start_time < ${filter.before.unix_time()}'
where_parts << 'start_time < $filter.before.unix_time()'
}
if filter.after != time.Time{} {
where_parts << 'start_time > ${filter.after.unix_time()}'
where_parts << 'start_time > $filter.after.unix_time()'
}
// NOTE: possible SQL injection
if filter.arch != '' {
where_parts << "arch == '${filter.arch}'"
where_parts << "arch == '$filter.arch'"
}
mut parts := []string{}
@ -30,27 +30,27 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
if exp[0] == `!` {
code := exp[1..].int()
parts << 'exit_code != ${code}'
parts << 'exit_code != $code'
} else {
code := exp.int()
parts << 'exit_code == ${code}'
parts << 'exit_code == $code'
}
}
if parts.len > 0 {
where_parts << parts.map('(${it})').join(' or ')
where_parts << parts.map('($it)').join(' or ')
}
mut where_str := ''
if where_parts.len > 0 {
where_str = 'where ' + where_parts.map('(${it})').join(' and ')
where_str = 'where ' + where_parts.map('($it)').join(' and ')
}
query := 'select * from BuildLog ${where_str} limit ${filter.limit} offset ${filter.offset}'
query := 'select * from BuildLog $where_str limit $filter.limit offset $filter.offset'
rows, _ := db.conn.exec(query)
res := rows.map(row_into[BuildLog](it))
res := rows.map(row_into<BuildLog>(it))
return res
}
@ -79,16 +79,10 @@ pub fn (db &VieterDb) get_build_log(id int) ?BuildLog {
}
// add_build_log inserts the given BuildLog into the database.
pub fn (db &VieterDb) add_build_log(log BuildLog) int {
pub fn (db &VieterDb) add_build_log(log BuildLog) {
sql db.conn {
insert log into BuildLog
}
// Here, this does work because a log doesn't contain any foreign keys,
// meaning the ORM only has to do a single add
inserted_id := db.conn.last_id() as int
return inserted_id
}
// delete_build_log deletes the BuildLog with the given ID from the database.

View File

@ -1,6 +1,25 @@
module dbms
module db
import models { Target, TargetArch }
import models { Target, TargetArch, TargetFilter }
// get_targets returns all targets in the database.
pub fn (db &VieterDb) get_targets(filter TargetFilter) []Target {
// This seems to currently be blocked by a bug in the ORM; I'll have to ask
// around.
if filter.repo != '' {
res := sql db.conn {
select from Target where repo == filter.repo order by id limit filter.limit offset filter.offset
}
return res
}
res := sql db.conn {
select from Target order by id limit filter.limit offset filter.offset
}
return res
}
// get_target tries to return a specific target.
pub fn (db &VieterDb) get_target(target_id int) ?Target {
@ -19,17 +38,10 @@ pub fn (db &VieterDb) get_target(target_id int) ?Target {
}
// add_target inserts the given target into the database.
pub fn (db &VieterDb) add_target(target Target) int {
pub fn (db &VieterDb) add_target(repo Target) {
sql db.conn {
insert target into Target
insert repo into Target
}
// ID of inserted target is the largest id
inserted_target := sql db.conn {
select from Target order by id desc limit 1
}
return inserted_target.id
}
// delete_target deletes the target with the given id from the database.
@ -49,13 +61,13 @@ pub fn (db &VieterDb) update_target(target_id int, params map[string]string) {
if field.name in params {
// Any fields that are array types require their own update method
$if field.typ is string {
values << "${field.name} = '${params[field.name]}'"
values << "$field.name = '${params[field.name]}'"
}
}
}
values_str := values.join(', ')
// I think this is actual SQL & not the ORM language
query := 'update Target set ${values_str} where id == ${target_id}'
query := 'update Target set $values_str where id == $target_id'
db.conn.exec_none(query)
}
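
For illustration, a hedged sketch of calling update_target (field names come from the Target model; the id and values are illustrative):

db.update_target(5, {
	'branch':   'main'
	'schedule': '0 3'
})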

View File

@ -1,26 +0,0 @@
-- This down won't really work because it'll throw NOT NULL errors, but I'm
-- just putting it here for future reference (still not sure whether I'm even
-- gonna use these)
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
ALTER TABLE Target RENAME TO _Target_old;
CREATE TABLE Target (
id INTEGER PRIMARY KEY,
url TEXT NOT NULL,
branch TEXT NOT NULL,
repo TEXT NOT NULL,
schedule TEXT,
kind TEXT NOT NULL DEFAULT 'git'
);
INSERT INTO Target (id, url, branch, repo, schedule, kind)
SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
DROP TABLE _Target_old;
COMMIT;
PRAGMA foreign_keys=on;

View File

@ -1,23 +0,0 @@
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
ALTER TABLE Target RENAME TO _Target_old;
CREATE TABLE Target (
id INTEGER PRIMARY KEY,
url TEXT NOT NULL,
branch TEXT,
repo TEXT NOT NULL,
schedule TEXT,
kind TEXT NOT NULL DEFAULT 'git'
);
INSERT INTO Target (id, url, branch, repo, schedule, kind)
SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
DROP TABLE _Target_old;
COMMIT;
PRAGMA foreign_keys=on;

View File

@ -1 +0,0 @@
ALTER TABLE Target DROP COLUMN path;

View File

@ -1 +0,0 @@
ALTER TABLE Target ADD COLUMN path TEXT;

View File

@ -1,129 +0,0 @@
module dbms
import models { Target, TargetFilter }
import db.sqlite
// Iterator providing a filtered view into the list of targets currently stored
// in the database. It replaces functionality usually performed in the database
// using SQL queries that can't currently be used due to missing features in
// V's ORM.
pub struct TargetsIterator {
conn sqlite.DB
filter TargetFilter
window_size int = 32
mut:
window []Target
window_index u64
// Offset in entire list of unfiltered targets
offset int
// Offset in filtered list of targets
filtered_offset u64
started bool
done bool
}
// targets returns an iterator allowing filtered access to the list of targets.
pub fn (db &VieterDb) targets(filter TargetFilter) TargetsIterator {
window_size := 32
return TargetsIterator{
conn: db.conn
filter: filter
window: []Target{cap: window_size}
window_size: window_size
}
}
// advance_window moves the sliding window over the filtered list of targets
// until it either reaches the end of the list of targets, or has encountered a
// non-empty window.
fn (mut ti TargetsIterator) advance_window() {
for {
ti.window = sql ti.conn {
select from Target order by id limit ti.window_size offset ti.offset
}
ti.offset += ti.window.len
if ti.window.len == 0 {
ti.done = true
return
}
if ti.filter.repo != '' {
ti.window = ti.window.filter(it.repo == ti.filter.repo)
}
if ti.filter.arch != '' {
ti.window = ti.window.filter(it.arch.any(it.value == ti.filter.arch))
}
if ti.filter.query != '' {
ti.window = ti.window.filter(it.url.contains(ti.filter.query)
|| it.path.contains(ti.filter.query) || it.branch.contains(ti.filter.query))
}
// We break out of the loop once we found a non-empty window
if ti.window.len > 0 {
break
}
}
}
// next returns the next target, if possible.
pub fn (mut ti TargetsIterator) next() ?Target {
if ti.done {
return none
}
// The first call to `next` will cause the sliding window to move to where
// the requested offset starts
if !ti.started {
ti.advance_window()
// Skip all matched targets until the requested offset
for !ti.done && ti.filtered_offset + u64(ti.window.len) <= ti.filter.offset {
ti.filtered_offset += u64(ti.window.len)
ti.advance_window()
}
if ti.done {
return none
}
left_inside_window := ti.filter.offset - ti.filtered_offset
ti.window_index = left_inside_window
ti.filtered_offset += left_inside_window
ti.started = true
}
return_value := ti.window[ti.window_index]
ti.window_index++
ti.filtered_offset++
// The next call would be past the requested limit
if ti.filter.limit > 0 && ti.filtered_offset == ti.filter.offset + ti.filter.limit {
ti.done = true
}
// Ensure the next call has a new valid window
if ti.window_index == u64(ti.window.len) {
ti.advance_window()
ti.window_index = 0
}
return return_value
}
// collect consumes the entire iterator & returns the result as an array.
pub fn (mut ti TargetsIterator) collect() []Target {
mut out := []Target{}
for t in ti {
out << t
}
return out
}
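
A hedged usage sketch of the iterator (filter values illustrative):

mut iter := db.targets(repo: 'vieter', limit: 10)
targets := iter.collect()
println(targets.len)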

@ -1 +0,0 @@
Subproject commit 379a05a7b6b604c107360e0a679fb3ea5400e02c

View File

@ -7,21 +7,13 @@ import console.targets
import console.logs
import console.schedule
import console.man
import console.aur
import console.repos
import agent
import cron
fn main() {
// Stop buffering output so logs always show up immediately
unsafe {
C.setbuf(C.stdout, 0)
}
mut app := cli.Command{
name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.'
version: '0.6.0'
posix_mode: true
version: '0.3.0'
flags: [
cli.Flag{
flag: cli.FlagType.string
@ -31,23 +23,14 @@ fn main() {
global: true
default_value: [os.expand_tilde_to_home('~/.vieterrc')]
},
cli.Flag{
flag: cli.FlagType.bool
name: 'raw'
abbrev: 'r'
description: 'Only output minimal information (no formatted tables, etc.)'
global: true
},
]
commands: [
server.cmd(),
targets.cmd(),
cron.cmd(),
logs.cmd(),
schedule.cmd(),
man.cmd(),
aur.cmd(),
agent.cmd(),
repos.cmd(),
]
}
app.setup()

View File

@ -1,19 +0,0 @@
module models
pub struct BuildConfig {
pub:
target_id int
kind string
url string
branch string
path string
repo string
base_image string
force bool
timeout int
}
// str returns a single-line string representation of a build config
pub fn (c BuildConfig) str() string {
return '{ target: ${c.target_id}, kind: ${c.kind}, url: ${c.url}, branch: ${c.branch}, path: ${c.path}, repo: ${c.repo}, base_image: ${c.base_image}, force: ${c.force}, timeout: ${c.timeout} }'
}

View File

@ -1,7 +1,6 @@
module models
import time
import os
pub struct BuildLog {
pub mut:
@ -16,26 +15,19 @@ pub mut:
// str returns a string representation.
pub fn (bl &BuildLog) str() string {
mut parts := [
'id: ${bl.id}',
'target id: ${bl.target_id}',
'start time: ${bl.start_time.local()}',
'end time: ${bl.end_time.local()}',
'id: $bl.id',
'target id: $bl.target_id',
'start time: $bl.start_time.local()',
'end time: $bl.end_time.local()',
'duration: ${bl.end_time - bl.start_time}',
'arch: ${bl.arch}',
'exit code: ${bl.exit_code}',
'arch: $bl.arch',
'exit code: $bl.exit_code',
]
str := parts.join('\n')
return str
}
// path returns the path to the log file, relative to the logs directory
pub fn (bl &BuildLog) path() string {
filename := bl.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
return os.join_path(bl.target_id.str(), bl.arch, filename)
}
[params]
pub struct BuildLogFilter {
pub mut:

View File

@ -2,19 +2,19 @@ module models
import time
// from_params[T] creates a new instance of T from the given map by parsing all
// from_params<T> creates a new instance of T from the given map by parsing all
// of its fields from the map.
pub fn from_params[T](params map[string]string) ?T {
pub fn from_params<T>(params map[string]string) ?T {
mut o := T{}
patch_from_params[T](mut o, params)?
patch_from_params<T>(mut o, params)?
return o
}
// patch_from_params[T] updates the given T object with the params defined in
// patch_from_params<T> updates the given T object with the params defined in
// the map.
pub fn patch_from_params[T](mut o T, params map[string]string) ? {
pub fn patch_from_params<T>(mut o T, params map[string]string) ? {
$for field in T.fields {
if field.name in params && params[field.name] != '' {
$if field.typ is string {
@ -36,8 +36,8 @@ pub fn patch_from_params[T](mut o T, params map[string]string) ? {
}
}
// params_from[T] converts a given T struct into a map of strings.
pub fn params_from[T](o &T) map[string]string {
// params_from<T> converts a given T struct into a map of strings.
pub fn params_from<T>(o &T) map[string]string {
mut out := map[string]string{}
$for field in T.fields {
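
A hedged round-trip sketch for these helpers (Target assumed from the models module):

fn roundtrip(target &Target) ?Target {
	params := params_from<Target>(target)
	return from_params<Target>(params)
}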

View File

@ -28,52 +28,31 @@ pub mut:
repo string [nonull]
// Cron schedule describing how frequently to build the repo.
schedule string
// Subdirectory in the Git repository to cd into
path string
// On which architectures the package is allowed to be built. In reality,
// this controls which agents will build this package when scheduled.
// this controls which builders will periodically build the image.
arch []TargetArch [fkey: 'target_id']
}
// str returns a string representation.
pub fn (t &Target) str() string {
pub fn (gr &Target) str() string {
mut parts := [
'id: ${t.id}',
'kind: ${t.kind}',
'url: ${t.url}',
'branch: ${t.branch}',
'path: ${t.path}',
'repo: ${t.repo}',
'schedule: ${t.schedule}',
'arch: ${t.arch.map(it.value).join(', ')}',
'id: $gr.id',
'kind: $gr.kind',
'url: $gr.url',
'branch: $gr.branch',
'repo: $gr.repo',
'schedule: $gr.schedule',
'arch: ${gr.arch.map(it.value).join(', ')}',
]
str := parts.join('\n')
return str
}
// as_build_config converts a Target into a BuildConfig, given some extra
// needed information.
pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) BuildConfig {
return BuildConfig{
target_id: t.id
kind: t.kind
url: t.url
branch: t.branch
path: t.path
repo: t.repo
base_image: base_image
force: force
timeout: timeout
}
}
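
A hedged sketch of turning a target into a job configuration (base image and timeout illustrative):

config := target.as_build_config('archlinux:base-devel', false, 3600)
println(config)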
[params]
pub struct TargetFilter {
pub mut:
limit u64 = 25
offset u64
repo string
query string
arch string
}

View File

@ -1,5 +0,0 @@
# package
This module handles both parsing the published Arch tarballs & the contents of
their `.PKGINFO` files, as well as generating the contents of the database
archives' `desc` & `files` files.

View File

@ -1,103 +0,0 @@
module package
// format_entry returns a string properly formatted to be added to a desc file.
[inline]
fn format_entry(key string, value string) string {
return '\n%${key}%\n${value}\n'
}
// full_name returns the properly formatted name for the package, including
// version & architecture
pub fn (pkg &Pkg) full_name() string {
p := pkg.info
return '${p.name}-${p.version}-${p.arch}'
}
// filename returns the correct filename of the package file
pub fn (pkg &Pkg) filename() string {
ext := match pkg.compression {
0 { '.tar' }
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: ${pkg.compression}") }
}
return '${pkg.full_name()}.pkg${ext}'
}
// to_desc returns a desc file valid string representation
pub fn (pkg &Pkg) to_desc() !string {
p := pkg.info
// filename
mut desc := '%FILENAME%\n${pkg.filename()}\n'
desc += format_entry('NAME', p.name)
desc += format_entry('BASE', p.base)
desc += format_entry('VERSION', p.version)
if p.description.len > 0 {
desc += format_entry('DESC', p.description)
}
if p.groups.len > 0 {
desc += format_entry('GROUPS', p.groups.join_lines())
}
desc += format_entry('CSIZE', p.csize.str())
desc += format_entry('ISIZE', p.size.str())
sha256sum := pkg.checksum()!
desc += format_entry('SHA256SUM', sha256sum)
// TODO add pgpsig stuff
if p.url.len > 0 {
desc += format_entry('URL', p.url)
}
if p.licenses.len > 0 {
desc += format_entry('LICENSE', p.licenses.join_lines())
}
desc += format_entry('ARCH', p.arch)
desc += format_entry('BUILDDATE', p.build_date.str())
desc += format_entry('PACKAGER', p.packager)
if p.replaces.len > 0 {
desc += format_entry('REPLACES', p.replaces.join_lines())
}
if p.conflicts.len > 0 {
desc += format_entry('CONFLICTS', p.conflicts.join_lines())
}
if p.provides.len > 0 {
desc += format_entry('PROVIDES', p.provides.join_lines())
}
if p.depends.len > 0 {
desc += format_entry('DEPENDS', p.depends.join_lines())
}
if p.optdepends.len > 0 {
desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
}
if p.makedepends.len > 0 {
desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
}
if p.checkdepends.len > 0 {
desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
}
return '${desc}\n'
}
// to_files returns a files file valid string representation
pub fn (pkg &Pkg) to_files() string {
return '%FILES%\n${pkg.files.join_lines()}\n'
}
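
A hedged sketch of the read-then-describe flow these functions support (path illustrative):

fn example() ! {
	pkg := read_pkg_archive('/tmp/foo-1.0-1-x86_64.pkg.tar.zst')!
	println(pkg.full_name())
	println(pkg.to_desc()!)
	println(pkg.to_files())
}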

View File

@ -43,12 +43,12 @@ pub mut:
}
// checksum calculates the sha256 hash of the package
pub fn (p &Pkg) checksum() !string {
pub fn (p &Pkg) checksum() ?string {
return util.hash_file(p.path)
}
// parse_pkg_info_string parses a PkgInfo object from a string
fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
mut pkg_info := PkgInfo{}
// Iterate over the entire string
@ -101,9 +101,9 @@ fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
// read_pkg_archive extracts the file list & .PKGINFO contents from an archive
// NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs.
pub fn read_pkg_archive(pkg_path string) !Pkg {
pub fn read_pkg_archive(pkg_path string) ?Pkg {
if !os.is_file(pkg_path) {
return error("'${pkg_path}' doesn't exist or isn't a file.")
return error("'$pkg_path' doesn't exist or isn't a file.")
}
a := C.archive_read_new()
@ -159,7 +159,7 @@ pub fn read_pkg_archive(pkg_path string) !Pkg {
pkg_text := unsafe { buf.vstring_with_len(size).clone() }
pkg_info = parse_pkg_info_string(pkg_text)!
pkg_info = parse_pkg_info_string(pkg_text)?
} else {
C.archive_read_data_skip(a)
}
@ -174,3 +174,104 @@ pub fn read_pkg_archive(pkg_path string) !Pkg {
compression: compression_code
}
}
// format_entry returns a string properly formatted to be added to a desc file.
fn format_entry(key string, value string) string {
return '\n%$key%\n$value\n'
}
// full_name returns the properly formatted name for the package, including
// version & architecture
pub fn (pkg &Pkg) full_name() string {
p := pkg.info
return '$p.name-$p.version-$p.arch'
}
// filename returns the correct filename of the package file
pub fn (pkg &Pkg) filename() string {
ext := match pkg.compression {
0 { '.tar' }
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
}
return '${pkg.full_name()}.pkg$ext'
}
// to_desc returns a desc file valid string representation
pub fn (pkg &Pkg) to_desc() ?string {
p := pkg.info
// filename
mut desc := '%FILENAME%\n$pkg.filename()\n'
desc += format_entry('NAME', p.name)
desc += format_entry('BASE', p.base)
desc += format_entry('VERSION', p.version)
if p.description.len > 0 {
desc += format_entry('DESC', p.description)
}
if p.groups.len > 0 {
desc += format_entry('GROUPS', p.groups.join_lines())
}
desc += format_entry('CSIZE', p.csize.str())
desc += format_entry('ISIZE', p.size.str())
sha256sum := pkg.checksum()?
desc += format_entry('SHA256SUM', sha256sum)
// TODO add pgpsig stuff
if p.url.len > 0 {
desc += format_entry('URL', p.url)
}
if p.licenses.len > 0 {
desc += format_entry('LICENSE', p.licenses.join_lines())
}
desc += format_entry('ARCH', p.arch)
desc += format_entry('BUILDDATE', p.build_date.str())
desc += format_entry('PACKAGER', p.packager)
if p.replaces.len > 0 {
desc += format_entry('REPLACES', p.replaces.join_lines())
}
if p.conflicts.len > 0 {
desc += format_entry('CONFLICTS', p.conflicts.join_lines())
}
if p.provides.len > 0 {
desc += format_entry('PROVIDES', p.provides.join_lines())
}
if p.depends.len > 0 {
desc += format_entry('DEPENDS', p.depends.join_lines())
}
if p.optdepends.len > 0 {
desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
}
if p.makedepends.len > 0 {
desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
}
if p.checkdepends.len > 0 {
desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
}
return '$desc\n'
}
// to_files returns a files file valid string representation
pub fn (pkg &Pkg) to_files() string {
return '%FILES%\n$pkg.files.join_lines()\n'
}

View File

@ -1,43 +0,0 @@
# repo
This module manages the contents of the various repositories stored within a
Vieter instance.
## Terminology
* Arch-repository (arch-repo): specific architecture of a given repository. This is what
Pacman actually uses as a repository, and contains its own `.db` & `.files`
files.
* Repository (repo): a collection of arch-repositories. A single repository can
contain packages of different architectures, with each package being stored
in that specific architecture's arch-repository.
* Repository group (repo-group): a collection of repositories. Each Vieter
instance consists of a single repository group, which manages all underlying
repositories & arch-repositories.
## Arch-repository layout
An arch-repository (aka a regular Pacman repository) consists of a directory
with the following files (`{repo}` should be replaced with the name of the
repository):
* One or more package directories. These directories follow the naming scheme
`${pkgname}-${pkgver}-${pkgrel}`. Each of these directories contains two
files, `desc` & `files`. The `desc` file is a list of the package's metadata,
while `files` contains a list of all files that the package contains. The
latter is used when using `pacman -F`.
* `{repo}.db` & `{repo}.db.tar.gz`: the database file of the repository. This
is just a compressed tarball of all package directories, but only their
`desc` files. Both these files should have the same content (`repo-add`
creates a symlink, but Vieter just serves the same file for both routes)
* `{repo}.files` & `{repo}.files.tar.gz`: the same as the `.db` file, but this
also contains the `files` files, instead of just the `desc` files.
## Filesystem layout
The repository part of Vieter consists of two directories. One is the `repos`
directory inside the configured `data_dir`, while the other is the configured
`pkg_dir`. `repos` contains only the repository group, while `pkg_dir` contains
the actual package archives. `pkg_dir` is the directory that can take up a
significant amount of disk space, while `repos` solely consists of small text
files.
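
For illustration, a hedged example of the resulting layout for a single repository `vieter` with arch `x86_64` (all names illustrative):

data_dir/repos/vieter/x86_64/vieter-0.4.0-1/desc
data_dir/repos/vieter/x86_64/vieter-0.4.0-1/files
data_dir/repos/vieter/x86_64/vieter.db
data_dir/repos/vieter/x86_64/vieter.files
pkg_dir/vieter/x86_64/vieter-0.4.0-1-x86_64.pkg.tar.zst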

View File

@ -1,85 +0,0 @@
module repo
import os
// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
// returns false if the package wasn't present in the database. It also
// optionally re-syncs the repo archives.
pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, perform_sync bool) !bool {
repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false
if !os.exists(repo_dir) {
return false
}
// We iterate over every directory in the repo dir
// TODO filter so we only check directories
for d in os.ls(repo_dir)! {
// Because a repository only allows a single version of each package,
// we need only compare whether the name of the package is the same,
// not the version.
name := d.split('-')#[..-2].join('-')
if name == pkg_name {
// We lock the mutex here to prevent other routines from creating a
// new archive while we remove an entry
lock r.mutex {
os.rmdir_all(os.join_path_single(repo_dir, d))!
}
// Also remove the package archive
repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
archives := os.ls(repo_pkg_dir)!.filter(it.split('-')#[..-3].join('-') == name)
for archive_name in archives {
full_path := os.join_path_single(repo_pkg_dir, archive_name)
os.rm(full_path)!
}
// Sync the db archives if requested
if perform_sync {
r.sync(repo, arch)!
}
return true
}
}
return false
}
// remove_arch_repo removes an arch-repo & its packages.
pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) !bool {
repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false
if !os.exists(repo_dir) {
return false
}
os.rmdir_all(repo_dir)!
pkg_dir := os.join_path(r.pkg_dir, repo, arch)
os.rmdir_all(pkg_dir)!
return true
}
// remove_repo removes a repo & its packages.
pub fn (r &RepoGroupManager) remove_repo(repo string) !bool {
repo_dir := os.join_path_single(r.repos_dir, repo)
// If the repository doesn't exist yet, the result is automatically false
if !os.exists(repo_dir) {
return false
}
os.rmdir_all(repo_dir)!
pkg_dir := os.join_path_single(r.pkg_dir, repo)
os.rmdir_all(pkg_dir)!
return true
}

View File

@ -23,23 +23,18 @@ pub:
pub struct RepoAddResult {
pub:
name string
version string
archs []string
added bool [required]
pkg &package.Pkg [required]
}
// new creates a new RepoGroupManager & creates the directories as needed
pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager {
pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
if !os.is_dir(repos_dir) {
os.mkdir_all(repos_dir) or {
return error('Failed to create repos directory: ${err.msg()}')
}
os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
}
if !os.is_dir(pkg_dir) {
os.mkdir_all(pkg_dir) or {
return error('Failed to create package directory: ${err.msg()}')
}
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') }
}
return RepoGroupManager{
@ -53,32 +48,31 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupMana
// pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive
// file, passes the result to add_pkg_in_repo, and hard links the archive to
// the right subdirectories in r.pkg_dir if it was successfully added.
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult {
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
pkg := package.read_pkg_archive(pkg_path) or {
return error('Failed to read package file: ${err.msg()}')
return error('Failed to read package file: $err.msg()')
}
archs := r.add_pkg_in_repo(repo, pkg)!
added := r.add_pkg_in_repo(repo, pkg)?
// If the add was successful, we move the file to the packages directory
for arch in archs {
for arch in added {
repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
dest_path := os.join_path_single(repo_pkg_path, pkg.filename())
os.mkdir_all(repo_pkg_path)!
os.mkdir_all(repo_pkg_path)?
// We create hard links so that "any" arch packages aren't stored
// multiple times
os.link(pkg_path, dest_path)!
os.link(pkg_path, dest_path)?
}
// After linking, we can remove the original file
os.rm(pkg_path)!
os.rm(pkg_path)?
return RepoAddResult{
name: pkg.info.name
version: pkg.info.version
archs: archs
added: added.len > 0
pkg: &pkg
}
}
@ -89,13 +83,15 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !Re
// r.default_arch. If this arch-repo doesn't exist yet, it is created. If the
// architecture isn't 'any', the package is only added to the specific
// architecture.
fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]string {
fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string {
// A package not of arch 'any' can be handled easily by adding it to the
// respective repo
if pkg.info.arch != 'any' {
r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)!
return [pkg.info.arch]
if r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)? {
return [pkg.info.arch]
} else {
return []
}
}
mut arch_repos := []string{}
@ -108,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]strin
// If this is the first package that's added to the repo, the directory
// won't exist yet
if os.exists(repo_dir) {
arch_repos = os.ls(repo_dir)!
arch_repos = os.ls(repo_dir)?
}
// The default_arch should always be updated when a package with arch 'any'
@ -117,39 +113,92 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]strin
arch_repos << r.default_arch
}
// Add the package to each found architecture
// NOTE: if any of these fail, the function fails. This means the user does
// not know which arch-repositories did succeed in adding the package, if
// any.
mut added := []string{}
// We add the package to each repository. If any of the repositories
// return true, the result of the function is also true.
for arch in arch_repos {
r.add_pkg_in_arch_repo(repo, arch, pkg)!
if r.add_pkg_in_arch_repo(repo, arch, pkg)? {
added << arch
}
}
return arch_repos
return added
}
// add_pkg_in_arch_repo is the function that actually adds a package to a given
// arch-repo. It records the package's data in the arch-repo's desc & files
// files, and afterwards updates the db & files archives to reflect these
// changes.
fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! {
pkg_dir := os.join_path(r.repos_dir, repo, arch, '${pkg.info.name}-${pkg.info.version}')
// changes. The function returns false if the package was already present in
// the repo, and true otherwise.
fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ?bool {
pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
// Remove the previous version of the package, if present
r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)!
r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)?
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }
os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()!) or {
os.rmdir_all(pkg_dir)!
os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()?) or {
os.rmdir_all(pkg_dir)?
return error('Failed to write desc file.')
}
os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or {
os.rmdir_all(pkg_dir)!
os.rmdir_all(pkg_dir)?
return error('Failed to write files file.')
}
r.sync(repo, arch)!
r.sync(repo, arch)?
return true
}
// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
// returns false if the package wasn't present in the database. It also
// optionally re-syncs the repo archives.
fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool {
repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false
if !os.exists(repo_dir) {
return false
}
// We iterate over every directory in the repo dir
// TODO filter so we only check directories
for d in os.ls(repo_dir)? {
// Because a repository only allows a single version of each package,
// we need only compare whether the name of the package is the same,
// not the version.
name := d.split('-')#[..-2].join('-')
if name == pkg_name {
// We lock the mutex here to prevent other routines from creating a
// new archive while we remove an entry
lock r.mutex {
os.rmdir_all(os.join_path_single(repo_dir, d))?
}
// Also remove the package archive
repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name)
for archive_name in archives {
full_path := os.join_path_single(repo_pkg_dir, archive_name)
os.rm(full_path)?
}
// Sync the db archives if requested
if sync {
r.sync(repo, arch)?
}
return true
}
}
return false
}

View File

@ -32,7 +32,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
}
// sync regenerates the repository archive files.
fn (r &RepoGroupManager) sync(repo string, arch string) ! {
fn (r &RepoGroupManager) sync(repo string, arch string) ? {
subrepo_path := os.join_path(r.repos_dir, repo, arch)
lock r.mutex {
@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ! {
C.archive_write_open_filename(a_files, &char(files_path.str))
// Iterate over each directory
for d in os.ls(subrepo_path)!.filter(os.is_dir(os.join_path_single(subrepo_path,
for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path,
it))) {
// desc
mut inner_path := os.join_path_single(d, 'desc')

View File

@ -0,0 +1,34 @@
module response
pub struct Response<T> {
pub:
message string
data T
}
// new_response constructs a new Response<string> object with the given message
// & an empty data field.
pub fn new_response(message string) Response<string> {
return Response<string>{
message: message
data: ''
}
}
// new_data_response<T> constructs a new Response<T> object with the given data
// & an empty message field.
pub fn new_data_response<T>(data T) Response<T> {
return Response<T>{
message: ''
data: data
}
}
// new_full_response<T> constructs a new Response<T> object with the given
// message & data.
pub fn new_full_response<T>(message string, data T) Response<T> {
return Response<T>{
message: message
data: data
}
}
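
A hedged sketch of the three constructors (payloads illustrative):

r1 := new_response('Missing arch query arg.')
r2 := new_data_response([1, 2, 3])
r3 := new_full_response('ok', 'done')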

View File

@ -1,49 +0,0 @@
module server
import web
import web.response { new_data_response, new_response }
// v1_poll_job_queue allows agents to poll for new build jobs.
['/api/v1/jobs/poll'; auth; get; markused]
fn (mut app App) v1_poll_job_queue() web.Result {
arch := app.query['arch'] or {
return app.json(.bad_request, new_response('Missing arch query arg.'))
}
max_str := app.query['max'] or {
return app.json(.bad_request, new_response('Missing max query arg.'))
}
max := max_str.int()
mut out := app.job_queue.pop_n(arch, max).map(it.config)
return app.json(.ok, new_data_response(out))
}
// v1_queue_job allows queueing a new one-time build job for the given target.
['/api/v1/jobs/queue'; auth; markused; post]
fn (mut app App) v1_queue_job() web.Result {
target_id := app.query['target'] or {
return app.json(.bad_request, new_response('Missing target query arg.'))
}.int()
arch := app.query['arch'] or {
return app.json(.bad_request, new_response('Missing arch query arg.'))
}
if arch == '' {
return app.json(.bad_request, new_response('Empty arch query arg.'))
}
force := 'force' in app.query
target := app.db.get_target(target_id) or {
return app.json(.bad_request, new_response('Unknown target id.'))
}
app.job_queue.insert(target: target, arch: arch, single: true, now: true, force: force) or {
return app.status(.internal_server_error)
}
return app.status(.ok)
}
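
For illustration, a hedged client-side sketch of these routes (the host, token, and X-Api-Key header name are assumptions, not taken from this diff):

import net.http

fn poll_jobs() ! {
	mut header := http.new_header()
	header.add_custom('X-Api-Key', 'secret')!
	res := http.fetch(http.FetchConfig{
		url: 'https://example.com/api/v1/jobs/poll?arch=x86_64&max=5'
		header: header
	})!
	println(res.body)
}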

Some files were not shown because too many files have changed in this diff Show More