Compare commits


1 commit

Author: Renovate Bot
SHA1: 39fcc17a23
Message: Update dependency rouge to v4
Date: 2022-12-17 10:02:39 +00:00
68 changed files with 1155 additions and 1029 deletions


@@ -1,4 +0,0 @@
-# To stay consistent with the V formatting style, we use tabs
-UseTab: Always
-IndentWidth: 4
-TabWidth: 4


@@ -5,5 +5,6 @@ root = true
 end_of_line = lf
 insert_final_newline = true
-[*.{v,c,h}]
+[*.v]
+# vfmt wants it :(
 indent_style = tab

.gitignore

@@ -1,4 +1,4 @@
-vieter.c
+*.c
 /data/
 # Build artifacts

.gitmodules

@@ -1,6 +1,3 @@
 [submodule "docs/themes/hugo-book"]
 	path = docs/themes/hugo-book
 	url = https://github.com/alex-shpak/hugo-book
-[submodule "src/libvieter"]
-	path = src/libvieter
-	url = https://git.rustybever.be/vieter-v/libvieter


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 matrix:
   PLATFORM:
@@ -57,7 +57,7 @@ pipeline:
   - export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')"
   - export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH"
-  - export SIGNATURE="$(echo -en $SIG_STRING | openssl dgst -sha1 -hmac $S3_PASSWORD -binary | base64)"
+  - export SIGNATURE="$(echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64)"
 - >
   curl
   --silent


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches:


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches: [ 'main' ]


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 # These checks already get performed on the feature branches
 branches:


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches:
@@ -8,21 +8,15 @@ branches:
 depends_on:
   - build
+skip_clone: true
 pipeline:
-  install-modules:
+  generate:
     image: *vlang_image
     pull: true
     commands:
-      - export VMODULES=$PWD/.vmodules
-      - 'cd src && v install'
-  generate:
-    image: *vlang_image
-    commands:
-      # - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
-      # - chmod +x vieter
-      - export VMODULES=$PWD/.vmodules
-      - make
+      - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
+      - chmod +x vieter
       - ./vieter man man
      - cd man


@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 matrix:
   PLATFORM:


@@ -9,40 +9,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Added
-* Metrics endpoint for Prometheus integration
-* Search in list of targets using API & CLI
-* Allow filtering targets by arch value
-### Changed
-* Rewrote cron expression logic in C
-### Removed
-* Deprecated cron daemon
-## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0)
-### Added
-* CLI commands for removing packages, arch-repos & repositories
-## [0.5.0-rc.2](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.2)
-### Added
-* API route for removing logs & accompanying CLI command
-* Daemon for periodically removing old logs
-* CLI flag to filter logs by specific exit codes
-### Changed
-* Use `--long-option` instead of `-long-option` for CLI
-## [0.5.0-rc.1](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.1)
-### Added
 * Allow specifying subdirectory inside Git repository
 * Added option to deploy using agent-server architecture instead of cron daemon
 * Allow scheduling builds on the server from the CLI tool instead of building


@@ -1,20 +1,16 @@
 # =====CONFIG=====
 SRC_DIR := src
-SRCS != find '$(SRC_DIR)' -iname '*.v'
+SOURCES != find '$(SRC_DIR)' -iname '*.v'
 V_PATH ?= v
-V := $(V_PATH) -showcc -gc boehm -W -d use_openssl -skip-unused
+V := $(V_PATH) -showcc -gc boehm -W -d use_openssl
 all: vieter
 # =====COMPILATION=====
-.PHONY: libvieter
-libvieter:
-	make -C '$(SRC_DIR)/libvieter' CFLAGS='-O3'
 # Regular binary
-vieter: $(SOURCES) libvieter
+vieter: $(SOURCES)
 	$(V) -g -o vieter $(SRC_DIR)
 # Debug build using gcc
@@ -22,7 +18,7 @@ vieter: $(SOURCES) libvieter
 # multi-threaded and causes issues when running vieter inside gdb.
 .PHONY: debug
 debug: dvieter
-dvieter: $(SOURCES) libvieter
+dvieter: $(SOURCES)
 	$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)
 # Run the debug build inside gdb
@@ -33,12 +29,12 @@ gdb: dvieter
 # Optimised production build
 .PHONY: prod
 prod: pvieter
-pvieter: $(SOURCES) libvieter
+pvieter: $(SOURCES)
 	$(V) -o pvieter -prod $(SRC_DIR)
 # Only generate C code
 .PHONY: c
-c: $(SOURCES) libvieter
+c: $(SOURCES)
 	$(V) -o vieter.c $(SRC_DIR)
@@ -71,7 +67,6 @@ man: vieter
 # =====OTHER=====
-# Linting
 .PHONY: lint
 lint:
 	$(V) fmt -verify $(SRC_DIR)
@@ -79,24 +74,18 @@ lint:
 	$(V_PATH) missdoc -p $(SRC_DIR)
 	@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
-# Formatting
+# Format the V codebase
 .PHONY: fmt
 fmt:
 	$(V) fmt -w $(SRC_DIR)
-# Testing
 .PHONY: test
-test: libvieter
-	$(V) -g test $(SRC_DIR)
+test:
+	$(V) test $(SRC_DIR)
-# Cleaning
 .PHONY: clean
 clean:
 	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public'
-	make -C '$(SRC_DIR)/libvieter' clean
 # =====EXPERIMENTAL=====


@@ -3,7 +3,7 @@
 pkgbase='vieter'
 pkgname='vieter'
-pkgver='0.5.0'
+pkgver='0.4.0'
 pkgrel=1
 pkgdesc="Lightweight Arch repository server & package build system"
 depends=('glibc' 'openssl' 'libarchive' 'sqlite')


@@ -11,37 +11,27 @@ makedepends=('git' 'vlang')
 arch=('x86_64' 'aarch64')
 url='https://git.rustybever.be/vieter-v/vieter'
 license=('AGPL3')
-source=(
-	"${pkgname}::git+https://git.rustybever.be/vieter-v/vieter#branch=dev"
-	"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
-)
-md5sums=('SKIP' 'SKIP')
+source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#branch=dev")
+md5sums=('SKIP')
 provides=('vieter')
 conflicts=('vieter')
 pkgver() {
-	cd "${pkgname}"
+	cd "$pkgname"
 	git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
 }
 prepare() {
-	cd "${pkgname}"
-	# Add the libvieter submodule
-	git submodule init
-	git config submodules.src/libvieter.url "${srcdir}/libvieter"
-	git -c protocol.file.allow=always submodule update
-	export VMODULES="${srcdir}/.vmodules"
-	cd src && v install
+	export VMODULES="$srcdir/.vmodules"
+	cd "$pkgname/src" && v install
 }
 build() {
-	export VMODULES="${srcdir}/.vmodules"
-	cd "${pkgname}"
+	export VMODULES="$srcdir/.vmodules"
+	cd "$pkgname"
 	make prod
@@ -52,9 +42,9 @@ build() {
 }
 package() {
-	install -dm755 "${pkgdir}/usr/bin"
-	install -Dm755 "${pkgname}/pvieter" "${pkgdir}/usr/bin/vieter"
-	install -dm755 "${pkgdir}/usr/share/man/man1"
-	install -Dm644 "${pkgname}/man"/*.1 "${pkgdir}/usr/share/man/man1"
+	install -dm755 "$pkgdir/usr/bin"
+	install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
+	install -dm755 "$pkgdir/usr/share/man/man1"
+	install -Dm644 "$pkgname/man"/*.1 "$pkgdir/usr/share/man/man1"
 }
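For anyone building these packages themselves, the usual AUR flow applies. A sketch; `vieter-git` is the AUR package this PKGBUILD belongs to, per the installation docs further down this diff:

```sh
# Clone the AUR package, then build & install it with makepkg.
git clone https://aur.archlinux.org/vieter-git.git
cd vieter-git
makepkg -si
```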


@@ -1,78 +0,0 @@
-# Jobs
-<aside class="notice">
-All routes in this section require authentication.
-</aside>
-## Manually schedule a job
-```shell
-curl \
-  -XPOST \
-  -H 'X-Api-Key: secret' \
-  'https://example.com/api/v1/jobs/queue?target=10&force&arch=x86_64'
-```
-Manually schedule a job on the server.
-### HTTP Request
-`POST /api/v1/jobs/queue`
-### Query Parameters
-Parameter | Description
---------- | -----------
-target | Id of target to schedule build for
-arch | Architecture to build on
-force | Whether it's a forced build (true if present)
-## Poll for new jobs
-<aside class="warning">
-This endpoint is used by the agents and should not be used manually. It's just
-here for completeness. Requests to this endpoint modify the build queue,
-meaning manual requests can cause builds to be skipped.
-</aside>
-```shell
-curl \
-  -H 'X-Api-Key: secret' \
-  'https://example.com/api/v1/jobs/poll?arch=x86_64&max=2'
-```
-> JSON output format
-```json
-{
-  "message": "",
-  "data": [
-    {
-      "target_id": 1,
-      "kind": "git",
-      "url": "https://aur.archlinux.org/discord-ptb.git",
-      "branch": "master",
-      "path": "",
-      "repo": "bur",
-      "base_image": "archlinux:base-devel",
-      "force": true
-    }
-  ]
-}
-```
-Poll the server for new builds.
-### HTTP Request
-`GET /api/v1/jobs/poll`
-### Query Parameters
-Parameter | Description
---------- | -----------
-arch | For which architecture to receive jobs
-max | How many jobs to receive at most


@@ -125,8 +125,8 @@ id | ID of requested log
 <aside class="warning">
-This endpoint is used by the agents and should not be used manually unless you
-know what you're doing. It's just here for completeness.
+You should probably not use this endpoint, as it's used by the build system to
+publish its logs.
 </aside>
@@ -149,24 +149,3 @@ target | id of target this build is for
 ### Request body
 Plaintext contents of the build log.
-## Remove a build log
-```shell
-curl \
-  -XDELETE \
-  -H 'X-Api-Key: secret' \
-  https://example.com/api/v1/logs/1
-```
-Remove a build log from the server.
-### HTTP Request
-`DELETE /api/v1/logs/:id`
-### URL Parameters
-Parameter | Description
---------- | -----------
-id | id of log to remove


@@ -27,7 +27,6 @@ curl \
     "kind": "git",
     "url": "https://aur.archlinux.org/discord-ptb.git",
     "branch": "master",
-    "path" : "",
     "repo": "bur",
     "schedule": "",
     "arch": [
@@ -55,8 +54,6 @@ Parameter | Description
 limit | Maximum amount of results to return.
 offset | Offset of results.
 repo | Limit results to targets that publish to the given repo.
-query | Only return targets that have this substring in their URL, path or branch.
-arch | Only return targets that publish to this arch.
 ## Get specific target
@@ -76,9 +73,8 @@ curl \
     "kind": "git",
     "url": "https://aur.archlinux.org/discord-ptb.git",
     "branch": "master",
-    "path": "",
     "repo": "bur",
-    "schedule": "0 2",
+    "schedule": "0 3",
     "arch": [
       {
         "id": 1,
@@ -128,7 +124,6 @@ Parameter | Description
 kind | Kind of target to add; one of 'git', 'url'.
 url | URL of the Git repository.
 branch | Branch of the Git repository.
-path | Subdirectory inside Git repository to use.
 repo | Vieter repository to publish built packages to.
 schedule | Cron build schedule (syntax explained [here](https://rustybever.be/docs/vieter/usage/builds/schedule/))
 arch | Comma-separated list of architectures to build package on.
@@ -154,20 +149,12 @@ Parameter | Description
 kind | Kind of target; one of 'git', 'url'.
 url | URL of the Git repository.
 branch | Branch of the Git repository.
-path | Subdirectory inside Git repository to use.
 repo | Vieter repository to publish built packages to.
 schedule | Cron build schedule
 arch | Comma-separated list of architectures to build package on.
 ## Remove a target
-```shell
-curl \
-  -XDELETE \
-  -H 'X-Api-Key: secret' \
-  https://example.com/api/v1/targets/1
-```
 Remove a target from the server.
 ### HTTP Request


@@ -11,7 +11,6 @@ includes:
   - repository
   - targets
   - logs
-  - jobs
 search: true


@@ -32,11 +32,11 @@ configuration variable required for each command.
 ### `vieter server`
-* `port`: HTTP port to run on
-  * Default: `8000`
 * `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
   `WARN`, `INFO` or `DEBUG`.
   * Default: `WARN`
+* `log_file`: log file to write logs to.
+  * Default: `vieter.log` (in the current directory)
 * `pkg_dir`: where Vieter should store the actual package archives.
 * `data_dir`: where Vieter stores the repositories, log file & database.
 * `api_key`: the API key to use when authenticating requests.
@@ -44,26 +44,9 @@ configuration variable required for each command.
 * Packages with architecture `any` are always added to this architecture.
   This prevents the server from being confused when an `any` package is
   published as the very first package for a repository.
-* Targets added without an `arch` value use this value instead.
+* Git repositories added without an `arch` value use this value instead.
-* `global_schedule`: build schedule for any target that does not have a
-  schedule defined. For information about this syntax, see
-  [here](/usage/builds/schedule).
-  * Default: `0 3` (3AM every night)
-* `base_image`: Docker image to use when building a package. Any Pacman-based
-  distro image should work, as long as `/etc/pacman.conf` is used &
-  `base-devel` exists in the repositories. Make sure that the image supports
-  the architecture of your cron daemon.
-  * Default: `archlinux:base-devel` (only works on `x86_64`). If you require
-    `aarch64` support, consider using
-    [`menci/archlinuxarm:base-devel`](https://hub.docker.com/r/menci/archlinuxarm)
-    ([GitHub](https://github.com/Menci/docker-archlinuxarm)). This is the
-    image used for the Vieter CI builds.
-* `max_log_age`: maximum age of logs (in days). Logs older than this will get
-  cleaned by the log removal daemon. If set to zero, no logs are ever removed.
-  The age of logs is determined by the time the build was started.
-  * Default: `0`
-* `log_removal_schedule`: cron schedule defining when to clean old logs.
-  * Default: `0 0` (every day at midnight)
+* `port`: HTTP port to run on
+  * Default: `8000`
 ### `vieter cron`
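As a reference point, the settings documented above map onto `VIETER_`-prefixed environment variables (the prefix is visible in the CLI code further down this diff). A minimal sketch with invented values:

```sh
# Hypothetical example: configure & start the repository server purely
# through environment variables mirroring the documented settings.
export VIETER_API_KEY=secret
export VIETER_DATA_DIR=/var/lib/vieter
export VIETER_PKG_DIR=/var/lib/vieter/pkgs
export VIETER_DEFAULT_ARCH=x86_64
export VIETER_LOG_LEVEL=WARN
vieter server
```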


@@ -23,15 +23,15 @@ guarantees about stability, so beware!
 Thanks to the single-binary design of Vieter, this image can be used both for
 the repository server, the cron daemon and the agent.
-Below is a minimal compose file to set up both the repository server & a build
-agent:
+Below is an example compose file to set up both the repository server & the
+cron daemon:
 ```yaml
 version: '3'
 services:
   server:
-    image: 'chewingbever/vieter:0.5.0-rc.1'
+    image: 'chewingbever/vieter:dev'
     restart: 'always'
     environment:
@@ -41,19 +41,18 @@ services:
       - 'data:/data'
   cron:
-    image: 'chewingbever/vieter:0.5.0-rc.1'
+    image: 'chewingbever/vieter:dev'
     restart: 'always'
-    # Required to connect to the Docker daemon
     user: root
-    command: 'vieter agent'
+    command: 'vieter cron'
     environment:
       - 'VIETER_API_KEY=secret'
       # MUST be public URL of Vieter repository
       - 'VIETER_ADDRESS=https://example.com'
-      # Architecture for which the agent builds
-      - 'VIETER_ARCH=x86_64'
+      - 'VIETER_DEFAULT_ARCH=x86_64'
       - 'VIETER_MAX_CONCURRENT_BUILDS=2'
+      - 'VIETER_GLOBAL_SCHEDULE=0 3'
     volumes:
       - '/var/run/docker.sock:/var/run/docker.sock'
@@ -64,17 +63,14 @@ volumes:
 If you do not require the build system, the repository server can be used
 independently as well.
-Of course, Vieter allows a lot more configuration than this. This compose file
-is meant as a starting point for setting up your installation.
 {{< hint info >}}
 **Note**
-Builds are executed on the agent's system using the host's Docker daemon. An
-agent for a specific `arch` will only build packages for that specific
-architecture. Therefore, if you wish to build packages for both `x86_64` &
-`aarch64`, you'll have to deploy two agents, one on each architecture.
-Afterwards, any Git repositories enabled for those two architectures will build
-on both.
+Builds are executed on the cron daemon's system using the host's Docker daemon.
+A cron daemon on a specific architecture will only build packages for that
+specific architecture. Therefore, if you wish to build packages for both
+`x86_64` & `aarch64`, you'll have to deploy two cron daemons, one on each
+architecture. Afterwards, any Git repositories enabled for those two
+architectures will build on both.
 {{< /hint >}}
 ## Binary
@@ -103,9 +99,9 @@ latest official release or `vieter-git` for the latest development release.
 ### AUR
 If you prefer building the packages locally (or on your own Vieter instance),
-there's the [`vieter`](https://aur.archlinux.org/packages/vieter) &
-[`vieter-git`](https://aur.archlinux.org/packages/vieter-git) packages on the
-AUR. These packages build using the `vlang` compiler package, so I can't
+there's the `[vieter](https://aur.archlinux.org/packages/vieter)` &
+`[vieter-git](https://aur.archlinux.org/packages/vieter-git)` packages on the
+AUR. These packages build using the `vlang-git` compiler package, so I can't
 guarantee that a compiler update won't temporarily break them.
 ## Building from source
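For a quick test without compose, a single-container equivalent of the `server` service above might look like this (a sketch; the flags & values are illustrative, not taken from the docs):

```sh
# Hypothetical example: run only the repository server from the same image;
# port 8000 is the documented default HTTP port.
docker run -d \
  -p 8000:8000 \
  -e VIETER_API_KEY=secret \
  -e VIETER_DEFAULT_ARCH=x86_64 \
  -v vieter-data:/data \
  chewingbever/vieter:dev vieter server
```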


@@ -0,0 +1,3 @@
+---
+weight: 100
+---


@@ -0,0 +1,81 @@
+# Builds In-depth
+For those interested, this page describes how the build system works
+internally.
+## Builder image
+Every cron daemon periodically creates a builder image that is then used as a
+base for all builds. This is done to prevent build containers from having to
+pull down a bunch of updates when they update their system.
+The build container is created by running the following commands inside a
+container started from the image defined in `base_image`:
+```sh
+# Update repos & install required packages
+pacman -Syu --needed --noconfirm base-devel git
+# Add a non-root user to run makepkg
+groupadd -g 1000 builder
+useradd -mg builder builder
+# Make sure they can use sudo without a password
+echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
+# Create the directory for the builds & make it writeable for the
+# build user
+mkdir /build
+chown -R builder:builder /build
+```
+This script updates the packages to their latest versions & creates a non-root
+user to use when running `makepkg`.
+This script is base64-encoded & passed to the container as an environment
+variable. The container's entrypoint is set to `/bin/sh -c` & its command
+argument to `echo $BUILD_SCRIPT | base64 -d | /bin/sh -e`, with the
+`BUILD_SCRIPT` environment variable containing the base64-encoded script.
+Once the container exits, a new Docker image is created from it. This image is
+then used as the base for any builds.
+## Running builds
+Each build runs in its own Docker container, using the builder image as its
+base. The same base64 technique as above is used, just with a different script.
+To make the build logs clearer, each command is preceded by an echo command
+that prints it to stdout.
+Given the Git repository URL is `https://examplerepo.com` with branch `main`,
+the URL of the Vieter server is `https://example.com` and `vieter` is the
+repository we wish to publish to, we get the following script:
+```sh
+echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
+echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
+echo -e '+ pacman -Syu --needed --noconfirm'
+pacman -Syu --needed --noconfirm
+echo -e '+ su builder'
+su builder
+echo -e '+ git clone --single-branch --depth 1 --branch main https://examplerepo.com repo'
+git clone --single-branch --depth 1 --branch main https://examplerepo.com repo
+echo -e '+ cd repo'
+cd repo
+echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
+makepkg --nobuild --syncdeps --needed --noconfirm
+echo -e '+ source PKGBUILD'
+source PKGBUILD
+echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
+curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
+echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
+[ "$(id -u)" == 0 ] && exit 0
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+```
+This script:
+1. Adds the target repository as a repository in the build container
+2. Updates mirrors & packages
+3. Clones the Git repository
+4. Runs `makepkg` without building to calculate `pkgver`
+5. Checks whether the package version is already present on the server
+6. If not, runs `makepkg` & publishes any generated package archives to the server
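The `BUILD_SCRIPT` mechanism described above is easy to reproduce by hand. A rough sketch, not Vieter's exact invocation; the image & script contents are placeholders:

```sh
# Sketch of the base64 entrypoint trick: encode the script into an
# environment variable, then have the container decode & execute it.
script='pacman -Syu --needed --noconfirm base-devel git'

docker run \
  -e BUILD_SCRIPT="$(echo "$script" | base64)" \
  --entrypoint /bin/sh \
  archlinux:base-devel \
  -c 'echo $BUILD_SCRIPT | base64 -d | /bin/sh -e'
```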


@@ -1,23 +0,0 @@
----
-weight: 20
----
-# Cleanup
-Vieter stores the logs of every single package build. While this is great for
-debugging why builds fail, it also causes an active or long-running Vieter
-instance to accumulate thousands of logs.
-To combat this, a log removal daemon can be enabled that periodically removes
-old build logs. By starting your server with the `max_log_age` variable (see
-[Configuration](/configuration#vieter-server)), a daemon will get enabled that
-periodically removes logs older than this setting. By default, this will happen
-every day at midnight, but this behavior can be changed using the
-`log_removal_schedule` variable.
-{{< hint info >}}
-**Note**
-The daemon will always run a removal of logs on startup. Therefore, it's
-possible the daemon will be *very* active when first enabling this setting.
-After the initial surge of logs to remove, it'll calm down again.
-{{< /hint >}}
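Going by the two settings described above, enabling the removal daemon could look like this (a sketch; the `VIETER_` environment-variable convention is taken from the rest of these docs, the values are invented):

```sh
# Hypothetical example: keep 30 days of logs & clean them nightly at 1 AM.
export VIETER_MAX_LOG_AGE=30
export VIETER_LOG_REMOVAL_SCHEDULE='0 1'
vieter server
```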


@@ -1,7 +1,3 @@
----
-weight: 10
----
 # Cron schedule syntax
 The Vieter cron daemon uses a subset of the cron expression syntax to schedule


@@ -99,7 +99,7 @@ fn (mut m ImageManager) clean_old_images() {
 			// wasn't deleted. Therefore, we move the index over. If the function
 			// returns true, the array's length has decreased by one so we don't
 			// move the index.
-			dd.image_remove(m.images[image][i]) or {
+			dd.remove_image(m.images[image][i]) or {
 				// The image was removed by an external event
 				if err.code() == 404 {
 					m.images[image].delete(i)


@@ -1,36 +1,35 @@
 module agent
+import log
+// log a message with the given level
+pub fn (mut d AgentDaemon) log(msg string, level log.Level) {
+	lock d.logger {
+		d.logger.send_output(msg, level)
+	}
+}
 // lfatal create a log message with the fatal level
 pub fn (mut d AgentDaemon) lfatal(msg string) {
-	lock d.logger {
-		d.logger.fatal(msg)
-	}
+	d.log(msg, log.Level.fatal)
 }
 // lerror create a log message with the error level
 pub fn (mut d AgentDaemon) lerror(msg string) {
-	lock d.logger {
-		d.logger.error(msg)
-	}
+	d.log(msg, log.Level.error)
 }
 // lwarn create a log message with the warn level
 pub fn (mut d AgentDaemon) lwarn(msg string) {
-	lock d.logger {
-		d.logger.warn(msg)
-	}
+	d.log(msg, log.Level.warn)
 }
 // linfo create a log message with the info level
 pub fn (mut d AgentDaemon) linfo(msg string) {
-	lock d.logger {
-		d.logger.info(msg)
-	}
+	d.log(msg, log.Level.info)
 }
 // ldebug create a log message with the debug level
 pub fn (mut d AgentDaemon) ldebug(msg string) {
-	lock d.logger {
-		d.logger.debug(msg)
-	}
+	d.log(msg, log.Level.debug)
 }


@@ -57,7 +57,7 @@ pub fn create_build_image(base_image string) !string {
 	image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
 	// We pull the provided image
-	dd.image_pull(image_name, image_tag)!
+	dd.pull_image(image_name, image_tag)!
 	id := dd.container_create(c)!.id
 	// id := docker.create_container(c)!
@@ -79,7 +79,7 @@ pub fn create_build_image(base_image string) !string {
 	// TODO also add the base image's name into the image name to prevent
 	// conflicts.
 	tag := time.sys_mono_now().str()
-	image := dd.image_from_container(id, 'vieter-build', tag)!
+	image := dd.create_image_from_container(id, 'vieter-build', tag)!
 	dd.container_remove(id)!
 	return image.id


@@ -1,7 +1,7 @@
 module build
 import models { BuildConfig, Target }
-import cron
+import cron.expression { CronExpression, parse_expression }
 import time
 import datatypes { MinHeap }
 import util
@@ -13,7 +13,7 @@ pub mut:
 	// Next timestamp from which point this job is allowed to be executed
 	timestamp time.Time
 	// Required for calculating next timestamp after having pop'ed a job
-	ce &cron.Expression = unsafe { nil }
+	ce CronExpression
 	// Actual build config sent to the agent
 	config BuildConfig
 	// Whether this is a one-time job
@@ -30,7 +30,7 @@ fn (r1 BuildJob) < (r2 BuildJob) bool {
 // for each architecture. Agents receive jobs from this queue.
 pub struct BuildJobQueue {
 	// Schedule to use for targets without explicitely defined cron expression
-	default_schedule &cron.Expression
+	default_schedule CronExpression
 	// Base image to use for targets without defined base image
 	default_base_image string
 mut:
@@ -44,9 +44,9 @@ mut:
 }
 // new_job_queue initializes a new job queue
-pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string) BuildJobQueue {
+pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue {
 	return BuildJobQueue{
-		default_schedule: unsafe { default_schedule }
+		default_schedule: default_schedule
 		default_base_image: default_base_image
 		invalidated: map[int]time.Time{}
 	}
@@ -85,14 +85,14 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
 	if !input.now {
 		ce := if input.target.schedule != '' {
-			cron.parse_expression(input.target.schedule) or {
+			parse_expression(input.target.schedule) or {
 				return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()")
 			}
 		} else {
 			q.default_schedule
 		}
-		job.timestamp = ce.next_from_now()
+		job.timestamp = ce.next_from_now()!
 		job.ce = ce
 	} else {
 		job.timestamp = time.now()
@@ -105,8 +105,8 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
 // reschedule the given job by calculating the next timestamp and re-adding it
 // to its respective queue. This function is called by the pop functions
 // *after* having pop'ed the job.
-fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) {
-	new_timestamp := job.ce.next_from_now()
+fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! {
+	new_timestamp := job.ce.next_from_now()!
 	new_job := BuildJob{
 		...job
@@ -168,7 +168,10 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
 		job = q.queues[arch].pop()?
 		if !job.single {
-			q.reschedule(job, arch)
+			// TODO how do we handle this properly? Is it even possible for a
+			// cron expression to not return a next time if it's already been
+			// used before?
+			q.reschedule(job, arch) or {}
 		}
 		return job
@@ -195,7 +198,8 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
 		job = q.queues[arch].pop() or { break }
 		if !job.single {
-			q.reschedule(job, arch)
+			// TODO idem
+			q.reschedule(job, arch) or {}
 		}
 		out << job
View File

@ -1,27 +1,28 @@
module client module client
import models { BuildLog, BuildLogFilter } import models { BuildLog, BuildLogFilter }
import net.http { Method }
import web.response { Response } import web.response { Response }
import time import time
// get_build_logs returns all build logs. // get_build_logs returns all build logs.
pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog { pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog {
params := models.params_from(filter) params := models.params_from(filter)
data := c.send_request<[]BuildLog>(.get, '/api/v1/logs', params)! data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)!
return data.data return data.data
} }
// get_build_log returns a specific build log. // get_build_log returns a specific build log.
pub fn (c &Client) get_build_log(id int) !BuildLog { pub fn (c &Client) get_build_log(id int) !BuildLog {
data := c.send_request<BuildLog>(.get, '/api/v1/logs/$id', {})! data := c.send_request<BuildLog>(Method.get, '/api/v1/logs/$id', {})!
return data.data return data.data
} }
// get_build_log_content returns the contents of the build log file. // get_build_log_content returns the contents of the build log file.
pub fn (c &Client) get_build_log_content(id int) !string { pub fn (c &Client) get_build_log_content(id int) !string {
data := c.send_request_raw_response(.get, '/api/v1/logs/$id/content', {}, '')! data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')!
return data return data
} }
@ -36,12 +37,7 @@ pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time t
'exitCode': exit_code.str() 'exitCode': exit_code.str()
} }
data := c.send_request_with_body<int>(.post, '/api/v1/logs', params, content)! data := c.send_request_with_body<int>(Method.post, '/api/v1/logs', params, content)!
return data return data
} }
// remove_build_log removes the build log with the given id from the server.
pub fn (c &Client) remove_build_log(id int) ! {
c.send_request<string>(.delete, '/api/v1/logs/$id', {})!
}


@@ -1,16 +0,0 @@
-module client
-// remove_repo removes an entire repository.
-pub fn (c &Client) remove_repo(repo string) ! {
-	c.send_request<string>(.delete, '/$repo', {})!
-}
-// remove_arch_repo removes an entire arch-repo.
-pub fn (c &Client) remove_arch_repo(repo string, arch string) ! {
-	c.send_request<string>(.delete, '/$repo/$arch', {})!
-}
-// remove_package removes a single package from the given arch-repo.
-pub fn (c &Client) remove_package(repo string, arch string, pkgname string) ! {
-	c.send_request<string>(.delete, '/$repo/$arch/$pkgname', {})!
-}


@@ -1,11 +1,12 @@
 module client
 import models { Target, TargetFilter }
+import net.http { Method }
 // get_targets returns a list of targets, given a filter object.
 pub fn (c &Client) get_targets(filter TargetFilter) ![]Target {
 	params := models.params_from(filter)
-	data := c.send_request<[]Target>(.get, '/api/v1/targets', params)!
+	data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)!
 	return data.data
 }
@@ -33,7 +34,7 @@ pub fn (c &Client) get_all_targets() ![]Target {
 // get_target returns the target for a specific id.
 pub fn (c &Client) get_target(id int) !Target {
-	data := c.send_request<Target>(.get, '/api/v1/targets/$id', {})!
+	data := c.send_request<Target>(Method.get, '/api/v1/targets/$id', {})!
 	return data.data
 }
@@ -50,14 +51,14 @@ pub struct NewTarget {
 // add_target adds a new target to the server.
 pub fn (c &Client) add_target(t NewTarget) !int {
 	params := models.params_from<NewTarget>(t)
-	data := c.send_request<int>(.post, '/api/v1/targets', params)!
+	data := c.send_request<int>(Method.post, '/api/v1/targets', params)!
 	return data.data
 }
 // remove_target removes the target with the given id from the server.
 pub fn (c &Client) remove_target(id int) !string {
-	data := c.send_request<string>(.delete, '/api/v1/targets/$id', {})!
+	data := c.send_request<string>(Method.delete, '/api/v1/targets/$id', {})!
 	return data.data
 }
@@ -65,7 +66,7 @@ pub fn (c &Client) remove_target(id int) !string {
 // patch_target sends a PATCH request to the given target with the params as
 // payload.
 pub fn (c &Client) patch_target(id int, params map[string]string) !string {
-	data := c.send_request<string>(.patch, '/api/v1/targets/$id', params)!
+	data := c.send_request<string>(Method.patch, '/api/v1/targets/$id', params)!
 	return data.data
 }


@@ -24,13 +24,11 @@ pub fn cmd() cli.Command {
 		flags: [
 			cli.Flag{
 				name: 'limit'
-				abbrev: 'l'
 				description: 'How many results to return.'
 				flag: cli.FlagType.int
 			},
 			cli.Flag{
 				name: 'offset'
-				abbrev: 'o'
 				description: 'Minimum index to return.'
 				flag: cli.FlagType.int
 			},
@@ -41,18 +39,16 @@ pub fn cmd() cli.Command {
 			},
 			cli.Flag{
 				name: 'today'
-				abbrev: 't'
-				description: 'Only list logs started today. This flag overwrites any other date-related flag.'
+				description: 'Only list logs started today.'
 				flag: cli.FlagType.bool
 			},
 			cli.Flag{
 				name: 'failed'
-				description: 'Only list logs with non-zero exit codes. This flag overwrites the --code flag.'
+				description: 'Only list logs with non-zero exit codes.'
 				flag: cli.FlagType.bool
 			},
 			cli.Flag{
 				name: 'day'
-				abbrev: 'd'
 				description: 'Only list logs started on this day. (format: YYYY-MM-DD)'
 				flag: cli.FlagType.string
 			},
@@ -66,11 +62,6 @@ pub fn cmd() cli.Command {
 				description: 'Only list logs started after this timestamp. (format: YYYY-MM-DD HH:mm:ss)'
 				flag: cli.FlagType.string
 			},
-			cli.Flag{
-				name: 'code'
-				description: 'Only return logs with the given exit code. Prepend with `!` to exclude instead of include. Can be specified multiple times.'
-				flag: cli.FlagType.string_array
-			},
 		]
 		execute: fn (cmd cli.Command) ! {
 			config_file := cmd.flags.get_string('config-file')!
@@ -140,8 +131,6 @@ pub fn cmd() cli.Command {
 				filter.exit_codes = [
 					'!0',
 				]
-			} else {
-				filter.exit_codes = cmd.flags.get_strings('code')!
 			}
 			raw := cmd.flags.get_bool('raw')!
@@ -149,18 +138,6 @@ pub fn cmd() cli.Command {
 				list(conf, filter, raw)!
 			}
 		},
-		cli.Command{
-			name: 'remove'
-			required_args: 1
-			usage: 'id'
-			description: 'Remove a build log that matches the given id.'
-			execute: fn (cmd cli.Command) ! {
-				config_file := cmd.flags.get_string('config-file')!
-				conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
-				remove(conf, cmd.args[0])!
-			}
-		},
 		cli.Command{
 			name: 'info'
 			required_args: 1
@@ -227,9 +204,3 @@ fn content(conf Config, id int) ! {
 	println(content)
 }
-// remove removes a build log from the server's list.
-fn remove(conf Config, id string) ! {
-	c := client.new(conf.address, conf.api_key)
-	c.remove_build_log(id.int())!
-}


@@ -1,52 +0,0 @@
-module repos
-import cli
-import conf as vconf
-import client
-struct Config {
-	address string [required]
-	api_key string [required]
-}
-// cmd returns the cli module that handles modifying the repository contents.
-pub fn cmd() cli.Command {
-	return cli.Command{
-		name: 'repos'
-		description: 'Interact with the repositories & packages stored on the server.'
-		commands: [
-			cli.Command{
-				name: 'remove'
-				required_args: 1
-				usage: 'repo [arch [pkgname]]'
-				description: 'Remove a repo, arch-repo, or package from the server.'
-				flags: [
-					cli.Flag{
-						name: 'force'
-						flag: cli.FlagType.bool
-					},
-				]
-				execute: fn (cmd cli.Command) ! {
-					config_file := cmd.flags.get_string('config-file')!
-					conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
-					if cmd.args.len < 3 {
-						if !cmd.flags.get_bool('force')! {
-							return error('Removing an arch-repo or repository is a very destructive command. If you really do wish to perform this operation, explicitely add the --force flag.')
-						}
-					}
-					client := client.new(conf.address, conf.api_key)
-					if cmd.args.len == 1 {
-						client.remove_repo(cmd.args[0])!
-					} else if cmd.args.len == 2 {
-						client.remove_arch_repo(cmd.args[0], cmd.args[1])!
-					} else {
-						client.remove_package(cmd.args[0], cmd.args[1], cmd.args[2])!
-					}
-				}
-			},
-		]
-	}
-}


@@ -1,7 +1,7 @@
 module schedule
 import cli
-import cron
+import cron.expression { parse_expression }
 import time
 // cmd returns the cli submodule for previewing a cron schedule.
@@ -19,10 +19,10 @@ pub fn cmd() cli.Command {
 			},
 		]
 		execute: fn (cmd cli.Command) ! {
-			ce := cron.parse_expression(cmd.args.join(' '))!
+			ce := parse_expression(cmd.args.join(' '))!
 			count := cmd.flags.get_int('count')!
-			for t in ce.next_n(time.now(), count) {
+			for t in ce.next_n(time.now(), count)! {
 				println(t)
 			}
 		}
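Going by the code above (`cmd.args.join(' ')` plus a `count` flag), previewing a schedule might look like this; the exact flag syntax is an assumption:

```sh
# Hypothetical invocation: print the next 5 times the expression '0 3'
# (03:00 every night) would trigger.
vieter schedule --count 5 '0 3'
```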


@@ -26,7 +26,7 @@ fn build(conf Config, target_id int, force bool) ! {
 		dd.close() or {}
 	}
-	dd.image_remove(image_id)!
+	dd.remove_image(image_id)!
 	println('Uploading logs to Vieter...')
 	c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,


@@ -2,7 +2,7 @@ module targets
 import cli
 import conf as vconf
-import cron
+import cron.expression { parse_expression }
 import client { NewTarget }
 import console
 import models { TargetFilter }
@@ -25,13 +25,11 @@ pub fn cmd() cli.Command {
 		flags: [
 			cli.Flag{
 				name: 'limit'
-				abbrev: 'l'
 				description: 'How many results to return.'
 				flag: cli.FlagType.int
 			},
 			cli.Flag{
 				name: 'offset'
-				abbrev: 'o'
 				description: 'Minimum index to return.'
 				flag: cli.FlagType.int
 			},
@@ -40,17 +38,6 @@ pub fn cmd() cli.Command {
 				description: 'Only return targets that publish to this repo.'
 				flag: cli.FlagType.string
 			},
-			cli.Flag{
-				name: 'query'
-				abbrev: 'q'
-				description: 'Search string to filter targets by.'
-				flag: cli.FlagType.string
-			},
-			cli.Flag{
-				name: 'arch'
-				description: 'Only list targets that build for this arch.'
-				flag: cli.FlagType.string
-			},
 		]
 		execute: fn (cmd cli.Command) ! {
 			config_file := cmd.flags.get_string('config-file')!
@@ -73,16 +60,6 @@ pub fn cmd() cli.Command {
 				filter.repo = repo
 			}
-			query := cmd.flags.get_string('query')!
-			if query != '' {
-				filter.query = query
-			}
-			arch := cmd.flags.get_string('arch')!
-			if arch != '' {
-				filter.arch = arch
-			}
 			raw := cmd.flags.get_bool('raw')!
 			list(conf, filter, raw)!
@@ -295,7 +272,7 @@ fn patch(conf Config, id string, params map[string]string) ! {
 	// We check the cron expression first because it's useless to send an
 	// invalid one to the server.
 	if 'schedule' in params && params['schedule'] != '' {
-		cron.parse_expression(params['schedule']) or {
+		parse_expression(params['schedule']) or {
 			return error('Invalid cron expression: $err.msg()')
 		}
 	}

src/cron/cli.v

@@ -0,0 +1,32 @@
+module cron
+import cli
+import conf as vconf
+struct Config {
+pub:
+	log_level               string = 'WARN'
+	api_key                 string
+	address                 string
+	data_dir                string
+	base_image              string = 'archlinux:base-devel'
+	max_concurrent_builds   int    = 1
+	api_update_frequency    int    = 15
+	image_rebuild_frequency int    = 1440
+	// Replicates the behavior of the original cron system
+	global_schedule         string = '0 3'
+}
+// cmd returns the cli module that handles the cron daemon.
+pub fn cmd() cli.Command {
+	return cli.Command{
+		name: 'cron'
+		description: 'Start the cron service that periodically runs builds.'
+		execute: fn (cmd cli.Command) ! {
+			config_file := cmd.flags.get_string('config-file')!
+			conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
+			cron(conf)!
+		}
+	}
+}

src/cron/cron.v

@@ -0,0 +1,33 @@
+module cron
+import log
+import cron.daemon
+import cron.expression
+import os
+const log_file_name = 'vieter.cron.log'
+// cron starts a cron daemon & starts periodically scheduling builds.
+pub fn cron(conf Config) ! {
+	// Configure logger
+	log_level := log.level_from_tag(conf.log_level) or {
+		return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
+	}
+	mut logger := log.Log{
+		level: log_level
+	}
+	log_file := os.join_path_single(conf.data_dir, cron.log_file_name)
+	logger.set_full_logpath(log_file)
+	logger.log_to_console_too()
+	ce := expression.parse_expression(conf.global_schedule) or {
+		return error('Error while parsing global cron expression: $err.msg()')
+	}
+	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
+		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)!
+	d.run()
+}


@@ -0,0 +1,115 @@
+module daemon
+import time
+import sync.stdatomic
+import build
+import os
+const (
+	build_empty   = 0
+	build_running = 1
+	build_done    = 2
+)
+// clean_finished_builds removes finished builds from the build slots & returns
+// them.
+fn (mut d Daemon) clean_finished_builds() []ScheduledBuild {
+	mut out := []ScheduledBuild{}
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
+			out << d.builds[i]
+		}
+	}
+	return out
+}
+// update_builds starts as many builds as possible.
+fn (mut d Daemon) start_new_builds() {
+	now := time.now()
+	for d.queue.len() > 0 {
+		elem := d.queue.peek() or {
+			d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
+			break
+		}
+		if elem.timestamp < now {
+			sb := d.queue.pop() or {
+				d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.")
+				break
+			}
+			// If this build couldn't be scheduled, no more will be possible.
+			if !d.start_build(sb) {
+				d.queue.insert(sb)
+				break
+			}
+		} else {
+			break
+		}
+	}
+}
+// start_build starts a build for the given ScheduledBuild object.
+fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_running)
+			d.builds[i] = sb
+			go d.run_build(i, sb)
+			return true
+		}
+	}
+	return false
+}
+// run_build actually starts the build process for a given target.
+fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
+	d.linfo('started build: $sb.target.url -> $sb.target.repo')
+	// 0 means success, 1 means failure
+	mut status := 0
+	res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
+		&sb.target, false) or {
+		d.ldebug('build_target error: $err.msg()')
+		status = 1
+		build.BuildResult{}
+	}
+	if status == 0 {
+		d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
+		build_arch := os.uname().machine
+		d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
+			res.exit_code, res.logs) or {
+			d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
+		}
+	} else {
+		d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo')
+	}
+	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
+}
+// current_build_count returns how many builds are currently running.
+fn (mut d Daemon) current_build_count() int {
+	mut res := 0
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
+			res += 1
+		}
+	}
+	return res
+}


@ -0,0 +1,274 @@
module daemon
import time
import log
import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression }
import math
import build
import docker
import os
import client
import models { Target }
const (
// How many seconds to wait before retrying to update API if failed
api_update_retry_timeout = 5
// How many seconds to wait before retrying to rebuild image if failed
rebuild_base_image_retry_timout = 30
)
struct ScheduledBuild {
pub:
target Target
timestamp time.Time
}
// Overloaded operator for comparing ScheduledBuild objects
fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
return r1.timestamp < r2.timestamp
}
pub struct Daemon {
mut:
client client.Client
base_image string
builder_images []string
global_schedule CronExpression
api_update_frequency int
image_rebuild_frequency int
// Targets currently loaded from API.
targets []Target
// At what point to update the list of targets.
api_update_timestamp time.Time
image_build_timestamp time.Time
queue MinHeap<ScheduledBuild>
// Which builds are currently running
builds []ScheduledBuild
// Atomic variables used to detect when a build has finished; length is the
// same as builds
atomics []u64
logger shared log.Log
}
// init_daemon initializes a new Daemon object. It renews the targets &
// populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) !Daemon {
mut d := Daemon{
client: client.new(address, api_key)
base_image: base_image
global_schedule: global_schedule
api_update_frequency: api_update_frequency
image_rebuild_frequency: image_rebuild_frequency
atomics: []u64{len: max_concurrent_builds}
builds: []ScheduledBuild{len: max_concurrent_builds}
logger: logger
}
// Initialize the targets & queue
d.renew_targets()
d.renew_queue()
if !d.rebuild_base_image() {
return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
}
return d
}
// run starts the actual daemon process. It runs builds when possible &
// periodically refreshes the list of targets to ensure we stay in sync.
pub fn (mut d Daemon) run() {
for {
finished_builds := d.clean_finished_builds()
// Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp {
d.renew_targets()
d.renew_queue()
}
// The finished builds should only be rescheduled if the API contents
// haven't been renewed.
else {
for sb in finished_builds {
d.schedule_build(sb.target)
}
}
// TODO remove old builder images.
// This issue is less trivial than it sounds, because a build could
// still be running when the image has to be rebuilt. That would
// prevent the image from being removed. Therefore, we will need to
// keep track of a list or something & remove an image once we have
// made sure it isn't being used anymore.
if time.now() >= d.image_build_timestamp {
d.rebuild_base_image()
// In theory, executing this function here allows an old builder
// image to exist for at most image_rebuild_frequency minutes.
d.clean_old_base_images()
}
// Schedules new builds when possible
d.start_new_builds()
// If there are builds currently running, the daemon should refresh
// every second to clean up any finished builds & start new ones.
mut delay := time.Duration(1 * time.second)
// Sleep either until we have to refresh the targets or when the next
// build has to start, with a minimum of 1 second.
if d.current_build_count() == 0 {
now := time.now()
delay = d.api_update_timestamp - now
if d.queue.len() > 0 {
elem := d.queue.peek() or {
d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
// This is just a fallback option. In theory, queue.peek()
// should *never* return an error or none, because we check
// its len beforehand.
time.sleep(1)
continue
}
time_until_next_job := elem.timestamp - now
delay = math.min(delay, time_until_next_job)
}
}
// We sleep for at least one second. This is to prevent the program
// from looping agressively when a cronjob can be scheduled, but
// there's no spots free for it to be started.
delay = math.max(delay, 1 * time.second)
d.ldebug('Sleeping for ${delay}...')
time.sleep(delay)
}
}
// schedule_build adds the next occurence of the given targets build to the
// queue.
fn (mut d Daemon) schedule_build(target Target) {
ce := if target.schedule != '' {
parse_expression(target.schedule) or {
// TODO This shouldn't return an error if the expression is empty.
d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
d.global_schedule
}
} else {
d.global_schedule
}
// A target that can't be scheduled will just be skipped for now
timestamp := ce.next_from_now() or {
d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping")
return
}
d.queue.insert(ScheduledBuild{
target: target
timestamp: timestamp
})
}
// renew_targets requests the newest list of targets from the server & replaces
// the old one.
fn (mut d Daemon) renew_targets() {
d.linfo('Renewing targets...')
mut new_targets := d.client.get_all_targets() or {
d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...')
d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)
return
}
// Filter out any targets that shouldn't run on this architecture
cur_arch := os.uname().machine
new_targets = new_targets.filter(it.arch.any(it.value == cur_arch))
d.targets = new_targets
d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
}
// renew_queue replaces the old queue with a new one that reflects the newest
// values in targets.
fn (mut d Daemon) renew_queue() {
d.linfo('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{}
// Move any jobs that should have already started from the old queue onto
// the new one
now := time.now()
// For some reason, using
// ```v
// for d.queue.len() > 0 && d.queue.peek()!.timestamp < now {
// ```
// here causes the function to exit prematurely, without any error whatsoever.
// https://github.com/vlang/v/issues/14042
for d.queue.len() > 0 {
elem := d.queue.pop() or {
d.lerror("queue.pop() returned an error. This shouldn't happen.")
continue
}
if elem.timestamp < now {
new_queue.insert(elem)
} else {
break
}
}
d.queue = new_queue
// For each target in targets, parse their cron expression (or use the
// default one if not present) & add them to the queue
for target in d.targets {
d.schedule_build(target)
}
}
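The queue is a min-heap ordered on the build timestamp, which is what makes the pop-until-future loop above correct: pop always yields the soonest job. A small sketch of the same idea, assuming V's datatypes.MinHeap and an element type that defines `<` (as ScheduledBuild presumably does elsewhere in this codebase):

```v
import datatypes { MinHeap }
import time

struct Job {
	timestamp time.Time
}

// MinHeap orders its elements with `<`
fn (a Job) < (b Job) bool {
	return a.timestamp < b.timestamp
}

fn main() {
	mut queue := MinHeap<Job>{}
	queue.insert(Job{ timestamp: time.now().add_seconds(30) })
	queue.insert(Job{ timestamp: time.now().add_seconds(10) })
	// The job 10 seconds out comes off the heap first
	println(queue.pop() or { return })
}
```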
// rebuild_base_image recreates the builder image.
fn (mut d Daemon) rebuild_base_image() bool {
d.linfo('Rebuilding builder image...')
d.builder_images << build.create_build_image(d.base_image) or {
d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timout}s...')
d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout)
return false
}
d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
return true
}
// clean_old_base_images tries to remove any old but still present builder
// images.
fn (mut d Daemon) clean_old_base_images() {
mut i := 0
mut dd := docker.new_conn() or {
d.lerror('Failed to connect to Docker socket.')
return
}
defer {
dd.close() or {}
}
for i < d.builder_images.len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(d.builder_images[i]) or { i += 1 }
}
}
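The index handling above is the usual delete-while-iterating pattern: only advance the index when the current element survives, because a successful removal shifts the rest of the array down. The same pattern isolated on a plain array (the predicate stands in for the Docker call; all names here are hypothetical):

```v
// keep_last mirrors the daemon's `len - 1` bound: the newest builder
// image at the end of the array must never be removed.
fn remove_matching(mut items []string, keep_last bool, pred fn (string) bool) {
	limit := if keep_last { 1 } else { 0 }
	mut i := 0
	for i < items.len - limit {
		if pred(items[i]) {
			// Removal shrinks the array and slides the next element
			// into position i, so don't advance.
			items.delete(i)
		} else {
			i += 1
		}
	}
}
```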

View File

@ -0,0 +1,35 @@
module daemon
import log
// log creates a log message with the given level
pub fn (mut d Daemon) log(msg string, level log.Level) {
lock d.logger {
d.logger.send_output(msg, level)
}
}
// lfatal creates a log message with the fatal level
pub fn (mut d Daemon) lfatal(msg string) {
d.log(msg, log.Level.fatal)
}
// lerror creates a log message with the error level
pub fn (mut d Daemon) lerror(msg string) {
d.log(msg, log.Level.error)
}
// lwarn creates a log message with the warn level
pub fn (mut d Daemon) lwarn(msg string) {
d.log(msg, log.Level.warn)
}
// linfo creates a log message with the info level
pub fn (mut d Daemon) linfo(msg string) {
d.log(msg, log.Level.info)
}
// ldebug creates a log message with the debug level
pub fn (mut d Daemon) ldebug(msg string) {
d.log(msg, log.Level.debug)
}

View File

@ -1,101 +0,0 @@
module cron
#flag -I @VMODROOT/libvieter/include
#flag -L @VMODROOT/libvieter/build
#flag -lvieter
#include "vieter_cron.h"
[typedef]
pub struct C.vieter_cron_expression {
minutes &u8
hours &u8
days &u8
months &u8
minute_count u8
hour_count u8
day_count u8
month_count u8
}
pub type Expression = C.vieter_cron_expression
// == returns whether the two expressions are equal by value.
fn (ce1 Expression) == (ce2 Expression) bool {
if ce1.month_count != ce2.month_count || ce1.day_count != ce2.day_count
|| ce1.hour_count != ce2.hour_count || ce1.minute_count != ce2.minute_count {
return false
}
for i in 0 .. ce1.month_count {
unsafe {
if ce1.months[i] != ce2.months[i] {
return false
}
}
}
for i in 0 .. ce1.day_count {
unsafe {
if ce1.days[i] != ce2.days[i] {
return false
}
}
}
for i in 0 .. ce1.hour_count {
unsafe {
if ce1.hours[i] != ce2.hours[i] {
return false
}
}
}
for i in 0 .. ce1.minute_count {
unsafe {
if ce1.minutes[i] != ce2.minutes[i] {
return false
}
}
}
return true
}
[typedef]
struct C.vieter_cron_simple_time {
year int
month int
day int
hour int
minute int
}
type SimpleTime = C.vieter_cron_simple_time
enum ParseError as u8 {
ok = 0
invalid_expression = 1
invalid_number = 2
out_of_range = 3
too_many_parts = 4
not_enough_parts = 5
}
// str returns the string representation of a ParseError.
fn (e ParseError) str() string {
return match e {
.ok { '' }
.invalid_expression { 'Invalid expression' }
.invalid_number { 'Invalid number' }
.out_of_range { 'Out of range' }
.too_many_parts { 'Too many parts' }
.not_enough_parts { 'Not enough parts' }
}
}
fn C.vieter_cron_expr_init() &C.vieter_cron_expression
fn C.vieter_cron_expr_free(ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_next(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression, ref &C.vieter_cron_simple_time)
fn C.vieter_cron_expr_next_from_now(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_parse(out &C.vieter_cron_expression, s &char) ParseError

View File

@ -1,73 +0,0 @@
module cron
import time
// free the memory associated with the Expression.
[unsafe]
pub fn (ce &Expression) free() {
C.vieter_cron_expr_free(ce)
}
// parse_expression parses a string into an Expression.
pub fn parse_expression(exp string) !&Expression {
out := C.vieter_cron_expr_init()
res := C.vieter_cron_expr_parse(out, exp.str)
if res != .ok {
return error(res.str())
}
return out
}
// next calculates the next occurrence of the cron schedule, given a reference
// point.
pub fn (ce &Expression) next(ref time.Time) time.Time {
st := SimpleTime{
year: ref.year
month: ref.month
day: ref.day
hour: ref.hour
minute: ref.minute
}
out := SimpleTime{}
C.vieter_cron_expr_next(&out, ce, &st)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_from_now calculates the next occurrence of the cron schedule with the
// current time as reference.
pub fn (ce &Expression) next_from_now() time.Time {
out := SimpleTime{}
C.vieter_cron_expr_next_from_now(&out, ce)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &Expression) next_n(ref time.Time, n int) []time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)
for i in 1 .. n {
times << ce.next(times[i - 1])
}
return times
}
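Taken together, the removed wrapper exposes a three-step API: parse, query, free. A usage sketch, assuming libvieter has been built so the #flag lines resolve and the module is imported as cron (as the 0.5.0 server code further down does):

```v
import cron
import time

fn main() {
	ce := cron.parse_expression('0 3') or { panic(err) }
	defer {
		unsafe { ce.free() }
	}

	ref := time.parse('2002-01-01 00:00:00') or { panic(err) }
	// Three occurrences of 03:00, one per day
	for t in ce.next_n(ref, 3) {
		println(t)
	}
}
```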

View File

@ -0,0 +1,136 @@
module expression
import time
pub struct CronExpression {
minutes []int
hours []int
days []int
months []int
}
// next calculates the earliest time this cron expression is valid. It will
// always pick a moment in the future, even if ref matches completely up to the
// minute. This function consciously does not take leap years into account.
pub fn (ce &CronExpression) next(ref time.Time) !time.Time {
// If the given ref matches the next cron occurrence up to the minute, we
// still want to return a value in the future. The minute comparison below
// therefore skips an exact match (>=), which is enough because a cron
// expression's resolution is no smaller than one minute.
sref := ref
// For all of these values, the rule is the following: if their value is
// the length of their respective array in the CronExpression object, that
// means we've looped back around. This means that the "bigger" value has
// to be incremented by one. For example, if the minutes have looped
// around, that means that the hour has to be incremented as well.
mut minute_index := 0
mut hour_index := 0
mut day_index := 0
mut month_index := 0
// This chain is the same logic multiple times, namely that if a "bigger"
// value loops around, then the smaller value will always reset as well.
// For example, if we're going to a new day, the hour & minute will always
// be their smallest value again.
for month_index < ce.months.len && sref.month > ce.months[month_index] {
month_index++
}
if month_index < ce.months.len && sref.month == ce.months[month_index] {
for day_index < ce.days.len && sref.day > ce.days[day_index] {
day_index++
}
if day_index < ce.days.len && ce.days[day_index] == sref.day {
for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] {
hour_index++
}
if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour {
// Minute is the only value where we explicitly make sure we
// can't match sref's value exactly. This is to ensure we only
// return values in the future.
for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] {
minute_index++
}
}
}
}
// Here, we increment the "bigger" values by one if the smaller ones loop
// around. The order is important, as it allows a sort-of waterfall effect
// to occur which updates all values if required.
if minute_index == ce.minutes.len && hour_index < ce.hours.len {
hour_index += 1
}
if hour_index == ce.hours.len && day_index < ce.days.len {
day_index += 1
}
if day_index == ce.days.len && month_index < ce.months.len {
month_index += 1
}
mut minute := ce.minutes[minute_index % ce.minutes.len]
mut hour := ce.hours[hour_index % ce.hours.len]
mut day := ce.days[day_index % ce.days.len]
// Sometimes, we end up with a day that does not exist within the selected
// month, e.g. day 30 in February. When this occurs, we reset day back to
// the smallest value & loop over to the next month that does have this
// day.
if day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
day = ce.days[0]
month_index += 1
for day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
month_index += 1
// If for whatever reason the day value ends up being something
// that can't be scheduled in any month, we have to make sure we
// don't create an infinite loop.
if month_index == 2 * ce.months.len {
return error('No schedulable moment.')
}
}
}
month := ce.months[month_index % ce.months.len]
mut year := sref.year
// If the month loops over, we need to increment the year.
if month_index >= ce.months.len {
year++
}
return time.new_time(time.Time{
year: year
month: month
day: day
minute: minute
hour: hour
})
}
// next_from_now returns the result of ce.next(ref) where ref is the result of
// time.now().
pub fn (ce &CronExpression) next_from_now() !time.Time {
return ce.next(time.now())
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)!
for i in 1 .. n {
times << ce.next(times[i - 1])!
}
return times
}

View File

@ -0,0 +1,146 @@
module expression
import bitfield
// parse_range parses a given string into a range of sorted integers. Its
// result is a BitField with set bits for all numbers in the result.
fn parse_range(s string, min int, max int) !bitfield.BitField {
mut start := min
mut end := max
mut interval := 1
mut bf := bitfield.new(max - min + 1)
exps := s.split('/')
if exps.len > 2 {
return error('Invalid expression.')
}
if exps[0] != '*' {
dash_parts := exps[0].split('-')
if dash_parts.len > 2 {
return error('Invalid expression.')
}
start = dash_parts[0].int()
// The builtin parsing functions return zero if the string can't be
// parsed into a number, so we have to explicitly check whether they
// actually entered zero or if it's an invalid number.
if start == 0 && dash_parts[0] != '0' {
return error('Invalid number.')
}
// Check whether the start value is out of range
if start < min || start > max {
return error('Out of range.')
}
if dash_parts.len == 2 {
end = dash_parts[1].int()
if end == 0 && dash_parts[1] != '0' {
return error('Invalid number.')
}
if end < start || end > max {
return error('Out of range.')
}
}
}
if exps.len > 1 {
interval = exps[1].int()
// interval being zero is always invalid, but we want to check why
// it's invalid for better error messages.
if interval == 0 {
if exps[1] != '0' {
return error('Invalid number.')
} else {
return error('Step size zero not allowed.')
}
}
if interval > max - min {
return error('Step size too large.')
}
}
// Here, s solely consists of a number, so that's the only value we
// should return.
else if exps[0] != '*' && !exps[0].contains('-') {
bf.set_bit(start - min)
return bf
}
for start <= end {
bf.set_bit(start - min)
start += interval
}
return bf
}
// bf_to_ints takes a BitField and converts it into the expected list of actual
// integers.
fn bf_to_ints(bf bitfield.BitField, min int) []int {
mut out := []int{}
for i in 0 .. bf.get_size() {
if bf.get_bit(i) == 1 {
out << min + i
}
}
return out
}
// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ![]int {
mut bf := bitfield.new(max - min + 1)
for range in s.split(',') {
bf2 := parse_range(range, min, max)!
bf = bitfield.bf_or(bf, bf2)
}
return bf_to_ints(bf, min)
}
// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) !CronExpression {
// The filter allows for multiple spaces between parts
mut parts := exp.split(' ').filter(it != '')
if parts.len < 2 || parts.len > 4 {
return error('Expression must contain between 2 and 4 space-separated parts.')
}
// For ease of use, we allow the user to only specify as many parts as they
// need.
for parts.len < 4 {
parts << '*'
}
mut part_results := [][]int{}
mins := [0, 0, 1, 1]
maxs := [59, 23, 31, 12]
// This for loop allows us to more clearly propagate the error to the user.
for i, min in mins {
part_results << parse_part(parts[i], min, maxs[i]) or {
return error('An error occurred with part $i: $err.msg()')
}
}
return CronExpression{
minutes: part_results[0]
hours: part_results[1]
days: part_results[2]
months: part_results[3]
}
}
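A worked example of how the padding and part expansion play out, assuming the module is imported as cron.expression (as server.v further down does): '0 3 */8' is padded to '0 3 */8 *', and its day part expands to [1, 9, 17, 25] (start 1, step 8, capped at 31).

```v
import cron.expression
import time

fn main() {
	ce := expression.parse_expression('0 3 */8') or { panic(err) }
	ref := time.parse('2002-01-10 00:00:00') or { panic(err) }
	// The next day in [1, 9, 17, 25] after the 10th is the 17th,
	// so this prints 2002-01-17 03:00:00
	println(ce.next(ref) or { panic(err) })
}
```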

View File

@ -0,0 +1,89 @@
module expression
// parse_range_error returns the error message produced by parse_range. If the
// result is '', the function didn't error.
fn parse_range_error(s string, min int, max int) string {
parse_range(s, min, max) or { return err.msg() }
return ''
}
// =====parse_range=====
fn test_range_star_range() ! {
bf := parse_range('*', 0, 5)!
assert bf_to_ints(bf, 0) == [0, 1, 2, 3, 4, 5]
}
fn test_range_number() ! {
bf := parse_range('4', 0, 5)!
assert bf_to_ints(bf, 0) == [4]
}
fn test_range_number_too_large() ! {
assert parse_range_error('10', 0, 6) == 'Out of range.'
}
fn test_range_number_too_small() ! {
assert parse_range_error('0', 2, 6) == 'Out of range.'
}
fn test_range_number_invalid() ! {
assert parse_range_error('x', 0, 6) == 'Invalid number.'
}
fn test_range_step_star_1() ! {
bf := parse_range('*/4', 0, 20)!
assert bf_to_ints(bf, 0) == [0, 4, 8, 12, 16, 20]
}
fn test_range_step_star_2() ! {
bf := parse_range('*/3', 1, 8)!
assert bf_to_ints(bf, 1) == [1, 4, 7]
}
fn test_range_step_star_too_large() ! {
assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
}
fn test_range_step_zero() ! {
assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
}
fn test_range_step_number() ! {
bf := parse_range('5/4', 2, 22)!
assert bf_to_ints(bf, 2) == [5, 9, 13, 17, 21]
}
fn test_range_step_number_too_large() ! {
assert parse_range_error('10/4', 0, 5) == 'Out of range.'
}
fn test_range_step_number_too_small() ! {
assert parse_range_error('2/4', 5, 10) == 'Out of range.'
}
fn test_range_dash() ! {
bf := parse_range('4-8', 0, 9)!
assert bf_to_ints(bf, 0) == [4, 5, 6, 7, 8]
}
fn test_range_dash_step() ! {
bf := parse_range('4-8/2', 0, 9)!
assert bf_to_ints(bf, 0) == [4, 6, 8]
}
// =====parse_part=====
fn test_part_single() ! {
assert parse_part('*', 0, 5)! == [0, 1, 2, 3, 4, 5]
}
fn test_part_multiple() ! {
assert parse_part('*/2,2/3', 1, 8)! == [1, 2, 3, 5, 7, 8]
}

View File

@ -1,4 +1,4 @@
-module cron
+module expression

 import time { parse }

@ -7,7 +7,7 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! {
 	t1 := parse(t1_str)!
 	t2 := parse(t2_str)!

-	t3 := ce.next(t1)
+	t3 := ce.next(t1)!

 	assert t2.year == t3.year
 	assert t2.month == t3.month
@ -18,18 +18,17 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! {
 fn test_next_simple() ! {
 	// Very simple
-	// util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
+	util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!

 	// Overlap to next day
-	mut exp := '0 3 '
-	util_test_time(exp, '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
-	util_test_time(exp, '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
+	util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
+	util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')!

-	util_test_time('0 3-7/4,7-19', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
+	util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!

-	//// Overlap to next month
+	// Overlap to next month
 	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')!

-	//// Overlap to next year
+	// Overlap to next year
 	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')!
 }

View File

@ -1,42 +0,0 @@
module cron
fn test_not_allowed() {
illegal_expressions := [
'4 *-7',
'4 *-7/4',
'4 7/*',
'0 0 30 2',
'0 /5',
'0 ',
'0',
' 0',
' 0 ',
'1 2 3 4~9',
'1 1-3-5',
'0 5/2-5',
'',
'1 1/2/3',
'*5 8',
'x 8',
]
mut res := false
for exp in illegal_expressions {
res = false
parse_expression(exp) or { res = true }
assert res, "'$exp' should produce an error"
}
}
fn test_auto_extend() ! {
ce1 := parse_expression('5 5')!
ce2 := parse_expression('5 5 *')!
ce3 := parse_expression('5 5 * *')!
assert ce1 == ce2 && ce2 == ce3
}
fn test_four() {
parse_expression('0 1 2 3 ') or { assert false }
}

View File

@ -1,6 +1,25 @@
 module db

-import models { Target, TargetArch }
+import models { Target, TargetArch, TargetFilter }
+
+// get_targets returns all targets in the database.
+pub fn (db &VieterDb) get_targets(filter TargetFilter) []Target {
+	// This seems to currently be blocked by a bug in the ORM; I'll have to
+	// ask around.
+	if filter.repo != '' {
+		res := sql db.conn {
+			select from Target where repo == filter.repo order by id limit filter.limit offset filter.offset
+		}
+
+		return res
+	}
+
+	res := sql db.conn {
+		select from Target order by id limit filter.limit offset filter.offset
+	}
+
+	return res
+}
+
 // get_target tries to return a specific target.
 pub fn (db &VieterDb) get_target(target_id int) ?Target {
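Since the filter is a params struct, callers only pass the fields they care about (init_job_queue further down calls get_targets(limit: 25) the same way). A hedged usage sketch; vdb stands in for an already-initialized VieterDb and the repo name is made up:

```v
fn print_page_sizes(vdb &db.VieterDb) {
	// First page of 25 (the filter's default limit) for one repo
	page1 := vdb.get_targets(repo: 'vieter')
	// Second page, requested explicitly
	page2 := vdb.get_targets(repo: 'vieter', limit: 25, offset: 25)
	println('$page1.len + $page2.len targets')
}
```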

View File

@ -1,129 +0,0 @@
module db
import models { Target, TargetFilter }
import sqlite
// Iterator providing a filtered view into the list of targets currently stored
// in the database. It replaces functionality usually performed in the database
// using SQL queries that can't currently be used due to missing stuff in V's
// ORM.
pub struct TargetsIterator {
conn sqlite.DB
filter TargetFilter
window_size int = 32
mut:
window []Target
window_index u64
// Offset in entire list of unfiltered targets
offset int
// Offset in filtered list of targets
filtered_offset u64
started bool
done bool
}
// targets returns an iterator allowing filtered access to the list of targets.
pub fn (db &VieterDb) targets(filter TargetFilter) TargetsIterator {
window_size := 32
return TargetsIterator{
conn: db.conn
filter: filter
window: []Target{cap: window_size}
window_size: window_size
}
}
// advance_window moves the sliding window over the filtered list of targets
// until it either reaches the end of the list of targets, or has encountered a
// non-empty window.
fn (mut ti TargetsIterator) advance_window() {
for {
ti.window = sql ti.conn {
select from Target order by id limit ti.window_size offset ti.offset
}
ti.offset += ti.window.len
if ti.window.len == 0 {
ti.done = true
return
}
if ti.filter.repo != '' {
ti.window = ti.window.filter(it.repo == ti.filter.repo)
}
if ti.filter.arch != '' {
ti.window = ti.window.filter(it.arch.any(it.value == ti.filter.arch))
}
if ti.filter.query != '' {
ti.window = ti.window.filter(it.url.contains(ti.filter.query)
|| it.path.contains(ti.filter.query) || it.branch.contains(ti.filter.query))
}
// We break out of the loop once we've found a non-empty window
if ti.window.len > 0 {
break
}
}
}
// next returns the next target, if possible.
pub fn (mut ti TargetsIterator) next() ?Target {
if ti.done {
return none
}
// The first call to `next` will cause the sliding window to move to where
// the requested offset starts
if !ti.started {
ti.advance_window()
// Skip all matched targets until the requested offset
for !ti.done && ti.filtered_offset + u64(ti.window.len) <= ti.filter.offset {
ti.filtered_offset += u64(ti.window.len)
ti.advance_window()
}
if ti.done {
return none
}
left_inside_window := ti.filter.offset - ti.filtered_offset
ti.window_index = left_inside_window
ti.filtered_offset += left_inside_window
ti.started = true
}
return_value := ti.window[ti.window_index]
ti.window_index++
ti.filtered_offset++
// Next call will be past the requested offset
if ti.filter.limit > 0 && ti.filtered_offset == ti.filter.offset + ti.filter.limit {
ti.done = true
}
// Ensure the next call has a new valid window
if ti.window_index == u64(ti.window.len) {
ti.advance_window()
ti.window_index = 0
}
return return_value
}
// collect consumes the entire iterator & returns the result as an array.
pub fn (mut ti TargetsIterator) collect() []Target {
mut out := []Target{}
for t in ti {
out << t
}
return out
}
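Because next returns ?Target, the iterator plugs straight into V's for-in protocol; collect is just that loop materialized into an array. A usage sketch (vdb again stands in for an already-initialized VieterDb):

```v
fn print_repo_urls(vdb &db.VieterDb) {
	// limit: 0 disables the limit; rows are still fetched 32 at a time
	mut iter := vdb.targets(repo: 'vieter', limit: 0)
	for target in iter {
		println(target.url)
	}
}
```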

@ -1 +0,0 @@
Subproject commit 379a05a7b6b604c107360e0a679fb3ea5400e02c

View File

@ -8,7 +8,7 @@ import console.logs
 import console.schedule
 import console.man
 import console.aur
-import console.repos
+import cron
 import agent

 fn main() {
@ -20,8 +20,7 @@ fn main() {
 	mut app := cli.Command{
 		name: 'vieter'
 		description: 'Vieter is a lightweight implementation of an Arch repository server.'
-		version: '0.5.0'
-		posix_mode: true
+		version: '0.4.0'
 		flags: [
 			cli.Flag{
 				flag: cli.FlagType.string
@ -42,12 +41,12 @@ fn main() {
 		commands: [
 			server.cmd(),
 			targets.cmd(),
+			cron.cmd(),
 			logs.cmd(),
 			schedule.cmd(),
 			man.cmd(),
 			aur.cmd(),
 			agent.cmd(),
-			repos.cmd(),
 		]
 	}

 	app.setup()

View File

@ -1,7 +1,6 @@
 module models

 import time
-import os

 pub struct BuildLog {
 pub mut:
@ -29,13 +28,6 @@ pub fn (bl &BuildLog) str() string {
 	return str
 }

-// path returns the path to the log file, relative to the logs directory
-pub fn (bl &BuildLog) path() string {
-	filename := bl.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
-
-	return os.join_path(bl.target_id.str(), bl.arch, filename)
-}
-
 [params]
 pub struct BuildLogFilter {
 pub mut:

View File

@ -73,6 +73,4 @@ pub mut:
 	limit  u64 = 25
 	offset u64
 	repo   string
-	query  string
-	arch   string
 }

View File

@ -4,7 +4,7 @@ import web
 import web.response { new_data_response, new_response }

 // v1_poll_job_queue allows agents to poll for new build jobs.
-['/api/v1/jobs/poll'; auth; get; markused]
+['/api/v1/jobs/poll'; auth; get]
 fn (mut app App) v1_poll_job_queue() web.Result {
 	arch := app.query['arch'] or {
 		return app.json(.bad_request, new_response('Missing arch query arg.'))
@ -21,7 +21,7 @@ fn (mut app App) v1_poll_job_queue() web.Result {
 }

 // v1_queue_job allows queueing a new one-time build job for the given target.
-['/api/v1/jobs/queue'; auth; markused; post]
+['/api/v1/jobs/queue'; auth; post]
 fn (mut app App) v1_queue_job() web.Result {
 	target_id := app.query['target'] or {
 		return app.json(.bad_request, new_response('Missing target query arg.'))

View File

@ -11,7 +11,7 @@ import models { BuildLog, BuildLogFilter }
 // v1_get_logs returns all build logs in the database. A 'target' query param can
 // optionally be added to limit the list of build logs to that repository.
-['/api/v1/logs'; auth; get; markused]
+['/api/v1/logs'; auth; get]
 fn (mut app App) v1_get_logs() web.Result {
 	filter := models.from_params<BuildLogFilter>(app.query) or {
 		return app.json(.bad_request, new_response('Invalid query parameters.'))
@ -22,7 +22,7 @@ fn (mut app App) v1_get_logs() web.Result {
 }

 // v1_get_single_log returns the build log with the given id.
-['/api/v1/logs/:id'; auth; get; markused]
+['/api/v1/logs/:id'; auth; get]
 fn (mut app App) v1_get_single_log(id int) web.Result {
 	log := app.db.get_build_log(id) or { return app.status(.not_found) }
@ -30,7 +30,7 @@ fn (mut app App) v1_get_single_log(id int) web.Result {
 }

 // v1_get_log_content returns the actual build log file for the given id.
-['/api/v1/logs/:id/content'; auth; get; markused]
+['/api/v1/logs/:id/content'; auth; get]
 fn (mut app App) v1_get_log_content(id int) web.Result {
 	log := app.db.get_build_log(id) or { return app.status(.not_found) }
 	file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
@ -50,7 +50,7 @@ fn parse_query_time(query string) !time.Time {
 }

 // v1_post_log adds a new log to the database.
-['/api/v1/logs'; auth; markused; post]
+['/api/v1/logs'; auth; post]
 fn (mut app App) v1_post_log() web.Result {
 	// Parse query params
 	start_time_int := app.query['startTime'].int()
@ -86,7 +86,7 @@ fn (mut app App) v1_post_log() web.Result {
 	}

 	// Store log in db
-	mut log := BuildLog{
+	log := BuildLog{
 		target_id: target_id
 		start_time: start_time
 		end_time: end_time
@ -95,20 +95,25 @@ fn (mut app App) v1_post_log() web.Result {
 	}

 	// id of newly created log
-	log.id = app.db.add_build_log(log)
-	log_file_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
+	log_id := app.db.add_build_log(log)
+	repo_logs_dir := os.join_path(app.conf.data_dir, logs_dir_name, target_id.str(), arch)

 	// Create the logs directory if it doesn't exist
-	if !os.exists(os.dir(log_file_path)) {
-		os.mkdir_all(os.dir(log_file_path)) or {
-			app.lerror('Error while creating log file: $err.msg()')
+	if !os.exists(repo_logs_dir) {
+		os.mkdir_all(repo_logs_dir) or {
+			app.lerror("Couldn't create dir '$repo_logs_dir'.")
 			return app.status(.internal_server_error)
 		}
 	}

+	// Stream log contents to correct file
+	file_name := start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
+	full_path := os.join_path_single(repo_logs_dir, file_name)
+
 	if length := app.req.header.get(.content_length) {
-		util.reader_to_file(mut app.reader, length.int(), log_file_path) or {
+		util.reader_to_file(mut app.reader, length.int(), full_path) or {
 			app.lerror('An error occurred while receiving logs: $err.msg()')
 			return app.status(.internal_server_error)
@ -117,22 +122,5 @@ fn (mut app App) v1_post_log() web.Result {
 		return app.status(.length_required)
 	}

-	return app.json(.ok, new_data_response(log.id))
+	return app.json(.ok, new_data_response(log_id))
 }
-
-// v1_delete_log allows removing a build log from the system.
-['/api/v1/logs/:id'; auth; delete; markused]
-fn (mut app App) v1_delete_log(id int) web.Result {
-	log := app.db.get_build_log(id) or { return app.status(.not_found) }
-	full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
-
-	os.rm(full_path) or {
-		app.lerror('Failed to remove log file $full_path: $err.msg()')
-
-		return app.status(.internal_server_error)
-	}
-
-	app.db.delete_build_log(id)
-
-	return app.status(.ok)
-}
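Both sides of this diff store a log file under the same layout; the change only moves where the path is computed (BuildLog.path on one side, inline in the handler on the other). A sketch of that computation, assuming logs_dir_name resolves to 'logs':

```v
import os
import time

// Hypothetical helper mirroring the path logic from both sides of the diff,
// e.g. <data_dir>/logs/4/x86_64/2022-12-17_10-02-39
fn build_log_path(data_dir string, target_id int, arch string, start_time time.Time) string {
	filename := start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
	return os.join_path(data_dir, 'logs', target_id.str(), arch, filename)
}
```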

View File

@ -1,19 +0,0 @@
module server
import metrics
import web
// v1_metrics serves a Prometheus-compatible metrics endpoint.
['/api/v1/metrics'; get; markused]
fn (mut app App) v1_metrics() web.Result {
if !app.conf.collect_metrics {
return app.status(.not_found)
}
mut exporter := metrics.new_prometheus_exporter()
exporter.load('vieter_', app.collector)
// TODO stream to connection instead
body := exporter.export_to_string() or { return app.status(.internal_server_error) }
return app.body(.ok, 'text/plain', body)
}
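The removed endpoint only relies on calls that are visible elsewhere in this diff (the collector constructors and counter_increment in the web module below, load and export_to_string here). Gluing them together as a sketch; the label values are made up:

```v
import metrics

fn main() {
	mut collector := &metrics.MetricsCollector(metrics.new_default_collector())
	labels := [
		['method', 'GET']!,
		['path', '/health']!,
	]
	collector.counter_increment(name: 'http_requests_total', labels: labels)

	mut exporter := metrics.new_prometheus_exporter()
	exporter.load('vieter_', collector)
	println(exporter.export_to_string() or { return })
}
```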

View File

@ -6,18 +6,18 @@ import db
 import models { Target, TargetArch, TargetFilter }

 // v1_get_targets returns the current list of targets.
-['/api/v1/targets'; auth; get; markused]
+['/api/v1/targets'; auth; get]
 fn (mut app App) v1_get_targets() web.Result {
 	filter := models.from_params<TargetFilter>(app.query) or {
 		return app.json(.bad_request, new_response('Invalid query parameters.'))
 	}
-	mut iter := app.db.targets(filter)
+	targets := app.db.get_targets(filter)

-	return app.json(.ok, new_data_response(iter.collect()))
+	return app.json(.ok, new_data_response(targets))
 }

 // v1_get_single_target returns the information for a single target.
-['/api/v1/targets/:id'; auth; get; markused]
+['/api/v1/targets/:id'; auth; get]
 fn (mut app App) v1_get_single_target(id int) web.Result {
 	target := app.db.get_target(id) or { return app.status(.not_found) }
@ -25,7 +25,7 @@ fn (mut app App) v1_get_single_target(id int) web.Result {
 }

 // v1_post_target creates a new target from the provided query string.
-['/api/v1/targets'; auth; markused; post]
+['/api/v1/targets'; auth; post]
 fn (mut app App) v1_post_target() web.Result {
 	mut params := app.query.clone()
@ -55,7 +55,7 @@ fn (mut app App) v1_post_target() web.Result {
 }

 // v1_delete_target removes a given target from the server's list.
-['/api/v1/targets/:id'; auth; delete; markused]
+['/api/v1/targets/:id'; auth; delete]
 fn (mut app App) v1_delete_target(id int) web.Result {
 	app.db.delete_target(id)
 	app.job_queue.invalidate(id)
@ -64,7 +64,7 @@ fn (mut app App) v1_delete_target(id int) web.Result {
 }

 // v1_patch_target updates a target's data with the given query params.
-['/api/v1/targets/:id'; auth; markused; patch]
+['/api/v1/targets/:id'; auth; patch]
 fn (mut app App) v1_patch_target(id int) web.Result {
 	app.db.update_target(id, app.query)

View File

@ -5,17 +5,14 @@ import conf as vconf
 struct Config {
 pub:
-	port                 int    = 8000
 	log_level            string = 'WARN'
 	pkg_dir              string
 	data_dir             string
 	api_key              string
 	default_arch         string
 	global_schedule      string = '0 3'
+	port                 int    = 8000
 	base_image           string = 'archlinux:base-devel'
-	max_log_age          int    [empty_default]
-	log_removal_schedule string = '0 0'
-	collect_metrics      bool   [empty_default]
 }
// cmd returns the cli submodule that handles starting the server // cmd returns the cli submodule that handles starting the server

View File

@ -1,53 +0,0 @@
module server
import time
import models { BuildLog }
import os
import cron
const fallback_log_removal_frequency = 24 * time.hour
// log_removal_daemon removes old build logs according to the given cron schedule.
fn (mut app App) log_removal_daemon(schedule &cron.Expression) {
for {
mut too_old_timestamp := time.now().add_days(-app.conf.max_log_age)
app.linfo('Cleaning logs before $too_old_timestamp')
mut logs := []BuildLog{}
mut counter := 0
mut failed := u64(0)
// Remove old logs
for {
// The offset is used to skip logs that failed to remove. Besides
// this, we don't need to move the offset, because all previously
// oldest logs will have been removed.
logs = app.db.get_build_logs(before: too_old_timestamp, offset: failed, limit: 50)
for log in logs {
log_file_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
os.rm(log_file_path) or {
app.lerror('Failed to remove log file $log_file_path: $err.msg()')
failed += 1
continue
}
app.db.delete_build_log(log.id)
counter += 1
}
if logs.len < 50 {
break
}
}
app.linfo('Cleaned $counter logs ($failed failed)')
// Sleep until the next cycle
next_time := schedule.next_from_now()
time.sleep(next_time - time.now())
}
}
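The paging in the removal loop is subtle: successful deletes shift the remaining old logs down, so the query offset only needs to grow past entries that failed to delete. The pattern in isolation, with hypothetical fetch/remove stand-ins:

```v
// fetch and remove are stubs; only the offset arithmetic mirrors the
// daemon above.
fn fetch(offset u64, limit int) []int {
	return []
}

fn remove(item int) ! {
}

fn main() {
	mut failed := u64(0)
	for {
		batch := fetch(failed, 50)
		for item in batch {
			remove(item) or {
				// Failed entries stay in place, so skip past them next round
				failed += 1
				continue
			}
		}
		// A short batch means we've reached the end of the old entries
		if batch.len < 50 {
			break
		}
	}
}
```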

View File

@ -10,7 +10,7 @@ import web.response { new_data_response, new_response }
 // healthcheck just returns a string, but can be used to quickly check if the
 // server is still responsive.
-['/health'; get; markused]
+['/health'; get]
 pub fn (mut app App) healthcheck() web.Result {
 	return app.json(.ok, new_response('Healthy.'))
 }
@ -18,7 +18,7 @@ pub fn (mut app App) healthcheck() web.Result {
 // get_repo_file handles all Pacman-related routes. It returns the repository's
 // archives, as well as package archives and the contents of a package's desc
 // file.
-['/:repo/:arch/:filename'; get; head; markused]
+['/:repo/:arch/:filename'; get; head]
 fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
 	mut full_path := ''
@ -48,7 +48,7 @@ fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
 }

 // put_package handles publishing a package to a repository.
-['/:repo/publish'; auth; markused; post]
+['/:repo/publish'; auth; post]
 fn (mut app App) put_package(repo string) web.Result {
 	// api is a reserved keyword for api routes & should never be allowed to be
 	// a repository.

View File

@ -3,7 +3,7 @@ module server
 import web

 // delete_package tries to remove the given package.
-['/:repo/:arch/:pkg'; auth; delete; markused]
+['/:repo/:arch/:pkg'; auth; delete]
 fn (mut app App) delete_package(repo string, arch string, pkg string) web.Result {
 	res := app.repo.remove_pkg_from_arch_repo(repo, arch, pkg, true) or {
 		app.lerror('Error while deleting package: $err.msg()')
@ -23,7 +23,7 @@ fn (mut app App) delete_package(repo string, arch string, pkg string) web.Result {
 }

 // delete_arch_repo tries to remove the given arch-repo.
-['/:repo/:arch'; auth; delete; markused]
+['/:repo/:arch'; auth; delete]
 fn (mut app App) delete_arch_repo(repo string, arch string) web.Result {
 	res := app.repo.remove_arch_repo(repo, arch) or {
 		app.lerror('Error while deleting arch-repo: $err.msg()')
@ -43,7 +43,7 @@ fn (mut app App) delete_arch_repo(repo string, arch string) web.Result {
 }

 // delete_repo tries to remove the given repo.
-['/:repo'; auth; delete; markused]
+['/:repo'; auth; delete]
 fn (mut app App) delete_repo(repo string) web.Result {
 	res := app.repo.remove_repo(repo) or {
 		app.lerror('Error while deleting repo: $err.msg()')

View File

@ -7,8 +7,7 @@ import repo
 import util
 import db
 import build { BuildJobQueue }
-import cron
-import metrics
+import cron.expression

 const (
 	log_file_name = 'vieter.log'
@ -31,9 +30,18 @@ pub mut:
 // init_job_queue populates a fresh job queue with all the targets currently
 // stored in the database.
 fn (mut app App) init_job_queue() ! {
-	for target in app.db.targets(limit: 0) {
-		app.job_queue.insert_all(target)!
-	}
+	// Initialize build queues
+	mut targets := app.db.get_targets(limit: 25)
+	mut i := u64(0)
+
+	for targets.len > 0 {
+		for target in targets {
+			app.job_queue.insert_all(target)!
+		}
+
+		i += 25
+		targets = app.db.get_targets(limit: 25, offset: i)
+	}
 }

 // server starts the web server & starts listening for requests
@ -43,14 +51,10 @@ pub fn server(conf Config) ! {
 		util.exit_with_message(1, "'any' is not allowed as the value for default_arch.")
 	}

-	global_ce := cron.parse_expression(conf.global_schedule) or {
+	global_ce := expression.parse_expression(conf.global_schedule) or {
 		util.exit_with_message(1, 'Invalid global cron expression: $err.msg()')
 	}

-	log_removal_ce := cron.parse_expression(conf.log_removal_schedule) or {
-		util.exit_with_message(1, 'Invalid log removal cron expression: $err.msg()')
-	}
-
 	// Configure logger
 	log_level := log.level_from_tag(conf.log_level) or {
 		util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
@ -92,31 +96,17 @@ pub fn server(conf Config) ! {
 		util.exit_with_message(1, 'Failed to initialize database: $err.msg()')
 	}

-	mut collector := if conf.collect_metrics {
-		&metrics.MetricsCollector(metrics.new_default_collector())
-	} else {
-		&metrics.MetricsCollector(metrics.new_null_collector())
-	}
-
-	collector.histogram_buckets_set('http_requests_duration_seconds', [0.001, 0.005,
-		0.01, 0.05, 0.1, 0.5, 1, 5, 10])
-
 	mut app := &App{
 		logger: logger
 		api_key: conf.api_key
 		conf: conf
 		repo: repo
 		db: db
-		collector: collector
 		job_queue: build.new_job_queue(global_ce, conf.base_image)
 	}
 	app.init_job_queue() or {
 		util.exit_with_message(1, 'Failed to initialize job queue: $err.msg()')
 	}

-	if conf.max_log_age > 0 {
-		go app.log_removal_daemon(log_removal_ce)
-	}
-
 	web.run(app, conf.port)
 }

View File

@ -2,7 +2,6 @@ Module {
 	dependencies: [
 		'https://git.rustybever.be/vieter-v/conf',
 		'https://git.rustybever.be/vieter-v/docker',
-		'https://git.rustybever.be/vieter-v/aur',
-		'https://git.rustybever.be/vieter-v/metrics'
+		'https://git.rustybever.be/vieter-v/aur'
 	]
 }

View File

@ -1,36 +1,35 @@
 module web

+import log
+
+// log creates a log message with the given level
+pub fn (mut ctx Context) log(msg string, level log.Level) {
+	lock ctx.logger {
+		ctx.logger.send_output(msg, level)
+	}
+}
+
 // lfatal creates a log message with the fatal level
 pub fn (mut ctx Context) lfatal(msg string) {
-	lock ctx.logger {
-		ctx.logger.fatal(msg)
-	}
+	ctx.log(msg, log.Level.fatal)
 }

 // lerror creates a log message with the error level
 pub fn (mut ctx Context) lerror(msg string) {
-	lock ctx.logger {
-		ctx.logger.error(msg)
-	}
+	ctx.log(msg, log.Level.error)
 }

 // lwarn creates a log message with the warn level
 pub fn (mut ctx Context) lwarn(msg string) {
-	lock ctx.logger {
-		ctx.logger.warn(msg)
-	}
+	ctx.log(msg, log.Level.warn)
 }

 // linfo creates a log message with the info level
 pub fn (mut ctx Context) linfo(msg string) {
-	lock ctx.logger {
-		ctx.logger.info(msg)
-	}
+	ctx.log(msg, log.Level.info)
 }

 // ldebug creates a log message with the debug level
 pub fn (mut ctx Context) ldebug(msg string) {
-	lock ctx.logger {
-		ctx.logger.debug(msg)
-	}
+	ctx.log(msg, log.Level.debug)
 }

View File

@ -5,7 +5,7 @@ import net.http
 // Method attributes that should be ignored when parsing, as they're used
 // elsewhere.
-const attrs_to_ignore = ['auth', 'markused']
+const attrs_to_ignore = ['auth']

 // Parsing function attributes for methods and path.
 fn parse_attrs(name string, attrs []string) !([]http.Method, string) {

View File

@ -11,7 +11,6 @@ import net.urllib
 import time
 import json
 import log
-import metrics

 // The Context struct represents the Context which holds the HTTP request and response.
 // It has fields for the query, form, files.
@ -28,8 +27,6 @@ pub mut:
 	conn &net.TcpConn = unsafe { nil }
 	// Gives access to a shared logger object
 	logger shared log.Log
-	// Used to collect metrics on the web server
-	collector &metrics.MetricsCollector
 	// time.ticks() from start of web connection handle.
 	// You can use it to determine how much time is spent on your request.
 	page_gen_start i64
@ -148,15 +145,6 @@ pub fn (ctx &Context) is_authenticated() bool {
 	return false
 }

-// body sends the given body as an HTTP response.
-pub fn (mut ctx Context) body(status http.Status, content_type string, body string) Result {
-	ctx.status = status
-	ctx.content_type = content_type
-
-	ctx.send_response(body)
-
-	return Result{}
-}
-
 // json<T> HTTP_OK with json_s as payload with content-type `application/json`
 pub fn (mut ctx Context) json<T>(status http.Status, j T) Result {
 	ctx.status = status
@ -331,22 +319,6 @@ fn handle_conn<T>(mut conn net.TcpConn, mut app T, routes map[string]Route) {
 		app.logger.flush()
 	}

-	// Record how long the request took to process
-	labels := [
-		['method', app.req.method.str()]!,
-		['path', app.req.url]!,
-		// Not all methods properly set this value yet, I think
-		['status', app.status.int().str()]!,
-	]
-	app.collector.counter_increment(name: 'http_requests_total', labels: labels)
-	// Prometheus prefers metrics containing base units, as defined here:
-	// https://prometheus.io/docs/practices/naming/
-	app.collector.histogram_record(f64(time.ticks() - app.page_gen_start) / 1000,
-		name: 'http_requests_duration_seconds'
-		labels: labels
-	)
-
 	unsafe {
 		free(app)
 	}
@ -412,7 +384,6 @@ fn handle_conn<T>(mut conn net.TcpConn, mut app T, routes map[string]Route) {
 		static_mime_types: app.static_mime_types
 		reader: reader
 		logger: app.logger
-		collector: app.collector
 		api_key: app.api_key
 	}

View File

@ -12,6 +12,3 @@ address = "http://localhost:8000"
 api_update_frequency = 2
 image_rebuild_frequency = 1
 max_concurrent_builds = 3
-# max_log_age = 64
-log_removal_schedule = '* * *'
-collect_metrics = true