Compare commits


1 commit (dev...docs)

Author: Jef Roosens | SHA1: 1f4b89f820 | Message: docs: started terminology page | Date: 2023-01-03 17:20:40 +01:00
89 changed files with 1340 additions and 933 deletions

@@ -1,4 +0,0 @@
# To stay consistent with the V formatting style, we use tabs
UseTab: Always
IndentWidth: 4
TabWidth: 4

@@ -5,5 +5,6 @@ root = true
end_of_line = lf
insert_final_newline = true
[*.{v,c,h}]
[*.v]
# vfmt wants it :(
indent_style = tab

.gitignore

@@ -1,4 +1,4 @@
vieter.c
*.c
/data/
# Build artifacts

.gitmodules

@@ -1,6 +1,3 @@
[submodule "docs/themes/hugo-book"]
path = docs/themes/hugo-book
url = https://github.com/alex-shpak/hugo-book
[submodule "src/libvieter"]
path = src/libvieter
url = https://git.rustybever.be/vieter-v/libvieter

@@ -10,7 +10,6 @@ skip_clone: true
pipeline:
build:
image: 'git.rustybever.be/vieter-v/vieter-builder'
pull: true
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf

@@ -10,7 +10,6 @@ skip_clone: true
pipeline:
build:
image: 'git.rustybever.be/vieter-v/vieter-builder'
pull: true
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
matrix:
PLATFORM:
@@ -57,7 +57,7 @@ pipeline:
- export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')"
- export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH"
- export SIGNATURE="$(echo -en $SIG_STRING | openssl dgst -sha1 -hmac $S3_PASSWORD -binary | base64)"
- export SIGNATURE="$(echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64)"
- >
curl
--silent
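
Both sides of the signature hunk above compute the same S3-style v2 authorization: a Base64-encoded HMAC-SHA1 of the string-to-sign, keyed with `$S3_PASSWORD`; only the `openssl` invocation differs. A minimal V sketch of that signing step (function name and example values are illustrative, not part of the repo):

```v
import crypto.hmac
import crypto.sha1
import encoding.base64

// sign_s3_v2 mirrors the pipeline's SIG_STRING step:
// base64(HMAC-SHA1(secret, "PUT\n\n<content-type>\n<date>\n<object-path>"))
fn sign_s3_v2(secret string, content_type string, date string, obj_path string) string {
	sig_string := 'PUT\n\n${content_type}\n${date}\n${obj_path}'
	digest := hmac.new(secret.bytes(), sig_string.bytes(), sha1.sum, sha1.block_size)
	return base64.encode(digest)
}

fn main() {
	sig := sign_s3_v2('s3-password', 'application/zstd', 'Tue, 03 Jan 2023 17:20:40 +0100',
		'/vieter/commits/abc123/vieter-linux-amd64')
	println(sig)
}
```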

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
platform: 'linux/amd64'
branches:
@@ -21,9 +21,8 @@ pipeline:
- make api-docs
slate-docs:
image: 'slatedocs/slate:v2.13.0'
image: 'slatedocs/slate'
group: 'generate'
# Slate requires a specific directory to run in
commands:
- cd docs/api
- bundle exec middleman build --clean

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
platform: 'linux/amd64'
branches: [ 'main' ]

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
# These checks already get performed on the feature branches
branches:

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
platform: 'linux/amd64'
branches:
@@ -8,21 +8,15 @@ branches:
depends_on:
- build
skip_clone: true
pipeline:
install-modules:
generate:
image: *vlang_image
pull: true
commands:
- export VMODULES=$PWD/.vmodules
- 'cd src && v install'
generate:
image: *vlang_image
commands:
# - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
# - chmod +x vieter
- export VMODULES=$PWD/.vmodules
- make
- curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
- chmod +x vieter
- ./vieter man man
- cd man

@@ -1,5 +1,5 @@
variables:
- &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
- &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
matrix:
PLATFORM:

@@ -7,31 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
## [0.6.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.6.0)
### Added
* Metrics endpoint for Prometheus integration
* Search in list of targets using API & CLI
* Allow filtering targets by arch value
* Configurable global timeout for builds
### Changed
* Rewrote cron expression logic in C
* Updated codebase to V commit after 0.3.3
* Agents now use worker threads and no longer spawn a new thread for every
build
### Fixed
* Package upload now fails if TCP connection is closed before all bytes have
been received
### Removed
* Deprecated cron daemon
## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0)
### Added

@@ -1,20 +1,16 @@
# =====CONFIG=====
SRC_DIR := src
SRCS != find '$(SRC_DIR)' -iname '*.v'
SOURCES != find '$(SRC_DIR)' -iname '*.v'
V_PATH ?= v
V := $(V_PATH) -showcc -gc boehm -d use_openssl -skip-unused
V := $(V_PATH) -showcc -gc boehm -W -d use_openssl -skip-unused
all: vieter
# =====COMPILATION=====
.PHONY: libvieter
libvieter:
make -C '$(SRC_DIR)/libvieter' CFLAGS='-O3'
# Regular binary
vieter: $(SOURCES) libvieter
vieter: $(SOURCES)
$(V) -g -o vieter $(SRC_DIR)
# Debug build using gcc
@@ -22,7 +18,7 @@ vieter: $(SOURCES) libvieter
# multi-threaded and causes issues when running vieter inside gdb.
.PHONY: debug
debug: dvieter
dvieter: $(SOURCES) libvieter
dvieter: $(SOURCES)
$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)
# Run the debug build inside gdb
@@ -33,12 +29,12 @@ gdb: dvieter
# Optimised production build
.PHONY: prod
prod: pvieter
pvieter: $(SOURCES) libvieter
pvieter: $(SOURCES)
$(V) -o pvieter -prod $(SRC_DIR)
# Only generate C code
.PHONY: c
c: $(SOURCES) libvieter
c: $(SOURCES)
$(V) -o vieter.c $(SRC_DIR)
@@ -71,7 +67,6 @@ man: vieter
# =====OTHER=====
# Linting
.PHONY: lint
lint:
$(V) fmt -verify $(SRC_DIR)
@@ -79,24 +74,18 @@ lint:
$(V_PATH) missdoc -p $(SRC_DIR)
@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
# Formatting
# Format the V codebase
.PHONY: fmt
fmt:
$(V) fmt -w $(SRC_DIR)
# Testing
.PHONY: test
test: libvieter
$(V) -g test $(SRC_DIR)
test:
$(V) test $(SRC_DIR)
# Cleaning
.PHONY: clean
clean:
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public'
make -C '$(SRC_DIR)/libvieter' clean
# =====EXPERIMENTAL=====

@@ -3,31 +3,21 @@
pkgbase='vieter'
pkgname='vieter'
pkgver='0.6.0'
pkgver='0.5.0'
pkgrel=1
pkgdesc="Lightweight Arch repository server & package build system"
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
makedepends=('git' 'vieter-vlang')
makedepends=('git' 'vlang')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
source=(
"$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}"
"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
)
md5sums=('SKIP' 'SKIP')
source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}")
md5sums=('SKIP')
prepare() {
cd "${pkgname}"
export VMODULES="$srcdir/.vmodules"
# Add the libvieter submodule
git submodule init
git config submodules.src/libvieter.url "${srcdir}/libvieter"
git -c protocol.file.allow=always submodule update
export VMODULES="${srcdir}/.vmodules"
cd src && v install
cd "$pkgname/src" && v install
}
build() {

@@ -7,41 +7,31 @@ pkgver=0.2.0.r25.g20112b8
pkgrel=1
pkgdesc="Lightweight Arch repository server & package build system (development version)"
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
makedepends=('git' 'vieter-vlang')
makedepends=('git' 'vlang')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
source=(
"${pkgname}::git+https://git.rustybever.be/vieter-v/vieter#branch=dev"
"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
)
md5sums=('SKIP' 'SKIP')
source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#branch=dev")
md5sums=('SKIP')
provides=('vieter')
conflicts=('vieter')
pkgver() {
cd "${pkgname}"
cd "$pkgname"
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
cd "${pkgname}"
export VMODULES="$srcdir/.vmodules"
# Add the libvieter submodule
git submodule init
git config submodules.src/libvieter.url "${srcdir}/libvieter"
git -c protocol.file.allow=always submodule update
export VMODULES="${srcdir}/.vmodules"
cd src && v install
cd "$pkgname/src" && v install
}
build() {
export VMODULES="${srcdir}/.vmodules"
export VMODULES="$srcdir/.vmodules"
cd "${pkgname}"
cd "$pkgname"
make prod
@@ -52,9 +42,9 @@ build() {
}
package() {
install -dm755 "${pkgdir}/usr/bin"
install -Dm755 "${pkgname}/pvieter" "${pkgdir}/usr/bin/vieter"
install -dm755 "$pkgdir/usr/bin"
install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
install -dm755 "${pkgdir}/usr/share/man/man1"
install -Dm644 "${pkgname}/man"/*.1 "${pkgdir}/usr/share/man/man1"
install -dm755 "$pkgdir/usr/share/man/man1"
install -Dm644 "$pkgname/man"/*.1 "$pkgdir/usr/share/man/man1"
}

@@ -48,9 +48,9 @@ update`.
### Compiler
V is developed using a specific compiler commit that is usually updated
whenever a new version is released. Information on this can be found in the
[tools](https://git.rustybever.be/vieter-v/tools) repository.
I used to maintain a mirror that tracked the latest master, but nowadays, I
maintain a Docker image containing the specific compiler version that Vieter
builds with. Currently, this is V 0.3.2.
## Contributing

@@ -55,8 +55,6 @@ Parameter | Description
limit | Maximum amount of results to return.
offset | Offset of results.
repo | Limit results to targets that publish to the given repo.
query | Only return targets that have this substring in their URL, path or branch.
arch | Only return targets that publish to this arch.
## Get specific target

@@ -10,8 +10,9 @@ documentation might not be relevant anymore for the latest release.
## Overview
Vieter consists of two main parts, namely an implementation of an Arch
repository server & a scheduling system to periodically build Pacman packages &
publish them to a repository.
repository server & an accompanying build system for populating said repository
by periodically building packages. Package builds happen in isolated Docker
containers.
{{< hint info >}}
**Note**

@@ -0,0 +1,39 @@
---
weight: 40
---
# Terminology
Throughout both the codebase and the documentation, I use a couple of terms
that might be confusing at first glance. This page aims to explain this
Vieter-specific jargon.
## Repo & arch-repo
In Pacman jargon, a repository is simply an HTTP(S) web server that provides a
database archive, and optionally a files archive. This database archive
contains information about what packages are contained in the repository,
including the filenames of the actual package tarballs that Pacman then
downloads when installing and updating packages.
In Vieter, this is called an arch-repository. The reason for this change in
naming is that Vieter allows packages of different architectures to reside
under the same repository name. For a Vieter repository named `vieter`, there
can be packages built for the `x86_64` and the `aarch64` architecture. For both
of these architectures, a Pacman repository (or arch-repository) is created,
and it's these architecture-specific repositories that are then accessed by
Pacman. On either architecture, the repository could then be added to
`/etc/pacman.conf` as follows:
```
[vieter]
Server = https://arch.r8r.be/$repo/$arch
SigLevel = Optional
```
## Target
A target describes a build that should be periodically performed. Each target
consists of a URL to a Git repository containing a PKGBUILD (or a direct URL to
said PKGBUILD), among various other configuration options. The list of targets
defines what builds need to be performed at what times, on what architectures.
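
As a usage sketch, a target like the ones described above can be registered through the project's HTTP client module, shown later in this diff; the server address, API key, and package URL below are placeholders, and only the `kind`, `url`, and `repo` fields that appear in this diff are used:

```v
import client { NewTarget }

fn main() {
	c := client.new('https://example.com', 'secret-api-key')
	// Register a Git target; `repo` is the Vieter repository the
	// resulting packages get published to.
	target_id := c.add_target(NewTarget{
		kind: 'git'
		url: 'https://aur.archlinux.org/vieter.git'
		repo: 'vieter'
	}) or {
		eprintln('Failed to add target: ${err.msg()}')
		return
	}
	println('Target added with id ${target_id}')
}
```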

@@ -23,9 +23,9 @@ pub fn cmd() cli.Command {
description: 'Start an agent daemon.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
agent(conf_)!
agent(conf)!
}
}
}

@@ -20,13 +20,11 @@ struct AgentDaemon {
client client.Client
mut:
images ImageManager
// Which builds are currently running; length is conf.max_concurrent_builds
builds []BuildConfig
// Atomic variables used to detect when a build has finished; length is
// conf.max_concurrent_builds. This approach is used as the difference
// between a recently finished build and an empty build slot is important
// for knowing whether the agent is currently "active".
// conf.max_concurrent_builds
atomics []u64
// Channel used to send builds to worker threads
build_channel chan BuildConfig
}
// agent_init initializes a new agent
@@ -36,8 +34,8 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
client: client.new(conf.address, conf.api_key)
conf: conf
images: new_image_manager(conf.image_rebuild_frequency * 60)
builds: []BuildConfig{len: conf.max_concurrent_builds}
atomics: []u64{len: conf.max_concurrent_builds}
build_channel: chan BuildConfig{cap: conf.max_concurrent_builds}
}
return d
@@ -45,11 +43,6 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
// run starts the actual agent daemon. This function will run forever.
pub fn (mut d AgentDaemon) run() {
// Spawn worker threads
for builder_index in 0 .. d.conf.max_concurrent_builds {
spawn d.builder_thread(d.build_channel, builder_index)
}
// This is just so that the very first time the loop is ran, the jobs are
// always polled
mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
@@ -58,7 +51,7 @@ pub fn (mut d AgentDaemon) run() {
for {
if sleep_time > 0 {
d.ldebug('Sleeping for ${sleep_time}')
d.ldebug('Sleeping for $sleep_time')
time.sleep(sleep_time)
}
@@ -87,14 +80,14 @@ pub fn (mut d AgentDaemon) run() {
d.ldebug('Polling for new jobs')
new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or {
d.lerror('Failed to poll jobs: ${err.msg()}')
d.lerror('Failed to poll jobs: $err.msg()')
// TODO pick a better delay here
sleep_time = 5 * time.second
continue
}
d.ldebug('Received ${new_configs.len} jobs')
d.ldebug('Received $new_configs.len jobs')
last_poll_time = time.now()
@@ -102,7 +95,7 @@ pub fn (mut d AgentDaemon) run() {
// Make sure a recent build base image is available for
// building the config
if !d.images.up_to_date(config.base_image) {
d.linfo('Building builder image from base image ${config.base_image}')
d.linfo('Building builder image from base image $config.base_image')
// TODO handle this better than to just skip the config
d.images.refresh_image(config.base_image) or {
@@ -114,10 +107,10 @@ pub fn (mut d AgentDaemon) run() {
// It's technically still possible that the build image is
// removed in the very short period between building the
// builder image and starting a build container with it. If
// this happens, fate really just didn't want you to do this
// this happens, faith really just didn't want you to do this
// build.
d.build_channel <- config
d.start_build(config)
running++
}
}
@@ -154,9 +147,25 @@ fn (mut d AgentDaemon) update_atomics() (int, int) {
return finished, empty
}
// start_build starts a build for the given BuildConfig.
fn (mut d AgentDaemon) start_build(config BuildConfig) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
stdatomic.store_u64(&d.atomics[i], agent.build_running)
d.builds[i] = config
go d.run_build(i, config)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
d.linfo('started build: ${config}')
d.linfo('started build: $config')
// 0 means success, 1 means failure
mut status := 0
@@ -167,31 +176,22 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
}
res := build.build_config(d.client.address, d.client.api_key, new_config) or {
d.ldebug('build_config error: ${err.msg()}')
d.ldebug('build_config error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('Uploading build logs for ${config}')
d.linfo('Uploading build logs for $config')
// TODO use the arch value here
build_arch := os.uname().machine
d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or { d.lerror('Failed to upload logs for ${config}') }
res.exit_code, res.logs) or { d.lerror('Failed to upload logs for $config') }
} else {
d.lwarn('an error occurred during build: ${config}')
d.lwarn('an error occurred during build: $config')
}
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
}
// builder_thread is a thread that constantly listens for builds to process
fn (mut d AgentDaemon) builder_thread(ch chan BuildConfig, builder_index int) {
for {
build_config := <-ch or { break }
d.run_build(builder_index, build_config)
}
}
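
The `builder_thread` function above is the core of the dev branch's worker model: a fixed number of long-lived threads block on a channel instead of a new thread being spawned per build. A self-contained sketch of that pattern, independent of the Vieter types:

```v
import time

// worker blocks on the channel until it is closed, handling one job at a time.
fn worker(ch chan int, id int) {
	for {
		job := <-ch or { break } // channel closed -> thread exits
		println('worker ${id} handling job ${job}')
	}
}

fn main() {
	ch := chan int{cap: 4}
	for i in 0 .. 2 {
		spawn worker(ch, i)
	}
	for job in 0 .. 6 {
		ch <- job
	}
	ch.close()
	time.sleep(100 * time.millisecond) // crude wait so workers can drain the queue
}
```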

@@ -71,7 +71,7 @@ pub fn (mut m ImageManager) up_to_date(base_image string) bool {
fn (mut m ImageManager) refresh_image(base_image string) ! {
// TODO use better image tags for built images
new_image := build.create_build_image(base_image) or {
return error('Failed to build builder image from base image ${base_image}')
return error('Failed to build builder image from base image $base_image')
}
m.images[base_image] << new_image
@@ -99,7 +99,7 @@ fn (mut m ImageManager) clean_old_images() {
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.image_remove(m.images[image][i]) or {
dd.remove_image(m.images[image][i]) or {
// The image was removed by an external event
if err.code() == 404 {
m.images[image].delete(i)

@@ -1,36 +1,35 @@
module agent
import log
// log a message with the given level
pub fn (mut d AgentDaemon) log(msg string, level log.Level) {
lock d.logger {
d.logger.send_output(msg, level)
}
}
// lfatal create a log message with the fatal level
pub fn (mut d AgentDaemon) lfatal(msg string) {
lock d.logger {
d.logger.fatal(msg)
}
d.log(msg, log.Level.fatal)
}
// lerror create a log message with the error level
pub fn (mut d AgentDaemon) lerror(msg string) {
lock d.logger {
d.logger.error(msg)
}
d.log(msg, log.Level.error)
}
// lwarn create a log message with the warn level
pub fn (mut d AgentDaemon) lwarn(msg string) {
lock d.logger {
d.logger.warn(msg)
}
d.log(msg, log.Level.warn)
}
// linfo create a log message with the info level
pub fn (mut d AgentDaemon) linfo(msg string) {
lock d.logger {
d.logger.info(msg)
}
d.log(msg, log.Level.info)
}
// ldebug create a log message with the debug level
pub fn (mut d AgentDaemon) ldebug(msg string) {
lock d.logger {
d.logger.debug(msg)
}
d.log(msg, log.Level.debug)
}

@@ -45,7 +45,7 @@ pub fn create_build_image(base_image string) !string {
c := docker.NewContainer{
image: base_image
env: ['BUILD_SCRIPT=${cmds_str}']
env: ['BUILD_SCRIPT=$cmds_str']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/sh -e']
}
@@ -57,7 +57,7 @@ pub fn create_build_image(base_image string) !string {
image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
// We pull the provided image
dd.image_pull(image_name, image_tag)!
dd.pull_image(image_name, image_tag)!
id := dd.container_create(c)!.id
// id := docker.create_container(c)!
@@ -79,7 +79,7 @@ pub fn create_build_image(base_image string) !string {
// TODO also add the base image's name into the image name to prevent
// conflicts.
tag := time.sys_mono_now().str()
image := dd.image_from_container(id, 'vieter-build', tag)!
image := dd.create_image_from_container(id, 'vieter-build', tag)!
dd.container_remove(id)!
return image.id
@@ -94,8 +94,8 @@
}
// build_target builds the given target. Internally it calls `build_config`.
pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool, timeout int) !BuildResult {
config := target.as_build_config(base_image_id, force, timeout)
pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult {
config := target.as_build_config(base_image_id, force)
return build_config(address, api_key, config)
}
@@ -118,10 +118,10 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
base64_script := base64.encode_str(build_script)
c := docker.NewContainer{
image: '${config.base_image}'
image: '$config.base_image'
env: [
'BUILD_SCRIPT=${base64_script}',
'API_KEY=${api_key}',
'BUILD_SCRIPT=$base64_script',
'API_KEY=$api_key',
// `archlinux:base-devel` does not correctly set the path variable,
// causing certain builds to fail. This fixes it.
'PATH=${build.path_dirs.join(':')}',
@@ -136,17 +136,9 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
dd.container_start(id)!
mut data := dd.container_inspect(id)!
start_time := time.now()
// This loop waits until the container has stopped, so we can remove it after
for data.state.running {
if time.now() - start_time > config.timeout * time.second {
dd.container_kill(id)!
dd.container_remove(id)!
return error('Build killed due to timeout (${config.timeout}s)')
}
time.sleep(1 * time.second)
data = dd.container_inspect(id)!

@@ -1,7 +1,7 @@
module build
import models { BuildConfig, Target }
import cron
import cron.expression { CronExpression, parse_expression }
import time
import datatypes { MinHeap }
import util
@@ -13,7 +13,7 @@ pub mut:
// Next timestamp from which point this job is allowed to be executed
timestamp time.Time
// Required for calculating next timestamp after having pop'ed a job
ce &cron.Expression = unsafe { nil }
ce CronExpression
// Actual build config sent to the agent
config BuildConfig
// Whether this is a one-time job
@@ -30,15 +30,13 @@ fn (r1 BuildJob) < (r2 BuildJob) bool {
// for each architecture. Agents receive jobs from this queue.
pub struct BuildJobQueue {
// Schedule to use for targets without explicitely defined cron expression
default_schedule &cron.Expression
default_schedule CronExpression
// Base image to use for targets without defined base image
default_base_image string
// After how many minutes a build should be forcefully cancelled
default_build_timeout int
mut:
mutex shared util.Dummy
// For each architecture, a priority queue is tracked
queues map[string]MinHeap[BuildJob]
queues map[string]MinHeap<BuildJob>
// When a target is removed from the server or edited, its previous build
// configs will be invalid. This map allows for those to be simply skipped
// by ignoring any build configs created before this timestamp.
@@ -46,11 +44,10 @@ mut:
}
// new_job_queue initializes a new job queue
pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string, default_build_timeout int) BuildJobQueue {
pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue {
return BuildJobQueue{
default_schedule: unsafe { default_schedule }
default_schedule: default_schedule
default_base_image: default_base_image
default_build_timeout: default_build_timeout
invalidated: map[int]time.Time{}
}
}
@@ -77,25 +74,25 @@ pub struct InsertConfig {
pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
lock q.mutex {
if input.arch !in q.queues {
q.queues[input.arch] = MinHeap[BuildJob]{}
q.queues[input.arch] = MinHeap<BuildJob>{}
}
mut job := BuildJob{
created: time.now()
single: input.single
config: input.target.as_build_config(q.default_base_image, input.force, q.default_build_timeout)
config: input.target.as_build_config(q.default_base_image, input.force)
}
if !input.now {
ce := if input.target.schedule != '' {
cron.parse_expression(input.target.schedule) or {
return error("Error while parsing cron expression '${input.target.schedule}' (id ${input.target.id}): ${err.msg()}")
parse_expression(input.target.schedule) or {
return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()")
}
} else {
q.default_schedule
}
job.timestamp = ce.next_from_now()
job.timestamp = ce.next_from_now()!
job.ce = ce
} else {
job.timestamp = time.now()
@@ -108,8 +105,8 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
// reschedule the given job by calculating the next timestamp and re-adding it
// to its respective queue. This function is called by the pop functions
// *after* having pop'ed the job.
fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) {
new_timestamp := job.ce.next_from_now()
fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! {
new_timestamp := job.ce.next_from_now()!
new_job := BuildJob{
...job
@@ -146,7 +143,7 @@ pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob {
}
q.pop_invalid(arch)
job := q.queues[arch].peek() or { return none }
job := q.queues[arch].peek()?
if job.timestamp < time.now() {
return job
@@ -165,13 +162,16 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
}
q.pop_invalid(arch)
mut job := q.queues[arch].peek() or { return none }
mut job := q.queues[arch].peek()?
if job.timestamp < time.now() {
job = q.queues[arch].pop() or { return none }
job = q.queues[arch].pop()?
if !job.single {
q.reschedule(job, arch)
// TODO how do we handle this properly? Is it even possible for a
// cron expression to not return a next time if it's already been
// used before?
q.reschedule(job, arch) or {}
}
return job
@@ -198,7 +198,8 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
job = q.queues[arch].pop() or { break }
if !job.single {
q.reschedule(job, arch)
// TODO idem
q.reschedule(job, arch) or {}
}
out << job
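
Both queue variants in this hunk are built on V's `datatypes.MinHeap`, which orders elements through the overloaded `<` operator defined on `BuildJob`. A standalone sketch of the idiom, written with the dev branch's `[T]` generics syntax:

```v
import datatypes { MinHeap }
import time

struct Job {
	name      string
	timestamp time.Time
}

// The heap pops the job with the earliest timestamp first.
fn (a Job) < (b Job) bool {
	return a.timestamp < b.timestamp
}

fn main() {
	mut queue := MinHeap[Job]{}
	queue.insert(Job{name: 'later', timestamp: time.now().add_seconds(60)})
	queue.insert(Job{name: 'now', timestamp: time.now()})
	job := queue.pop() or { return }
	println(job.name) // prints 'now'
}
```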

@@ -24,12 +24,12 @@ pub fn echo_commands(cmds []string) []string {
// create_build_script generates a shell script that builds a given Target.
fn create_build_script(address string, config BuildConfig, build_arch string) string {
repo_url := '${address}/${config.repo}'
repo_url := '$address/$config.repo'
mut commands := [
// This will later be replaced by a proper setting for changing the
// mirrorlist
"echo -e '[${config.repo}]\\nServer = ${address}/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
"echo -e '[$config.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
// We need to update the package list of the repo we just added above.
// This should however not pull in a lot of packages as long as the
// builder image is rebuilt frequently.
@@ -42,18 +42,18 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
'git' {
if config.branch == '' {
[
"git clone --single-branch --depth 1 '${config.url}' repo",
"git clone --single-branch --depth 1 '$config.url' repo",
]
} else {
[
"git clone --single-branch --depth 1 --branch ${config.branch} '${config.url}' repo",
"git clone --single-branch --depth 1 --branch $config.branch '$config.url' repo",
]
}
}
'url' {
[
'mkdir repo',
"curl -o repo/PKGBUILD -L '${config.url}'",
"curl -o repo/PKGBUILD -L '$config.url'",
]
}
else {
@@ -62,7 +62,7 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
}
commands << if config.path != '' {
"cd 'repo/${config.path}'"
"cd 'repo/$config.path'"
} else {
'cd repo'
}
@@ -76,7 +76,7 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
// The build container checks whether the package is already present on
// the server.
commands << [
'curl -s --head --fail ${repo_url}/${build_arch}/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
// If the above curl command succeeds, we don't need to rebuild the
// package. However, because we're in a su shell, the exit command will
// drop us back into the root shell. Therefore, we must check whether
@@ -86,7 +86,7 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
}
commands << [
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" ${repo_url}/publish; done',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
]
return echo_commands(commands).join('\n')

@@ -22,7 +22,7 @@ pub fn new(address string, api_key string) Client {
// send_request_raw sends an HTTP request, returning the http.Response object.
// It encodes the params so that they're safe to pass as HTTP query parameters.
fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) !http.Response {
mut full_url := '${c.address}${url}'
mut full_url := '$c.address$url'
if params.len > 0 {
mut params_escaped := map[string]string{}
@@ -33,9 +33,9 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str
params_escaped[k] = urllib.query_escape(v)
}
params_str := params_escaped.keys().map('${it}=${params_escaped[it]}').join('&')
params_str := params_escaped.keys().map('$it=${params_escaped[it]}').join('&')
full_url = '${full_url}?${params_str}'
full_url = '$full_url?$params_str'
}
// Looking at the source code, this function doesn't actually fail, so I'm
@@ -49,13 +49,13 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str
}
// send_request<T> just calls send_request_with_body<T> with an empty body.
fn (c &Client) send_request[T](method Method, url string, params map[string]string) !Response[T] {
return c.send_request_with_body[T](method, url, params, '')
fn (c &Client) send_request<T>(method Method, url string, params map[string]string) !Response<T> {
return c.send_request_with_body<T>(method, url, params, '')
}
// send_request_with_body<T> calls send_request_raw_response & parses its
// output as a Response<T> object.
fn (c &Client) send_request_with_body[T](method Method, url string, params map[string]string, body string) !Response[T] {
fn (c &Client) send_request_with_body<T>(method Method, url string, params map[string]string, body string) !Response<T> {
res := c.send_request_raw(method, url, params, body)!
status := res.status()
@@ -64,12 +64,12 @@ fn (c &Client) send_request_with_body[T](method Method, url string, params map[s
if status.is_error() {
// A non-successful status call will have an empty body
if res.body == '' {
return error('Error ${res.status_code} (${status.str()}): (empty response)')
return error('Error $res.status_code ($status.str()): (empty response)')
}
data := json.decode(Response[string], res.body)!
data := json.decode(Response<string>, res.body)!
return error('Status ${res.status_code} (${status.str()}): ${data.message}')
return error('Status $res.status_code ($status.str()): $data.message')
}
// Just return an empty successful response
@@ -77,7 +77,7 @@ fn (c &Client) send_request_with_body[T](method Method, url string, params map[s
return new_data_response(T{})
}
data := json.decode(Response[T], res.body)!
data := json.decode(Response<T>, res.body)!
return data
}
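
The churn in this file is almost entirely mechanical: V replaced its angle-bracket generics (`send_request<T>`, on the side built with V 0.3.2) with square brackets (`send_request[T]`, on the side pinned to the newer compiler commit). A minimal sketch of the newer syntax (the struct and function here are hypothetical, not part of the client module):

```v
import json

struct Response[T] {
	message string
	data    T
}

// decode_response[T] uses the square-bracket generics syntax; under the
// older compiler this would have been spelled decode_response<T>.
fn decode_response[T](body string) !Response[T] {
	return json.decode(Response[T], body)!
}

fn main() {
	res := decode_response[int]('{"message": "ok", "data": 42}') or {
		eprintln(err.msg())
		return
	}
	println(res.data) // 42
}
```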

@@ -4,7 +4,7 @@ import models { BuildConfig }
// poll_jobs requests a list of new build jobs from the server.
pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
data := c.send_request[[]BuildConfig](.get, '/api/v1/jobs/poll', {
data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', {
'arch': arch
'max': max.str()
})!
@@ -15,7 +15,7 @@ pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
// queue_job adds a new one-time build job for the given target to the job
// queue.
pub fn (c &Client) queue_job(target_id int, arch string, force bool) ! {
c.send_request[string](.post, '/api/v1/jobs/queue', {
c.send_request<string>(.post, '/api/v1/jobs/queue', {
'target': target_id.str()
'arch': arch
'force': force.str()

@@ -7,27 +7,27 @@ import time
// get_build_logs returns all build logs.
pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog {
params := models.params_from(filter)
data := c.send_request[[]BuildLog](.get, '/api/v1/logs', params)!
data := c.send_request<[]BuildLog>(.get, '/api/v1/logs', params)!
return data.data
}
// get_build_log returns a specific build log.
pub fn (c &Client) get_build_log(id int) !BuildLog {
data := c.send_request[BuildLog](.get, '/api/v1/logs/${id}', {})!
data := c.send_request<BuildLog>(.get, '/api/v1/logs/$id', {})!
return data.data
}
// get_build_log_content returns the contents of the build log file.
pub fn (c &Client) get_build_log_content(id int) !string {
data := c.send_request_raw_response(.get, '/api/v1/logs/${id}/content', {}, '')!
data := c.send_request_raw_response(.get, '/api/v1/logs/$id/content', {}, '')!
return data
}
// add_build_log adds a new build log to the server.
pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response[int] {
pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response<int> {
params := {
'target': target_id.str()
'startTime': start_time.unix_time().str()
@@ -36,12 +36,12 @@ pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time t
'exitCode': exit_code.str()
}
data := c.send_request_with_body[int](.post, '/api/v1/logs', params, content)!
data := c.send_request_with_body<int>(.post, '/api/v1/logs', params, content)!
return data
}
// remove_build_log removes the build log with the given id from the server.
pub fn (c &Client) remove_build_log(id int) ! {
c.send_request[string](.delete, '/api/v1/logs/${id}', {})!
c.send_request<string>(.delete, '/api/v1/logs/$id', {})!
}

@@ -2,15 +2,15 @@ module client
// remove_repo removes an entire repository.
pub fn (c &Client) remove_repo(repo string) ! {
c.send_request[string](.delete, '/${repo}', {})!
c.send_request<string>(.delete, '/$repo', {})!
}
// remove_arch_repo removes an entire arch-repo.
pub fn (c &Client) remove_arch_repo(repo string, arch string) ! {
c.send_request[string](.delete, '/${repo}/${arch}', {})!
c.send_request<string>(.delete, '/$repo/$arch', {})!
}
// remove_package removes a single package from the given arch-repo.
pub fn (c &Client) remove_package(repo string, arch string, pkgname string) ! {
c.send_request[string](.delete, '/${repo}/${arch}/${pkgname}', {})!
c.send_request<string>(.delete, '/$repo/$arch/$pkgname', {})!
}

@@ -5,7 +5,7 @@ import models { Target, TargetFilter }
// get_targets returns a list of targets, given a filter object.
pub fn (c &Client) get_targets(filter TargetFilter) ![]Target {
params := models.params_from(filter)
data := c.send_request[[]Target](.get, '/api/v1/targets', params)!
data := c.send_request<[]Target>(.get, '/api/v1/targets', params)!
return data.data
}
@@ -33,7 +33,7 @@ pub fn (c &Client) get_all_targets() ![]Target {
// get_target returns the target for a specific id.
pub fn (c &Client) get_target(id int) !Target {
data := c.send_request[Target](.get, '/api/v1/targets/${id}', {})!
data := c.send_request<Target>(.get, '/api/v1/targets/$id', {})!
return data.data
}
@@ -49,15 +49,15 @@ pub struct NewTarget {
// add_target adds a new target to the server.
pub fn (c &Client) add_target(t NewTarget) !int {
params := models.params_from[NewTarget](t)
data := c.send_request[int](.post, '/api/v1/targets', params)!
params := models.params_from<NewTarget>(t)
data := c.send_request<int>(.post, '/api/v1/targets', params)!
return data.data
}
// remove_target removes the target with the given id from the server.
pub fn (c &Client) remove_target(id int) !string {
data := c.send_request[string](.delete, '/api/v1/targets/${id}', {})!
data := c.send_request<string>(.delete, '/api/v1/targets/$id', {})!
return data.data
}
@@ -65,7 +65,7 @@ pub fn (c &Client) remove_target(id int) !string {
// patch_target sends a PATCH request to the given target with the params as
// payload.
pub fn (c &Client) patch_target(id int, params map[string]string) !string {
data := c.send_request[string](.patch, '/api/v1/targets/${id}', params)!
data := c.send_request<string>(.patch, '/api/v1/targets/$id', params)!
return data.data
}

@@ -36,24 +36,24 @@ pub fn cmd() cli.Command {
required_args: 2
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
c := aur.new()
pkgs := c.info(cmd.args[1..])!
vc := client.new(conf_.address, conf_.api_key)
vc := client.new(conf.address, conf.api_key)
for pkg in pkgs {
vc.add_target(
kind: 'git'
url: 'https://aur.archlinux.org/${pkg.package_base}' + '.git'
url: 'https://aur.archlinux.org/$pkg.package_base' + '.git'
repo: cmd.args[0]
) or {
println('Failed to add ${pkg.name}: ${err.msg()}')
println('Failed to add $pkg.name: $err.msg()')
continue
}
println('Added ${pkg.name}' + '.')
println('Added $pkg.name' + '.')
}
}
},

@@ -74,7 +74,7 @@ pub fn cmd() cli.Command {
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
mut filter := BuildLogFilter{}
@@ -146,7 +146,7 @@ pub fn cmd() cli.Command {
raw := cmd.flags.get_bool('raw')!
list(conf_, filter, raw)!
list(conf, filter, raw)!
}
},
cli.Command{
@@ -156,9 +156,9 @@ pub fn cmd() cli.Command {
description: 'Remove a build log that matches the given id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
remove(conf_, cmd.args[0])!
remove(conf, cmd.args[0])!
}
},
cli.Command{
@@ -168,10 +168,10 @@ pub fn cmd() cli.Command {
description: 'Show all info for a specific build log.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
id := cmd.args[0].int()
info(conf_, id)!
info(conf, id)!
}
},
cli.Command{
@@ -181,10 +181,10 @@ pub fn cmd() cli.Command {
description: 'Output the content of a build log to stdout.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
id := cmd.args[0].int()
content(conf_, id)!
content(conf, id)!
}
},
]
@@ -204,16 +204,16 @@ fn print_log_list(logs []BuildLog, raw bool) ! {
}
// list prints a list of all build logs.
fn list(conf_ Config, filter BuildLogFilter, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
fn list(conf Config, filter BuildLogFilter, raw bool) ! {
c := client.new(conf.address, conf.api_key)
logs := c.get_build_logs(filter)!
print_log_list(logs, raw)!
}
// info print the detailed info for a given build log.
fn info(conf_ Config, id int) ! {
c := client.new(conf_.address, conf_.api_key)
fn info(conf Config, id int) ! {
c := client.new(conf.address, conf.api_key)
log := c.get_build_log(id)!
print(log)
@@ -221,15 +221,15 @@ fn info(conf_ Config, id int) ! {
// content outputs the contents of the log file for a given build log to
// stdout.
fn content(conf_ Config, id int) ! {
c := client.new(conf_.address, conf_.api_key)
fn content(conf Config, id int) ! {
c := client.new(conf.address, conf.api_key)
content := c.get_build_log_content(id)!
println(content)
}
// remove removes a build log from the server's list.
fn remove(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
fn remove(conf Config, id string) ! {
c := client.new(conf.address, conf.api_key)
c.remove_build_log(id.int())!
}

@@ -28,7 +28,7 @@ pub fn cmd() cli.Command {
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
if cmd.args.len < 3 {
if !cmd.flags.get_bool('force')! {
@@ -36,14 +36,14 @@ pub fn cmd() cli.Command {
}
}
client_ := client.new(conf_.address, conf_.api_key)
client := client.new(conf.address, conf.api_key)
if cmd.args.len == 1 {
client_.remove_repo(cmd.args[0])!
client.remove_repo(cmd.args[0])!
} else if cmd.args.len == 2 {
client_.remove_arch_repo(cmd.args[0], cmd.args[1])!
client.remove_arch_repo(cmd.args[0], cmd.args[1])!
} else {
client_.remove_package(cmd.args[0], cmd.args[1], cmd.args[2])!
client.remove_package(cmd.args[0], cmd.args[1], cmd.args[2])!
}
}
},

@@ -1,7 +1,7 @@
module schedule
import cli
import cron
import cron.expression { parse_expression }
import time
// cmd returns the cli submodule for previewing a cron schedule.
@@ -19,10 +19,10 @@ pub fn cmd() cli.Command {
},
]
execute: fn (cmd cli.Command) ! {
ce := cron.parse_expression(cmd.args.join(' '))!
ce := parse_expression(cmd.args.join(' '))!
count := cmd.flags.get_int('count')!
for t in ce.next_n(time.now(), count) {
for t in ce.next_n(time.now(), count)! {
println(t)
}
}

@@ -6,7 +6,7 @@ import os
import build
// build locally builds the target with the given id.
fn build_target(conf Config, target_id int, force bool, timeout int) ! {
fn build(conf Config, target_id int, force bool) ! {
c := client.new(conf.address, conf.api_key)
target := c.get_target(target_id)!
@@ -16,7 +16,7 @@ fn build_target(conf Config, target_id int, force bool, timeout int) ! {
image_id := build.create_build_image(conf.base_image)!
println('Running build...')
res := build.build_target(conf.address, conf.api_key, image_id, target, force, timeout)!
res := build.build_target(conf.address, conf.api_key, image_id, target, force)!
println('Removing build image...')
@@ -26,7 +26,7 @@ fn build_target(conf Config, target_id int, force bool, timeout int) ! {
dd.close() or {}
}
dd.image_remove(image_id)!
dd.remove_image(image_id)!
println('Uploading logs to Vieter...')
c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,

@@ -2,7 +2,7 @@ module targets
import cli
import conf as vconf
import cron
import cron.expression { parse_expression }
import client { NewTarget }
import console
import models { TargetFilter }
@@ -40,21 +40,10 @@ pub fn cmd() cli.Command {
description: 'Only return targets that publish to this repo.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'query'
abbrev: 'q'
description: 'Search string to filter targets by.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'arch'
description: 'Only list targets that build for this arch.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
mut filter := TargetFilter{}
@@ -73,19 +62,9 @@ pub fn cmd() cli.Command {
filter.repo = repo
}
query := cmd.flags.get_string('query')!
if query != '' {
filter.query = query
}
arch := cmd.flags.get_string('arch')!
if arch != '' {
filter.arch = arch
}
raw := cmd.flags.get_bool('raw')!
list(conf_, filter, raw)!
list(conf, filter, raw)!
}
},
cli.Command{
@@ -113,7 +92,7 @@ pub fn cmd() cli.Command {
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
t := NewTarget{
kind: cmd.flags.get_string('kind')!
@@ -125,7 +104,7 @@ pub fn cmd() cli.Command {
raw := cmd.flags.get_bool('raw')!
add(conf_, t, raw)!
add(conf, t, raw)!
}
},
cli.Command{
@@ -135,9 +114,9 @@ pub fn cmd() cli.Command {
description: 'Remove a target that matches the given id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
remove(conf_, cmd.args[0])!
remove(conf, cmd.args[0])!
}
},
cli.Command{
@@ -147,9 +126,9 @@ pub fn cmd() cli.Command {
description: 'Show detailed information for the target matching the id.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
info(conf_, cmd.args[0])!
info(conf, cmd.args[0])!
}
},
cli.Command{
@@ -196,7 +175,7 @@ pub fn cmd() cli.Command {
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
found := cmd.flags.get_all_found()
@@ -208,7 +187,7 @@ pub fn cmd() cli.Command {
}
}
patch(conf_, cmd.args[0], params)!
patch(conf, cmd.args[0], params)!
}
},
cli.Command{
@@ -232,20 +211,13 @@ pub fn cmd() cli.Command {
description: 'Architecture to schedule build for. Required when using -remote.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'timeout'
description: 'After how many minutes to cancel the build. Only applies to local builds.'
flag: cli.FlagType.int
default_value: ['3600']
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
remote := cmd.flags.get_bool('remote')!
force := cmd.flags.get_bool('force')!
timeout := cmd.flags.get_int('timeout')!
target_id := cmd.args[0].int()
if remote {
@@ -255,10 +227,10 @@ pub fn cmd() cli.Command {
return error('When scheduling the build remotely, you have to specify an architecture.')
}
c := client.new(conf_.address, conf_.api_key)
c := client.new(conf.address, conf.api_key)
c.queue_job(target_id, arch, force)!
} else {
build_target(conf_, target_id, force, timeout)!
build(conf, target_id, force)!
}
}
},
@@ -267,8 +239,8 @@ pub fn cmd() cli.Command {
}
// list prints out a list of all repositories.
fn list(conf_ Config, filter TargetFilter, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
fn list(conf Config, filter TargetFilter, raw bool) ! {
c := client.new(conf.address, conf.api_key)
targets := c.get_targets(filter)!
data := targets.map([it.id.str(), it.kind, it.url, it.repo])
@@ -280,40 +252,40 @@ fn list(conf_ Config, filter TargetFilter, raw bool) ! {
}
// add adds a new target to the server's list.
fn add(conf_ Config, t &NewTarget, raw bool) ! {
c := client.new(conf_.address, conf_.api_key)
fn add(conf Config, t &NewTarget, raw bool) ! {
c := client.new(conf.address, conf.api_key)
target_id := c.add_target(t)!
if raw {
println(target_id)
} else {
println('Target added with id ${target_id}')
println('Target added with id $target_id')
}
}
// remove removes a target from the server's list.
fn remove(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
fn remove(conf Config, id string) ! {
c := client.new(conf.address, conf.api_key)
c.remove_target(id.int())!
}
// patch patches a given target with the provided params.
fn patch(conf_ Config, id string, params map[string]string) ! {
fn patch(conf Config, id string, params map[string]string) ! {
// We check the cron expression first because it's useless to send an
// invalid one to the server.
if 'schedule' in params && params['schedule'] != '' {
cron.parse_expression(params['schedule']) or {
return error('Invalid cron expression: ${err.msg()}')
parse_expression(params['schedule']) or {
return error('Invalid cron expression: $err.msg()')
}
}
c := client.new(conf_.address, conf_.api_key)
c := client.new(conf.address, conf.api_key)
c.patch_target(id.int(), params)!
}
// info shows detailed information for a given target.
fn info(conf_ Config, id string) ! {
c := client.new(conf_.address, conf_.api_key)
fn info(conf Config, id string) ! {
c := client.new(conf.address, conf.api_key)
target := c.get_target(id.int())!
println(target)
}

src/cron/cli.v (new file, mode 100644)

@@ -0,0 +1,32 @@
module cron
import cli
import conf as vconf
struct Config {
pub:
log_level string = 'WARN'
api_key string
address string
data_dir string
base_image string = 'archlinux:base-devel'
max_concurrent_builds int = 1
api_update_frequency int = 15
image_rebuild_frequency int = 1440
// Replicates the behavior of the original cron system
global_schedule string = '0 3'
}
// cmd returns the cli module that handles the cron daemon.
pub fn cmd() cli.Command {
return cli.Command{
name: 'cron'
description: 'Start the cron service that periodically runs builds.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
cron(conf)!
}
}
}

src/cron/cron.v (new file, mode 100644)

@@ -0,0 +1,33 @@
module cron
import log
import cron.daemon
import cron.expression
import os
const log_file_name = 'vieter.cron.log'
// cron starts a cron daemon & starts periodically scheduling builds.
pub fn cron(conf Config) ! {
// Configure logger
log_level := log.level_from_tag(conf.log_level) or {
return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
mut logger := log.Log{
level: log_level
}
log_file := os.join_path_single(conf.data_dir, cron.log_file_name)
logger.set_full_logpath(log_file)
logger.log_to_console_too()
ce := expression.parse_expression(conf.global_schedule) or {
return error('Error while parsing global cron expression: $err.msg()')
}
mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)!
d.run()
}

@@ -0,0 +1,115 @@
module daemon
import time
import sync.stdatomic
import build
import os
const (
build_empty = 0
build_running = 1
build_done = 2
)
// clean_finished_builds removes finished builds from the build slots & returns
// them.
fn (mut d Daemon) clean_finished_builds() []ScheduledBuild {
mut out := []ScheduledBuild{}
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
out << d.builds[i]
}
}
return out
}
// update_builds starts as many builds as possible.
fn (mut d Daemon) start_new_builds() {
now := time.now()
for d.queue.len() > 0 {
elem := d.queue.peek() or {
d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
break
}
if elem.timestamp < now {
sb := d.queue.pop() or {
d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.")
break
}
// If this build couldn't be scheduled, no more will be possible.
if !d.start_build(sb) {
d.queue.insert(sb)
break
}
} else {
break
}
}
}
// start_build starts a build for the given ScheduledBuild object.
fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
stdatomic.store_u64(&d.atomics[i], daemon.build_running)
d.builds[i] = sb
go d.run_build(i, sb)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
d.linfo('started build: $sb.target.url -> $sb.target.repo')
// 0 means success, 1 means failure
mut status := 0
res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.target, false) or {
d.ldebug('build_target error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
build_arch := os.uname().machine
d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or {
d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
}
} else {
d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo')
}
stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
}
// current_build_count returns how many builds are currently running.
fn (mut d Daemon) current_build_count() int {
mut res := 0
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
res += 1
}
}
return res
}
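
The `build_empty`/`build_running`/`build_done` constants and the `start_build` loop above form a fixed pool of build slots guarded by atomics. The load-then-store is not a compare-and-swap, but it doesn't race here because only the single scheduler thread ever claims slots; workers only move their own slot to done. A reduced sketch of that claim step:

```v
import sync.stdatomic

const (
	slot_empty   = u64(0)
	slot_running = u64(1)
)

// claim_slot returns the index of the first free slot, or -1 if all slots
// are busy. Single-claimer assumption: only one thread calls this function.
fn claim_slot(mut slots []u64) int {
	for i in 0 .. slots.len {
		if stdatomic.load_u64(&slots[i]) == slot_empty {
			stdatomic.store_u64(&slots[i], slot_running)
			return i
		}
	}
	return -1
}

fn main() {
	mut slots := []u64{len: 2}
	println(claim_slot(mut slots)) // 0
	println(claim_slot(mut slots)) // 1
	println(claim_slot(mut slots)) // -1
}
```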

@@ -0,0 +1,274 @@
module daemon
import time
import log
import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression }
import math
import build
import docker
import os
import client
import models { Target }
const (
// How many seconds to wait before retrying to update API if failed
api_update_retry_timeout = 5
// How many seconds to wait before retrying to rebuild image if failed
rebuild_base_image_retry_timout = 30
)
struct ScheduledBuild {
pub:
target Target
timestamp time.Time
}
// Overloaded operator for comparing ScheduledBuild objects
fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
return r1.timestamp < r2.timestamp
}
pub struct Daemon {
mut:
client client.Client
base_image string
builder_images []string
global_schedule CronExpression
api_update_frequency int
image_rebuild_frequency int
// Targets currently loaded from API.
targets []Target
// At what point to update the list of targets.
api_update_timestamp time.Time
image_build_timestamp time.Time
queue MinHeap<ScheduledBuild>
// Which builds are currently running
builds []ScheduledBuild
// Atomic variables used to detect when a build has finished; length is the
// same as builds
atomics []u64
logger shared log.Log
}
// init_daemon initializes a new Daemon object. It renews the targets &
// populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) !Daemon {
mut d := Daemon{
client: client.new(address, api_key)
base_image: base_image
global_schedule: global_schedule
api_update_frequency: api_update_frequency
image_rebuild_frequency: image_rebuild_frequency
atomics: []u64{len: max_concurrent_builds}
builds: []ScheduledBuild{len: max_concurrent_builds}
logger: logger
}
// Initialize the targets & queue
d.renew_targets()
d.renew_queue()
if !d.rebuild_base_image() {
return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
}
return d
}
// run starts the actual daemon process. It runs builds when possible &
// periodically refreshes the list of targets to ensure we stay in sync.
pub fn (mut d Daemon) run() {
for {
finished_builds := d.clean_finished_builds()
// Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp {
d.renew_targets()
d.renew_queue()
}
// The finished builds should only be rescheduled if the API contents
// haven't been renewed.
else {
for sb in finished_builds {
d.schedule_build(sb.target)
}
}
// TODO remove old builder images.
// This issue is less trivial than it sounds, because a build could
// still be running when the image has to be rebuilt. That would
// prevent the image from being removed. Therefore, we will need to
// keep track of a list or something & remove an image once we have
// made sure it isn't being used anymore.
if time.now() >= d.image_build_timestamp {
d.rebuild_base_image()
// In theory, executing this function here allows an old builder
// image to exist for at most image_rebuild_frequency minutes.
d.clean_old_base_images()
}
// Schedules new builds when possible
d.start_new_builds()
// If there are builds currently running, the daemon should refresh
// every second to clean up any finished builds & start new ones.
mut delay := time.Duration(1 * time.second)
// Sleep either until we have to refresh the targets or when the next
// build has to start, with a minimum of 1 second.
if d.current_build_count() == 0 {
now := time.now()
delay = d.api_update_timestamp - now
if d.queue.len() > 0 {
elem := d.queue.peek() or {
d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
// This is just a fallback option. In theory, queue.peek()
// should *never* return an error or none, because we check
// its len beforehand.
time.sleep(1)
continue
}
time_until_next_job := elem.timestamp - now
delay = math.min(delay, time_until_next_job)
}
}
// We sleep for at least one second. This is to prevent the program
// from looping aggressively when a cronjob can be scheduled, but
// there are no spots free for it to be started.
delay = math.max(delay, 1 * time.second)
d.ldebug('Sleeping for ${delay}...')
time.sleep(delay)
}
}
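To make the sleep computation above concrete, a small self-contained sketch of the clamping logic (the durations are made up for illustration):

```v
import math
import time

fn main() {
	// no builds running: next API update in 300s, next queued job in 30s
	api_delay := time.Duration(300 * time.second)
	job_delay := time.Duration(30 * time.second)
	mut delay := math.min(api_delay, job_delay)
	// never busy-loop: sleep at least one second
	delay = math.max(delay, time.Duration(1 * time.second))
	println(delay) // 30s: the daemon sleeps until the next job is due
}
```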
// schedule_build adds the next occurrence of the given target's build to the
// queue.
fn (mut d Daemon) schedule_build(target Target) {
ce := if target.schedule != '' {
parse_expression(target.schedule) or {
// TODO This shouldn't return an error if the expression is empty.
d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
d.global_schedule
}
} else {
d.global_schedule
}
// A target that can't be scheduled will just be skipped for now
timestamp := ce.next_from_now() or {
d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping")
return
}
d.queue.insert(ScheduledBuild{
target: target
timestamp: timestamp
})
}
// renew_targets requests the newest list of targets from the server & replaces
// the old one.
fn (mut d Daemon) renew_targets() {
d.linfo('Renewing targets...')
mut new_targets := d.client.get_all_targets() or {
d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...')
d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)
return
}
// Filter out any targets that shouldn't run on this architecture
cur_arch := os.uname().machine
new_targets = new_targets.filter(it.arch.any(it.value == cur_arch))
d.targets = new_targets
d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
}
// renew_queue replaces the old queue with a new one that reflects the newest
// values in targets.
fn (mut d Daemon) renew_queue() {
d.linfo('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{}
// Move any jobs that should have already started from the old queue onto
// the new one
now := time.now()
// For some reason, using
// ```v
// for d.queue.len() > 0 && d.queue.peek()!.timestamp < now {
// ```
// here causes the function to exit prematurely, without any errors. Very weird:
// https://github.com/vlang/v/issues/14042
for d.queue.len() > 0 {
elem := d.queue.pop() or {
d.lerror("queue.pop() returned an error. This shouldn't happen.")
continue
}
if elem.timestamp < now {
new_queue.insert(elem)
} else {
break
}
}
d.queue = new_queue
// For each target in targets, parse their cron expression (or use the
// default one if not present) & add them to the queue
for target in d.targets {
d.schedule_build(target)
}
}
// rebuild_base_image recreates the builder image.
fn (mut d Daemon) rebuild_base_image() bool {
d.linfo('Rebuilding builder image...')
d.builder_images << build.create_build_image(d.base_image) or {
d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timout}s...')
d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout)
return false
}
d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
return true
}
// clean_old_base_images tries to remove any old but still present builder
// images.
fn (mut d Daemon) clean_old_base_images() {
mut i := 0
mut dd := docker.new_conn() or {
d.lerror('Failed to connect to Docker socket.')
return
}
defer {
dd.close() or {}
}
for i < d.builder_images.len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(d.builder_images[i]) or { i += 1 }
}
}

View File

@ -0,0 +1,35 @@
module daemon
import log
// log create a log message with the given level
pub fn (mut d Daemon) log(msg string, level log.Level) {
lock d.logger {
d.logger.send_output(msg, level)
}
}
// lfatal create a log message with the fatal level
pub fn (mut d Daemon) lfatal(msg string) {
d.log(msg, log.Level.fatal)
}
// lerror create a log message with the error level
pub fn (mut d Daemon) lerror(msg string) {
d.log(msg, log.Level.error)
}
// lwarn create a log message with the warn level
pub fn (mut d Daemon) lwarn(msg string) {
d.log(msg, log.Level.warn)
}
// linfo create a log message with the info level
pub fn (mut d Daemon) linfo(msg string) {
d.log(msg, log.Level.info)
}
// ldebug create a log message with the debug level
pub fn (mut d Daemon) ldebug(msg string) {
d.log(msg, log.Level.debug)
}

View File

@ -1,101 +0,0 @@
module cron
#flag -I @VMODROOT/libvieter/include
#flag -L @VMODROOT/libvieter/build
#flag -lvieter
#include "vieter_cron.h"
[typedef]
pub struct C.vieter_cron_expression {
minutes &u8
hours &u8
days &u8
months &u8
minute_count u8
hour_count u8
day_count u8
month_count u8
}
pub type Expression = C.vieter_cron_expression
// == returns whether the two expressions are equal by value.
fn (ce1 Expression) == (ce2 Expression) bool {
if ce1.month_count != ce2.month_count || ce1.day_count != ce2.day_count
|| ce1.hour_count != ce2.hour_count || ce1.minute_count != ce2.minute_count {
return false
}
for i in 0 .. ce1.month_count {
unsafe {
if ce1.months[i] != ce2.months[i] {
return false
}
}
}
for i in 0 .. ce1.day_count {
unsafe {
if ce1.days[i] != ce2.days[i] {
return false
}
}
}
for i in 0 .. ce1.hour_count {
unsafe {
if ce1.hours[i] != ce2.hours[i] {
return false
}
}
}
for i in 0 .. ce1.minute_count {
unsafe {
if ce1.minutes[i] != ce2.minutes[i] {
return false
}
}
}
return true
}
[typedef]
struct C.vieter_cron_simple_time {
year int
month int
day int
hour int
minute int
}
type SimpleTime = C.vieter_cron_simple_time
enum ParseError as u8 {
ok = 0
invalid_expression = 1
invalid_number = 2
out_of_range = 3
too_many_parts = 4
not_enough_parts = 5
}
// str returns the string representation of a ParseError.
fn (e ParseError) str() string {
return match e {
.ok { '' }
.invalid_expression { 'Invalid expression' }
.invalid_number { 'Invalid number' }
.out_of_range { 'Out of range' }
.too_many_parts { 'Too many parts' }
.not_enough_parts { 'Not enough parts' }
}
}
fn C.vieter_cron_expr_init() &C.vieter_cron_expression
fn C.vieter_cron_expr_free(ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_next(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression, ref &C.vieter_cron_simple_time)
fn C.vieter_cron_expr_next_from_now(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression)
fn C.vieter_cron_expr_parse(out &C.vieter_cron_expression, s &char) ParseError

View File

@ -1,73 +0,0 @@
module cron
import time
// free the memory associated with the Expression.
[unsafe]
pub fn (ce &Expression) free() {
C.vieter_cron_expr_free(ce)
}
// parse_expression parses a string into an Expression.
pub fn parse_expression(exp string) !&Expression {
out := C.vieter_cron_expr_init()
res := C.vieter_cron_expr_parse(out, exp.str)
if res != .ok {
return error(res.str())
}
return out
}
// next calculates the next occurrence of the cron schedule, given a reference
// point.
pub fn (ce &Expression) next(ref time.Time) time.Time {
st := SimpleTime{
year: ref.year
month: ref.month
day: ref.day
hour: ref.hour
minute: ref.minute
}
out := SimpleTime{}
C.vieter_cron_expr_next(&out, ce, &st)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_from_now calculates the next occurrence of the cron schedule with the
// current time as reference.
pub fn (ce &Expression) next_from_now() time.Time {
out := SimpleTime{}
C.vieter_cron_expr_next_from_now(&out, ce)
return time.new_time(time.Time{
year: out.year
month: out.month
day: out.day
hour: out.hour
minute: out.minute
})
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &Expression) next_n(ref time.Time, n int) []time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)
for i in 1 .. n {
times << ce.next(times[i - 1])
}
return times
}

View File

@ -0,0 +1,136 @@
module expression
import time
pub struct CronExpression {
minutes []int
hours []int
days []int
months []int
}
// next calculates the earliest time this cron expression is valid. It will
// always pick a moment in the future, even if ref matches completely up to the
// minute. This function consciously does not take leap years into account.
pub fn (ce &CronExpression) next(ref time.Time) !time.Time {
// If the given ref matches the next cron occurrence up to the minute, we
// should not return ref itself, as we always want to return a value in the
// future. The minute comparison below therefore uses >=, which is enough to
// guarantee this because the cron expression does not allow for accuracy
// smaller than one minute.
sref := ref
// For all of these values, the rule is the following: if their value is
// the length of their respective array in the CronExpression object, that
// means we've looped back around. This means that the "bigger" value has
// to be incremented by one. For example, if the minutes have looped
// around, that means that the hour has to be incremented as well.
mut minute_index := 0
mut hour_index := 0
mut day_index := 0
mut month_index := 0
// This chain is the same logic multiple times, namely that if a "bigger"
// value loops around, then the smaller value will always reset as well.
// For example, if we're going to a new day, the hour & minute will always
// be their smallest value again.
for month_index < ce.months.len && sref.month > ce.months[month_index] {
month_index++
}
if month_index < ce.months.len && sref.month == ce.months[month_index] {
for day_index < ce.days.len && sref.day > ce.days[day_index] {
day_index++
}
if day_index < ce.days.len && ce.days[day_index] == sref.day {
for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] {
hour_index++
}
if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour {
// Minute is the only value where we explicitly make sure we
// can't match sref's value exactly. This is to ensure we only
// return values in the future.
for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] {
minute_index++
}
}
}
}
// Here, we increment the "bigger" values by one if the smaller ones loop
// around. The order is important, as it allows a sort-of waterfall effect
// to occur which updates all values if required.
if minute_index == ce.minutes.len && hour_index < ce.hours.len {
hour_index += 1
}
if hour_index == ce.hours.len && day_index < ce.days.len {
day_index += 1
}
if day_index == ce.days.len && month_index < ce.months.len {
month_index += 1
}
mut minute := ce.minutes[minute_index % ce.minutes.len]
mut hour := ce.hours[hour_index % ce.hours.len]
mut day := ce.days[day_index % ce.days.len]
// Sometimes, we end up with a day that does not exist within the selected
// month, e.g. day 30 in February. When this occurs, we reset day back to
// the smallest value & loop over to the next month that does have this
// day.
if day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
day = ce.days[0]
month_index += 1
for day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
month_index += 1
// If for whatever reason the day value ends up being something
// that can't be scheduled in any month, we have to make sure we
// don't create an infinite loop.
if month_index == 2 * ce.months.len {
return error('No schedulable moment.')
}
}
}
month := ce.months[month_index % ce.months.len]
mut year := sref.year
// If the month loops over, we need to increment the year.
if month_index >= ce.months.len {
year++
}
return time.new_time(time.Time{
year: year
month: month
day: day
minute: minute
hour: hour
})
}
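As a hedged worked example of the matching logic above, assuming same-module access to the struct fields: take minutes=[0], hours=[3] (i.e. the expression '0 3') and a reference that already matches up to the minute. The minute loop then overflows, the waterfall increments the day, and the result lands exactly one day later:

```v
import time

fn main() {
	ce := CronExpression{
		minutes: [0]
		hours: [3]
		days: [1, 2] // abbreviated; a '*' day part expands to 1..31
		months: [1]
	}
	ref := time.new_time(time.Time{ year: 2002, month: 1, day: 1, hour: 3, minute: 0 })
	next := ce.next(ref) or { return }
	println(next) // 2002-01-02 03:00:00, since ref itself is never returned
}
```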
// next_from_now returns the result of ce.next(ref) where ref is the result of
// time.now().
pub fn (ce &CronExpression) next_from_now() !time.Time {
return ce.next(time.now())
}
// next_n returns the n next occurrences of the expression, given a starting
// time.
pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time {
mut times := []time.Time{cap: n}
times << ce.next(ref)!
for i in 1 .. n {
times << ce.next(times[i - 1])!
}
return times
}

View File

@ -0,0 +1,146 @@
module expression
import bitfield
// parse_range parses a given string into a range of sorted integers. Its
// result is a BitField with a set bit for each number in the range.
fn parse_range(s string, min int, max int) !bitfield.BitField {
mut start := min
mut end := max
mut interval := 1
mut bf := bitfield.new(max - min + 1)
exps := s.split('/')
if exps.len > 2 {
return error('Invalid expression.')
}
if exps[0] != '*' {
dash_parts := exps[0].split('-')
if dash_parts.len > 2 {
return error('Invalid expression.')
}
start = dash_parts[0].int()
// The builtin parsing functions return zero if the string can't be
// parsed into a number, so we have to explicitly check whether they
// actually entered zero or if it's an invalid number.
if start == 0 && dash_parts[0] != '0' {
return error('Invalid number.')
}
// Check whether the start value is out of range
if start < min || start > max {
return error('Out of range.')
}
if dash_parts.len == 2 {
end = dash_parts[1].int()
if end == 0 && dash_parts[1] != '0' {
return error('Invalid number.')
}
if end < start || end > max {
return error('Out of range.')
}
}
}
if exps.len > 1 {
interval = exps[1].int()
// interval being zero is always invalid, but we want to check why
// it's invalid for better error messages.
if interval == 0 {
if exps[1] != '0' {
return error('Invalid number.')
} else {
return error('Step size zero not allowed.')
}
}
if interval > max - min {
return error('Step size too large.')
}
}
// Here, s solely consists of a number, so that's the only value we
// should return.
else if exps[0] != '*' && !exps[0].contains('-') {
bf.set_bit(start - min)
return bf
}
for start <= end {
bf.set_bit(start - min)
start += interval
}
return bf
}
// bf_to_ints takes a BitField and converts it into the expected list of actual
// integers.
fn bf_to_ints(bf bitfield.BitField, min int) []int {
mut out := []int{}
for i in 0 .. bf.get_size() {
if bf.get_bit(i) == 1 {
out << min + i
}
}
return out
}
// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ![]int {
mut bf := bitfield.new(max - min + 1)
for range in s.split(',') {
bf2 := parse_range(range, min, max)!
bf = bitfield.bf_or(bf, bf2)
}
return bf_to_ints(bf, min)
}
// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) !CronExpression {
// The filter allows for multiple spaces between parts
mut parts := exp.split(' ').filter(it != '')
if parts.len < 2 || parts.len > 4 {
return error('Expression must contain between 2 and 4 space-separated parts.')
}
// For ease of use, we allow the user to only specify as many parts as they
// need.
for parts.len < 4 {
parts << '*'
}
mut part_results := [][]int{}
mins := [0, 0, 1, 1]
maxs := [59, 23, 31, 12]
// This for loop allows us to more clearly propagate the error to the user.
for i, min in mins {
part_results << parse_part(parts[i], min, maxs[i]) or {
return error('An error occurred with part $i: $err.msg()')
}
}
return CronExpression{
minutes: part_results[0]
hours: part_results[1]
days: part_results[2]
months: part_results[3]
}
}
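A quick hedged illustration of the padding above: a two-part expression is extended with '*' for the day and month parts before each part is parsed against its own bounds:

```v
fn main() {
	ce := parse_expression('0 3') or { return }
	// equivalent to '0 3 * *':
	// minutes = [0], hours = [3], days = [1..31], months = [1..12]
	println(ce.minutes) // [0]
	println(ce.hours) // [3]
}
```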

View File

@ -0,0 +1,89 @@
module expression
// parse_range_error returns the error message produced by parse_range. If the
// result is '', the function didn't error.
fn parse_range_error(s string, min int, max int) string {
parse_range(s, min, max) or { return err.msg() }
return ''
}
// =====parse_range=====
fn test_range_star_range() ! {
bf := parse_range('*', 0, 5)!
assert bf_to_ints(bf, 0) == [0, 1, 2, 3, 4, 5]
}
fn test_range_number() ! {
bf := parse_range('4', 0, 5)!
assert bf_to_ints(bf, 0) == [4]
}
fn test_range_number_too_large() ! {
assert parse_range_error('10', 0, 6) == 'Out of range.'
}
fn test_range_number_too_small() ! {
assert parse_range_error('0', 2, 6) == 'Out of range.'
}
fn test_range_number_invalid() ! {
assert parse_range_error('x', 0, 6) == 'Invalid number.'
}
fn test_range_step_star_1() ! {
bf := parse_range('*/4', 0, 20)!
assert bf_to_ints(bf, 0) == [0, 4, 8, 12, 16, 20]
}
fn test_range_step_star_2() ! {
bf := parse_range('*/3', 1, 8)!
assert bf_to_ints(bf, 1) == [1, 4, 7]
}
fn test_range_step_star_too_large() ! {
assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
}
fn test_range_step_zero() ! {
assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
}
fn test_range_step_number() ! {
bf := parse_range('5/4', 2, 22)!
assert bf_to_ints(bf, 2) == [5, 9, 13, 17, 21]
}
fn test_range_step_number_too_large() ! {
assert parse_range_error('10/4', 0, 5) == 'Out of range.'
}
fn test_range_step_number_too_small() ! {
assert parse_range_error('2/4', 5, 10) == 'Out of range.'
}
fn test_range_dash() ! {
bf := parse_range('4-8', 0, 9)!
assert bf_to_ints(bf, 0) == [4, 5, 6, 7, 8]
}
fn test_range_dash_step() ! {
bf := parse_range('4-8/2', 0, 9)!
assert bf_to_ints(bf, 0) == [4, 6, 8]
}
// =====parse_part=====
fn test_part_single() ! {
assert parse_part('*', 0, 5)! == [0, 1, 2, 3, 4, 5]
}
fn test_part_multiple() ! {
assert parse_part('*/2,2/3', 1, 8)! == [1, 2, 3, 5, 7, 8]
}

View File

@ -1,4 +1,4 @@
module cron
module expression
import time { parse }
@ -7,7 +7,7 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! {
t1 := parse(t1_str)!
t2 := parse(t2_str)!
t3 := ce.next(t1)
t3 := ce.next(t1)!
assert t2.year == t3.year
assert t2.month == t3.month
@ -18,18 +18,17 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! {
fn test_next_simple() ! {
// Very simple
// util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
// Overlap to next day
mut exp := '0 3 '
util_test_time(exp, '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
util_test_time(exp, '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3-7/4,7-19', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
//// Overlap to next month
// Overlap to next month
util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')!
//// Overlap to next year
// Overlap to next year
util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')!
}

View File

@ -1,42 +0,0 @@
module cron
fn test_not_allowed() {
illegal_expressions := [
'4 *-7',
'4 *-7/4',
'4 7/*',
'0 0 30 2',
'0 /5',
'0 ',
'0',
' 0',
' 0 ',
'1 2 3 4~9',
'1 1-3-5',
'0 5/2-5',
'',
'1 1/2/3',
'*5 8',
'x 8',
]
mut res := false
for exp in illegal_expressions {
res = false
parse_expression(exp) or { res = true }
assert res, "'${exp}' should produce an error"
}
}
fn test_auto_extend() ! {
ce1 := parse_expression('5 5')!
ce2 := parse_expression('5 5 *')!
ce3 := parse_expression('5 5 * *')!
assert ce1 == ce2 && ce2 == ce3
}
fn test_four() {
parse_expression('0 1 2 3 ') or { assert false }
}

View File

@ -1,6 +1,6 @@
module dbms
module db
import db.sqlite
import sqlite
import time
pub struct VieterDb {
@ -49,13 +49,13 @@ pub fn init(db_path string) !VieterDb {
}
// Apply each migration in order
for i in cur_version.version .. dbms.migrations_up.len {
migration := dbms.migrations_up[i].to_string()
for i in cur_version.version .. db.migrations_up.len {
migration := db.migrations_up[i].to_string()
version_num := i + 1
// vfmt does not like these dots
println('Applying migration ${version_num}' + '...')
println('Applying migration $version_num' + '...')
// The sqlite library seems to not like it when multiple statements are
// passed in a single exec. Therefore, we split them & run them all
@ -64,7 +64,7 @@ pub fn init(db_path string) !VieterDb {
res := conn.exec_none(part)
if res != sqlite.sqlite_done {
return error('An error occurred while applying migration ${version_num}: SQLite error code ${res}')
return error('An error occurred while applying migration $version_num: SQLite error code $res')
}
}
@ -80,9 +80,9 @@ pub fn init(db_path string) !VieterDb {
}
}
// row_into[T] converts an sqlite.Row into a given type T by parsing each field
// row_into<T> converts an sqlite.Row into a given type T by parsing each field
// from a string according to its type.
pub fn row_into[T](row sqlite.Row) T {
pub fn row_into<T>(row sqlite.Row) T {
mut i := 0
mut out := T{}

View File

@ -1,4 +1,4 @@
module dbms
module db
import models { BuildLog, BuildLogFilter }
import time
@ -8,20 +8,20 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
mut where_parts := []string{}
if filter.target != 0 {
where_parts << 'target_id == ${filter.target}'
where_parts << 'target_id == $filter.target'
}
if filter.before != time.Time{} {
where_parts << 'start_time < ${filter.before.unix_time()}'
where_parts << 'start_time < $filter.before.unix_time()'
}
if filter.after != time.Time{} {
where_parts << 'start_time > ${filter.after.unix_time()}'
where_parts << 'start_time > $filter.after.unix_time()'
}
// NOTE: possible SQL injection
if filter.arch != '' {
where_parts << "arch == '${filter.arch}'"
where_parts << "arch == '$filter.arch'"
}
mut parts := []string{}
@ -30,27 +30,27 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
if exp[0] == `!` {
code := exp[1..].int()
parts << 'exit_code != ${code}'
parts << 'exit_code != $code'
} else {
code := exp.int()
parts << 'exit_code == ${code}'
parts << 'exit_code == $code'
}
}
if parts.len > 0 {
where_parts << parts.map('(${it})').join(' or ')
where_parts << parts.map('($it)').join(' or ')
}
mut where_str := ''
if where_parts.len > 0 {
where_str = 'where ' + where_parts.map('(${it})').join(' and ')
where_str = 'where ' + where_parts.map('($it)').join(' and ')
}
query := 'select * from BuildLog ${where_str} limit ${filter.limit} offset ${filter.offset}'
query := 'select * from BuildLog $where_str limit $filter.limit offset $filter.offset'
rows, _ := db.conn.exec(query)
res := rows.map(row_into[BuildLog](it))
res := rows.map(row_into<BuildLog>(it))
return res
}
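For example (hedged, using only the filter fields shown above), a BuildLogFilter with target 3, limit 10 and offset 0 assembles into:

```v
// where_parts = ['target_id == 3']
// query = 'select * from BuildLog where (target_id == 3) limit 10 offset 0'
```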

View File

@ -1,6 +1,25 @@
module dbms
module db
import models { Target, TargetArch }
import models { Target, TargetArch, TargetFilter }
// get_targets returns all targets in the database.
pub fn (db &VieterDb) get_targets(filter TargetFilter) []Target {
// This currently seems to be blocked by a bug in the ORM; I'll have to ask
// around.
if filter.repo != '' {
res := sql db.conn {
select from Target where repo == filter.repo order by id limit filter.limit offset filter.offset
}
return res
}
res := sql db.conn {
select from Target order by id limit filter.limit offset filter.offset
}
return res
}
// get_target tries to return a specific target.
pub fn (db &VieterDb) get_target(target_id int) ?Target {
@ -49,13 +68,13 @@ pub fn (db &VieterDb) update_target(target_id int, params map[string]string) {
if field.name in params {
// Any fields that are array types require their own update method
$if field.typ is string {
values << "${field.name} = '${params[field.name]}'"
values << "$field.name = '${params[field.name]}'"
}
}
}
values_str := values.join(', ')
// I think this is actual SQL & not the ORM language
query := 'update Target set ${values_str} where id == ${target_id}'
query := 'update Target set $values_str where id == $target_id'
db.conn.exec_none(query)
}

View File

@ -1,129 +0,0 @@
module dbms
import models { Target, TargetFilter }
import db.sqlite
// Iterator providing a filtered view into the list of targets currently stored
// in the database. It replaces functionality usually performed in the database
// using SQL queries that can't currently be used due to missing features in V's
// ORM.
pub struct TargetsIterator {
conn sqlite.DB
filter TargetFilter
window_size int = 32
mut:
window []Target
window_index u64
// Offset in entire list of unfiltered targets
offset int
// Offset in filtered list of targets
filtered_offset u64
started bool
done bool
}
// targets returns an iterator allowing filtered access to the list of targets.
pub fn (db &VieterDb) targets(filter TargetFilter) TargetsIterator {
window_size := 32
return TargetsIterator{
conn: db.conn
filter: filter
window: []Target{cap: window_size}
window_size: window_size
}
}
// advance_window moves the sliding window over the filtered list of targets
// until it either reaches the end of the list of targets, or has encountered a
// non-empty window.
fn (mut ti TargetsIterator) advance_window() {
for {
ti.window = sql ti.conn {
select from Target order by id limit ti.window_size offset ti.offset
}
ti.offset += ti.window.len
if ti.window.len == 0 {
ti.done = true
return
}
if ti.filter.repo != '' {
ti.window = ti.window.filter(it.repo == ti.filter.repo)
}
if ti.filter.arch != '' {
ti.window = ti.window.filter(it.arch.any(it.value == ti.filter.arch))
}
if ti.filter.query != '' {
ti.window = ti.window.filter(it.url.contains(ti.filter.query)
|| it.path.contains(ti.filter.query) || it.branch.contains(ti.filter.query))
}
// We break out of the loop once we found a non-empty window
if ti.window.len > 0 {
break
}
}
}
// next returns the next target, if possible.
pub fn (mut ti TargetsIterator) next() ?Target {
if ti.done {
return none
}
// The first call to `next` will cause the sliding window to move to where
// the requested offset starts
if !ti.started {
ti.advance_window()
// Skip all matched targets until the requested offset
for !ti.done && ti.filtered_offset + u64(ti.window.len) <= ti.filter.offset {
ti.filtered_offset += u64(ti.window.len)
ti.advance_window()
}
if ti.done {
return none
}
left_inside_window := ti.filter.offset - ti.filtered_offset
ti.window_index = left_inside_window
ti.filtered_offset += left_inside_window
ti.started = true
}
return_value := ti.window[ti.window_index]
ti.window_index++
ti.filtered_offset++
// Next call will be past the requested offset
if ti.filter.limit > 0 && ti.filtered_offset == ti.filter.offset + ti.filter.limit {
ti.done = true
}
// Ensure the next call has a new valid window
if ti.window_index == u64(ti.window.len) {
ti.advance_window()
ti.window_index = 0
}
return return_value
}
// collect consumes the entire iterator & returns the result as an array.
pub fn (mut ti TargetsIterator) collect() []Target {
mut out := []Target{}
for t in ti {
out << t
}
return out
}
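A minimal hedged usage sketch, assuming a VieterDb handle and the models shown above; the iterator lazily pages through the table while applying the filter, so callers can either loop over it or collect it:

```v
import models { Target, TargetFilter }

fn example(db &VieterDb) []Target {
	mut iter := db.targets(TargetFilter{ repo: 'vieter', limit: 10 })
	// `for t in iter { ... }` would work as well; collect drains it into an array
	return iter.collect()
}
```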

@ -1 +0,0 @@
Subproject commit 379a05a7b6b604c107360e0a679fb3ea5400e02c

View File

@ -9,6 +9,7 @@ import console.schedule
import console.man
import console.aur
import console.repos
import cron
import agent
fn main() {
@ -20,7 +21,7 @@ fn main() {
mut app := cli.Command{
name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.'
version: '0.6.0'
version: '0.5.0'
posix_mode: true
flags: [
cli.Flag{
@ -42,6 +43,7 @@ fn main() {
commands: [
server.cmd(),
targets.cmd(),
cron.cmd(),
logs.cmd(),
schedule.cmd(),
man.cmd(),

View File

@ -10,10 +10,9 @@ pub:
repo string
base_image string
force bool
timeout int
}
// str returns a single-line string representation of a BuildConfig
pub fn (c BuildConfig) str() string {
return '{ target: ${c.target_id}, kind: ${c.kind}, url: ${c.url}, branch: ${c.branch}, path: ${c.path}, repo: ${c.repo}, base_image: ${c.base_image}, force: ${c.force}, timeout: ${c.timeout} }'
return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, path: $c.path, repo: $c.repo, base_image: $c.base_image, force: $c.force }'
}

View File

@ -16,13 +16,13 @@ pub mut:
// str returns a string representation.
pub fn (bl &BuildLog) str() string {
mut parts := [
'id: ${bl.id}',
'target id: ${bl.target_id}',
'start time: ${bl.start_time.local()}',
'end time: ${bl.end_time.local()}',
'id: $bl.id',
'target id: $bl.target_id',
'start time: $bl.start_time.local()',
'end time: $bl.end_time.local()',
'duration: ${bl.end_time - bl.start_time}',
'arch: ${bl.arch}',
'exit code: ${bl.exit_code}',
'arch: $bl.arch',
'exit code: $bl.exit_code',
]
str := parts.join('\n')

View File

@ -2,19 +2,19 @@ module models
import time
// from_params[T] creates a new instance of T from the given map by parsing all
// from_params<T> creates a new instance of T from the given map by parsing all
// of its fields from the map.
pub fn from_params[T](params map[string]string) ?T {
pub fn from_params<T>(params map[string]string) ?T {
mut o := T{}
patch_from_params[T](mut o, params)?
patch_from_params<T>(mut o, params)?
return o
}
// patch_from_params[T] updates the given T object with the params defined in
// patch_from_params<T> updates the given T object with the params defined in
// the map.
pub fn patch_from_params[T](mut o T, params map[string]string) ? {
pub fn patch_from_params<T>(mut o T, params map[string]string) ? {
$for field in T.fields {
if field.name in params && params[field.name] != '' {
$if field.typ is string {
@ -36,8 +36,8 @@ pub fn patch_from_params[T](mut o T, params map[string]string) ? {
}
}
// params_from[T] converts a given T struct into a map of strings.
pub fn params_from[T](o &T) map[string]string {
// params_from<T> converts a given T struct into a map of strings.
pub fn params_from<T>(o &T) map[string]string {
mut out := map[string]string{}
$for field in T.fields {

View File

@ -38,13 +38,13 @@ pub mut:
// str returns a string representation.
pub fn (t &Target) str() string {
mut parts := [
'id: ${t.id}',
'kind: ${t.kind}',
'url: ${t.url}',
'branch: ${t.branch}',
'path: ${t.path}',
'repo: ${t.repo}',
'schedule: ${t.schedule}',
'id: $t.id',
'kind: $t.kind',
'url: $t.url',
'branch: $t.branch',
'path: $t.path',
'repo: $t.repo',
'schedule: $t.schedule',
'arch: ${t.arch.map(it.value).join(', ')}',
]
str := parts.join('\n')
@ -54,7 +54,7 @@ pub fn (t &Target) str() string {
// as_build_config converts a Target into a BuildConfig, given some extra
// needed information.
pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) BuildConfig {
pub fn (t &Target) as_build_config(base_image string, force bool) BuildConfig {
return BuildConfig{
target_id: t.id
kind: t.kind
@ -64,7 +64,6 @@ pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) B
repo: t.repo
base_image: base_image
force: force
timeout: timeout
}
}
@ -74,6 +73,4 @@ pub mut:
limit u64 = 25
offset u64
repo string
query string
arch string
}

View File

@ -3,14 +3,14 @@ module package
// format_entry returns a string properly formatted to be added to a desc file.
[inline]
fn format_entry(key string, value string) string {
return '\n%${key}%\n${value}\n'
return '\n%$key%\n$value\n'
}
// full_name returns the properly formatted name for the package, including
// version & architecture
pub fn (pkg &Pkg) full_name() string {
p := pkg.info
return '${p.name}-${p.version}-${p.arch}'
return '$p.name-$p.version-$p.arch'
}
// filename returns the correct filename of the package file
@ -20,10 +20,10 @@ pub fn (pkg &Pkg) filename() string {
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: ${pkg.compression}") }
else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
}
return '${pkg.full_name()}.pkg${ext}'
return '${pkg.full_name()}.pkg$ext'
}
// to_desc returns a desc file valid string representation
@ -31,7 +31,7 @@ pub fn (pkg &Pkg) to_desc() !string {
p := pkg.info
// filename
mut desc := '%FILENAME%\n${pkg.filename()}\n'
mut desc := '%FILENAME%\n$pkg.filename()\n'
desc += format_entry('NAME', p.name)
desc += format_entry('BASE', p.base)
@ -94,10 +94,10 @@ pub fn (pkg &Pkg) to_desc() !string {
desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
}
return '${desc}\n'
return '$desc\n'
}
// to_files returns a files file valid string representation
pub fn (pkg &Pkg) to_files() string {
return '%FILES%\n${pkg.files.join_lines()}\n'
return '%FILES%\n$pkg.files.join_lines()\n'
}
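As a small worked example of the formatting above, for a hypothetical package named vieter at version 0.5.0-1 on x86_64:

```v
// format_entry('NAME', 'vieter') == '\n%NAME%\nvieter\n'
// full_name()                    == 'vieter-0.5.0-1-x86_64'
// filename() (zstd, code 14)     == 'vieter-0.5.0-1-x86_64.pkg.tar.zst'
```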

View File

@ -103,7 +103,7 @@ fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
// NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs.
pub fn read_pkg_archive(pkg_path string) !Pkg {
if !os.is_file(pkg_path) {
return error("'${pkg_path}' doesn't exist or isn't a file.")
return error("'$pkg_path' doesn't exist or isn't a file.")
}
a := C.archive_read_new()

View File

@ -31,15 +31,11 @@ pub:
// new creates a new RepoGroupManager & creates the directories as needed
pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager {
if !os.is_dir(repos_dir) {
os.mkdir_all(repos_dir) or {
return error('Failed to create repos directory: ${err.msg()}')
}
os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
}
if !os.is_dir(pkg_dir) {
os.mkdir_all(pkg_dir) or {
return error('Failed to create package directory: ${err.msg()}')
}
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') }
}
return RepoGroupManager{
@ -55,7 +51,7 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupMana
// the right subdirectories in r.pkg_dir if it was successfully added.
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult {
pkg := package.read_pkg_archive(pkg_path) or {
return error('Failed to read package file: ${err.msg()}')
return error('Failed to read package file: $err.msg()')
}
archs := r.add_pkg_in_repo(repo, pkg)!
@ -133,7 +129,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]strin
// files, and afterwards updates the db & files archives to reflect these
// changes.
fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! {
pkg_dir := os.join_path(r.repos_dir, repo, arch, '${pkg.info.name}-${pkg.info.version}')
pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
// Remove the previous version of the package, if present
r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)!

View File

@ -5,7 +5,7 @@ import os
// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
// returns false if the package wasn't present in the database. It also
// optionally re-syncs the repo archives.
pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, perform_sync bool) !bool {
pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) !bool {
repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false
@ -39,7 +39,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
}
// Sync the db archives if requested
if perform_sync {
if sync {
r.sync(repo, arch)!
}

View File

@ -3,6 +3,7 @@ module server
import web
import net.urllib
import web.response { new_data_response, new_response }
import db
import time
import os
import util
@ -12,7 +13,7 @@ import models { BuildLog, BuildLogFilter }
// optionally be added to limit the list of build logs to that repository.
['/api/v1/logs'; auth; get; markused]
fn (mut app App) v1_get_logs() web.Result {
filter := models.from_params[BuildLogFilter](app.query) or {
filter := models.from_params<BuildLogFilter>(app.query) or {
return app.json(.bad_request, new_response('Invalid query parameters.'))
}
logs := app.db.get_build_logs(filter)
@ -100,7 +101,7 @@ fn (mut app App) v1_post_log() web.Result {
// Create the logs directory if it doesn't exist
if !os.exists(os.dir(log_file_path)) {
os.mkdir_all(os.dir(log_file_path)) or {
app.lerror('Error while creating log file: ${err.msg()}')
app.lerror('Error while creating log file: $err.msg()')
return app.status(.internal_server_error)
}
@ -108,7 +109,7 @@ fn (mut app App) v1_post_log() web.Result {
if length := app.req.header.get(.content_length) {
util.reader_to_file(mut app.reader, length.int(), log_file_path) or {
app.lerror('An error occurred while receiving logs: ${err.msg()}')
app.lerror('An error occurred while receiving logs: $err.msg()')
return app.status(.internal_server_error)
}
@ -126,7 +127,7 @@ fn (mut app App) v1_delete_log(id int) web.Result {
full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
os.rm(full_path) or {
app.lerror('Failed to remove log file ${full_path}: ${err.msg()}')
app.lerror('Failed to remove log file $full_path: $err.msg()')
return app.status(.internal_server_error)
}

View File

@ -1,19 +0,0 @@
module server
import metrics
import web
// v1_metrics serves a Prometheus-compatible metrics endpoint.
['/api/v1/metrics'; get; markused]
fn (mut app App) v1_metrics() web.Result {
if !app.conf.collect_metrics {
return app.status(.not_found)
}
mut exporter := metrics.new_prometheus_exporter()
exporter.load('vieter_', app.collector)
// TODO stream to connection instead
body := exporter.export_to_string() or { return app.status(.internal_server_error) }
return app.body(.ok, 'text/plain', body)
}

View File

@ -2,17 +2,18 @@ module server
import web
import web.response { new_data_response, new_response }
import db
import models { Target, TargetArch, TargetFilter }
// v1_get_targets returns the current list of targets.
['/api/v1/targets'; auth; get; markused]
fn (mut app App) v1_get_targets() web.Result {
filter := models.from_params[TargetFilter](app.query) or {
filter := models.from_params<TargetFilter>(app.query) or {
return app.json(.bad_request, new_response('Invalid query parameters.'))
}
mut iter := app.db.targets(filter)
targets := app.db.get_targets(filter)
return app.json(.ok, new_data_response(iter.collect()))
return app.json(.ok, new_data_response(targets))
}
// v1_get_single_target returns the information for a single target.
@ -34,7 +35,7 @@ fn (mut app App) v1_post_target() web.Result {
params['arch'] = app.conf.default_arch
}
mut new_target := models.from_params[Target](params) or {
mut new_target := models.from_params<Target>(params) or {
return app.json(.bad_request, new_response(err.msg()))
}

View File

@ -5,18 +5,16 @@ import conf as vconf
struct Config {
pub:
port int = 8000
log_level string = 'WARN'
pkg_dir string
data_dir string
api_key string
default_arch string
global_schedule string = '0 3'
base_image string = 'archlinux:base-devel'
max_log_age int [empty_default]
log_removal_schedule string = '0 0'
collect_metrics bool [empty_default]
default_build_timeout int = 3600
port int = 8000
log_level string = 'WARN'
pkg_dir string
data_dir string
api_key string
default_arch string
global_schedule string = '0 3'
base_image string = 'archlinux:base-devel'
max_log_age int [empty_default]
log_removal_schedule string = '0 0'
}
// cmd returns the cli submodule that handles starting the server
@ -26,9 +24,9 @@ pub fn cmd() cli.Command {
description: 'Start the Vieter server.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
server(conf_)!
server(conf)!
}
}
}

View File

@ -3,16 +3,20 @@ module server
import time
import models { BuildLog }
import os
import cron
import cron.expression { CronExpression }
const fallback_log_removal_frequency = 24 * time.hour
// log_removal_daemon periodically removes old build logs according to `schedule`.
fn (mut app App) log_removal_daemon(schedule &cron.Expression) {
fn (mut app App) log_removal_daemon(schedule CronExpression) {
mut start_time := time.Time{}
for {
start_time = time.now()
mut too_old_timestamp := time.now().add_days(-app.conf.max_log_age)
app.linfo('Cleaning logs before ${too_old_timestamp}')
app.linfo('Cleaning logs before $too_old_timestamp')
mut logs := []BuildLog{}
mut counter := 0
@ -29,7 +33,7 @@ fn (mut app App) log_removal_daemon(schedule &cron.Expression) {
log_file_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
os.rm(log_file_path) or {
app.lerror('Failed to remove log file ${log_file_path}: ${err.msg()}')
app.lerror('Failed to remove log file $log_file_path: $err.msg()')
failed += 1
continue
@ -44,10 +48,15 @@ fn (mut app App) log_removal_daemon(schedule &cron.Expression) {
}
}
app.linfo('Cleaned ${counter} logs (${failed} failed)')
app.linfo('Cleaned $counter logs ($failed failed)')
// Sleep until the next cycle
next_time := schedule.next_from_now()
next_time := schedule.next_from_now() or {
app.lerror("Log removal daemon couldn't calculate next time: $err.msg(); fallback to $server.fallback_log_removal_frequency")
start_time.add(server.fallback_log_removal_frequency)
}
time.sleep(next_time - time.now())
}
}

View File

@ -19,15 +19,15 @@ pub fn (mut app App) healthcheck() web.Result {
// repository's archives, but also package archives or the contents of a
// package's desc file.
['/:repo/:arch/:filename'; get; head; markused]
fn (mut app App) get_repo_file(repo_ string, arch string, filename string) web.Result {
fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
mut full_path := ''
db_exts := ['.db', '.files', '.db.tar.gz', '.files.tar.gz']
// There's no point in having the ability to serve db archives with wrong
// filenames
if db_exts.any(filename == '${repo_}${it}') {
full_path = os.join_path(app.repo.repos_dir, repo_, arch, filename)
if db_exts.any(filename == '$repo$it') {
full_path = os.join_path(app.repo.repos_dir, repo, arch, filename)
// repo-add does this using symlinks, but we just change the requested
// path
@ -35,13 +35,13 @@ fn (mut app App) get_repo_file(repo_ string, arch string, filename string) web.R
full_path += '.tar.gz'
}
} else if filename.contains('.pkg') {
full_path = os.join_path(app.repo.pkg_dir, repo_, arch, filename)
full_path = os.join_path(app.repo.pkg_dir, repo, arch, filename)
}
// Default behavior is to return the desc file for the package, if present.
// This can then also be used by the build system to properly check whether
// a package is present in an arch-repo.
else {
full_path = os.join_path(app.repo.repos_dir, repo_, arch, filename, 'desc')
full_path = os.join_path(app.repo.repos_dir, repo, arch, filename, 'desc')
}
return app.file(full_path)
@ -49,10 +49,10 @@ fn (mut app App) get_repo_file(repo_ string, arch string, filename string) web.R
// put_package handles publishing a package to a repository.
['/:repo/publish'; auth; markused; post]
fn (mut app App) put_package(repo_ string) web.Result {
fn (mut app App) put_package(repo string) web.Result {
// api is a reserved keyword for api routes & should never be allowed to be
// a repository.
if repo_.to_lower() == 'api' {
if repo.to_lower() == 'api' {
return app.json(.bad_request, new_response("'api' is a reserved keyword & cannot be used as a repository name."))
}
@ -62,19 +62,19 @@ fn (mut app App) put_package(repo_ string) web.Result {
// Generate a random filename for the temp file
pkg_path = os.join_path_single(app.repo.pkg_dir, rand.uuid_v4())
app.ldebug("Uploading ${length} bytes (${util.pretty_bytes(length.int())}) to '${pkg_path}'.")
app.ldebug("Uploading $length bytes (${util.pretty_bytes(length.int())}) to '$pkg_path'.")
// This is used to time how long it takes to upload a file
mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
app.lwarn("Failed to upload '${pkg_path}': ${err.msg()}")
app.lwarn("Failed to upload '$pkg_path'")
return app.status(.internal_server_error)
}
sw.stop()
app.ldebug("Upload of '${pkg_path}' completed in ${sw.elapsed().seconds():.3}s.")
app.ldebug("Upload of '$pkg_path' completed in ${sw.elapsed().seconds():.3}s.")
} else {
app.lwarn('Tried to upload package without specifying a Content-Length.')
@ -82,15 +82,15 @@ fn (mut app App) put_package(repo_ string) web.Result {
return app.status(.length_required)
}
res := app.repo.add_pkg_from_path(repo_, pkg_path) or {
app.lerror('Error while adding package: ${err.msg()}')
res := app.repo.add_pkg_from_path(repo, pkg_path) or {
app.lerror('Error while adding package: $err.msg()')
os.rm(pkg_path) or { app.lerror("Failed to remove download '${pkg_path}': ${err.msg()}") }
os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }
return app.status(.internal_server_error)
}
app.linfo("Added '${res.name}-${res.version}' to '${repo_} (${res.archs.join(',')})'.")
app.linfo("Added '$res.name-$res.version' to '$repo (${res.archs.join(',')})'.")
return app.json(.ok, new_data_response(res))
}

View File

@ -6,17 +6,17 @@ import web
['/:repo/:arch/:pkg'; auth; delete; markused]
fn (mut app App) delete_package(repo string, arch string, pkg string) web.Result {
res := app.repo.remove_pkg_from_arch_repo(repo, arch, pkg, true) or {
app.lerror('Error while deleting package: ${err.msg()}')
app.lerror('Error while deleting package: $err.msg()')
return app.status(.internal_server_error)
}
if res {
app.linfo("Removed package '${pkg}' from '${repo}/${arch}'")
app.linfo("Removed package '$pkg' from '$repo/$arch'")
return app.status(.ok)
} else {
app.linfo("Tried removing package '${pkg}' from '${repo}/${arch}', but it doesn't exist.")
app.linfo("Tried removing package '$pkg' from '$repo/$arch', but it doesn't exist.")
return app.status(.not_found)
}
@ -26,17 +26,17 @@ fn (mut app App) delete_package(repo string, arch string, pkg string) web.Result
['/:repo/:arch'; auth; delete; markused]
fn (mut app App) delete_arch_repo(repo string, arch string) web.Result {
res := app.repo.remove_arch_repo(repo, arch) or {
app.lerror('Error while deleting arch-repo: ${err.msg()}')
app.lerror('Error while deleting arch-repo: $err.msg()')
return app.status(.internal_server_error)
}
if res {
app.linfo("Removed arch-repo '${repo}/${arch}'")
app.linfo("Removed arch-repo '$repo/$arch'")
return app.status(.ok)
} else {
app.linfo("Tried removing '${repo}/${arch}', but it doesn't exist.")
app.linfo("Tried removing '$repo/$arch', but it doesn't exist.")
return app.status(.not_found)
}
@ -46,17 +46,17 @@ fn (mut app App) delete_arch_repo(repo string, arch string) web.Result {
['/:repo'; auth; delete; markused]
fn (mut app App) delete_repo(repo string) web.Result {
res := app.repo.remove_repo(repo) or {
app.lerror('Error while deleting repo: ${err.msg()}')
app.lerror('Error while deleting repo: $err.msg()')
return app.status(.internal_server_error)
}
if res {
app.linfo("Removed repo '${repo}'")
app.linfo("Removed repo '$repo'")
return app.status(.ok)
} else {
app.linfo("Tried removing '${repo}', but it doesn't exist.")
app.linfo("Tried removing '$repo', but it doesn't exist.")
return app.status(.not_found)
}

View File

@ -5,10 +5,9 @@ import os
import log
import repo
import util
import dbms
import db
import build { BuildJobQueue }
import cron
import metrics
import cron.expression
const (
log_file_name = 'vieter.log'
@ -25,14 +24,23 @@ pub mut:
repo repo.RepoGroupManager [required; web_global]
// Keys are the various architectures for packages
job_queue BuildJobQueue [required; web_global]
db dbms.VieterDb
db db.VieterDb
}
// init_job_queue populates a fresh job queue with all the targets currently
// stored in the database.
fn (mut app App) init_job_queue() ! {
for target in app.db.targets(limit: 0) {
app.job_queue.insert_all(target)!
// Initialize build queues
mut targets := app.db.get_targets(limit: 25)
mut i := u64(0)
for targets.len > 0 {
for target in targets {
app.job_queue.insert_all(target)!
}
i += 25
targets = app.db.get_targets(limit: 25, offset: i)
}
}
@ -43,12 +51,12 @@ pub fn server(conf Config) ! {
util.exit_with_message(1, "'any' is not allowed as the value for default_arch.")
}
global_ce := cron.parse_expression(conf.global_schedule) or {
util.exit_with_message(1, 'Invalid global cron expression: ${err.msg()}')
global_ce := expression.parse_expression(conf.global_schedule) or {
util.exit_with_message(1, 'Invalid global cron expression: $err.msg()')
}
log_removal_ce := cron.parse_expression(conf.log_removal_schedule) or {
util.exit_with_message(1, 'Invalid log removal cron expression: ${err.msg()}')
log_removal_ce := expression.parse_expression(conf.log_removal_schedule) or {
util.exit_with_message(1, 'Invalid log removal cron expression: $err.msg()')
}
// Configure logger
@ -82,40 +90,30 @@ pub fn server(conf Config) ! {
repo_dir := os.join_path_single(conf.data_dir, server.repo_dir_name)
// This also creates the directories if needed
repo_ := repo.new(repo_dir, conf.pkg_dir, conf.default_arch) or {
repo := repo.new(repo_dir, conf.pkg_dir, conf.default_arch) or {
logger.error(err.msg())
exit(1)
}
db_file := os.join_path_single(conf.data_dir, server.db_file_name)
db := dbms.init(db_file) or {
util.exit_with_message(1, 'Failed to initialize database: ${err.msg()}')
db := db.init(db_file) or {
util.exit_with_message(1, 'Failed to initialize database: $err.msg()')
}
mut collector := if conf.collect_metrics {
&metrics.MetricsCollector(metrics.new_default_collector())
} else {
&metrics.MetricsCollector(metrics.new_null_collector())
}
collector.histogram_buckets_set('http_requests_duration_seconds', [0.001, 0.005, 0.01, 0.05,
0.1, 0.5, 1, 5, 10])
mut app := &App{
logger: logger
api_key: conf.api_key
conf: conf
repo: repo_
repo: repo
db: db
collector: collector
job_queue: build.new_job_queue(global_ce, conf.base_image, conf.default_build_timeout)
job_queue: build.new_job_queue(global_ce, conf.base_image)
}
app.init_job_queue() or {
util.exit_with_message(1, 'Failed to initialize job queue: ${err.msg()}')
util.exit_with_message(1, 'Failed to initialize job queue: $err.msg()')
}
if conf.max_log_age > 0 {
spawn app.log_removal_daemon(log_removal_ce)
go app.log_removal_daemon(log_removal_ce)
}
web.run(app, conf.port)

View File

@ -46,16 +46,12 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! {
to_write = to_write - bytes_written
}
}
if bytes_left > 0 {
return error('Not all bytes were received.')
}
}
// match_array_in_array[T] returns how many elements of a2 overlap with a1. For
// match_array_in_array<T> returns how many elements of a2 overlap with a1. For
// example, if a1 = "abcd" & a2 = "cd", the result will be 2. If the match is
// not at the end of a1, the result is 0.
pub fn match_array_in_array[T](a1 []T, a2 []T) int {
pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
mut i := 0
mut match_len := 0

View File

@ -2,7 +2,6 @@ Module {
dependencies: [
'https://git.rustybever.be/vieter-v/conf',
'https://git.rustybever.be/vieter-v/docker',
'https://git.rustybever.be/vieter-v/aur',
'https://git.rustybever.be/vieter-v/metrics'
'https://git.rustybever.be/vieter-v/aur'
]
}

View File

@ -1,36 +1,35 @@
module web
import log
// log create a log message with the given level
pub fn (mut ctx Context) log(msg string, level log.Level) {
lock ctx.logger {
ctx.logger.send_output(msg, level)
}
}
// lfatal create a log message with the fatal level
pub fn (mut ctx Context) lfatal(msg string) {
lock ctx.logger {
ctx.logger.fatal(msg)
}
ctx.log(msg, log.Level.fatal)
}
// lerror create a log message with the error level
pub fn (mut ctx Context) lerror(msg string) {
lock ctx.logger {
ctx.logger.error(msg)
}
ctx.log(msg, log.Level.error)
}
// lwarn create a log message with the warn level
pub fn (mut ctx Context) lwarn(msg string) {
lock ctx.logger {
ctx.logger.warn(msg)
}
ctx.log(msg, log.Level.warn)
}
// linfo create a log message with the info level
pub fn (mut ctx Context) linfo(msg string) {
lock ctx.logger {
ctx.logger.info(msg)
}
ctx.log(msg, log.Level.info)
}
// ldebug create a log message with the debug level
pub fn (mut ctx Context) ldebug(msg string) {
lock ctx.logger {
ctx.logger.debug(msg)
}
ctx.log(msg, log.Level.debug)
}

View File

@ -10,7 +10,7 @@ const attrs_to_ignore = ['auth', 'markused']
// Parsing function attributes for methods and path.
fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
if attrs.len == 0 {
return [http.Method.get], '/${name}'
return [http.Method.get], '/$name'
}
mut x := attrs.clone()
@ -45,7 +45,7 @@ fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
methods = [http.Method.get]
}
if path == '' {
path = '/${name}'
path = '/$name'
}
// Make path lowercase for case-insensitive comparisons
return methods, path.to_lower()

View File

@ -1,6 +1,6 @@
module response
pub struct Response[T] {
pub struct Response<T> {
pub:
message string
data T
@ -8,26 +8,26 @@ pub:
// new_response constructs a new Response<string> object with the given message
// & an empty data field.
pub fn new_response(message string) Response[string] {
return Response[string]{
pub fn new_response(message string) Response<string> {
return Response<string>{
message: message
data: ''
}
}
// new_data_response[T] constructs a new Response<T> object with the given data
// new_data_response<T> constructs a new Response<T> object with the given data
// & an empty message field.
pub fn new_data_response[T](data T) Response[T] {
return Response[T]{
pub fn new_data_response<T>(data T) Response<T> {
return Response<T>{
message: ''
data: data
}
}
// new_full_response[T] constructs a new Response<T> object with the given
// new_full_response<T> constructs a new Response<T> object with the given
// message & data.
pub fn new_full_response[T](message string, data T) Response[T] {
return Response[T]{
pub fn new_full_response<T>(message string, data T) Response<T> {
return Response<T>{
message: message
data: data
}
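A hedged usage sketch of these constructors, assuming V's json module for serialization:

```v
import json

fn example() string {
	res := new_data_response([1, 2, 3])
	// encodes to {"message":"","data":[1,2,3]}
	return json.encode(res)
}
```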

View File

@ -11,7 +11,6 @@ import net.urllib
import time
import json
import log
import metrics
// The Context struct represents the Context which hold the HTTP request and response.
// It has fields for the query, form, files.
@ -28,8 +27,6 @@ pub mut:
conn &net.TcpConn = unsafe { nil }
// Gives access to a shared logger object
logger shared log.Log
// Used to collect metrics on the web server
collector &metrics.MetricsCollector
// time.ticks() from start of web connection handle.
// You can use it to determine how much time is spent on your request.
page_gen_start i64
@ -44,7 +41,7 @@ pub mut:
// Files from multipart-form.
files map[string][]http.FileData
// Allows reading the request body
reader &io.BufferedReader = unsafe { nil }
reader io.BufferedReader
// RESPONSE
status http.Status = http.Status.ok
content_type string = 'text/plain'
@ -148,17 +145,8 @@ pub fn (ctx &Context) is_authenticated() bool {
return false
}
// body sends the given body as an HTTP response.
pub fn (mut ctx Context) body(status http.Status, content_type string, body string) Result {
ctx.status = status
ctx.content_type = content_type
ctx.send_response(body)
return Result{}
}
// json[T] HTTP_OK with json_s as payload with content-type `application/json`
pub fn (mut ctx Context) json[T](status http.Status, j T) Result {
// json<T> HTTP_OK with json_s as payload with content-type `application/json`
pub fn (mut ctx Context) json<T>(status http.Status, j T) Result {
ctx.status = status
ctx.content_type = 'application/json'
@ -278,14 +266,14 @@ interface DbInterface {
// run runs the app
[manualfree]
pub fn run[T](global_app &T, port int) {
mut l := net.listen_tcp(.ip6, ':${port}') or { panic('failed to listen ${err.code()} ${err}') }
pub fn run<T>(global_app &T, port int) {
mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code() $err') }
// Parsing methods attributes
mut routes := map[string]Route{}
$for method in T.methods {
http_methods, route_path := parse_attrs(method.name, method.attrs) or {
eprintln('error parsing method attributes: ${err}')
eprintln('error parsing method attributes: $err')
return
}
@ -294,7 +282,7 @@ pub fn run[T](global_app &T, port int) {
path: route_path
}
}
println('[Vweb] Running app on http://localhost:${port}')
println('[Vweb] Running app on http://localhost:$port')
for {
// Create a new app object for each connection, copy global data like db connections
mut request_app := &T{}
@ -311,16 +299,16 @@ pub fn run[T](global_app &T, port int) {
request_app.Context = global_app.Context // copy the context ref that contains static files map etc
mut conn := l.accept() or {
// failures should not panic
eprintln('accept() failed with error: ${err.msg()}')
eprintln('accept() failed with error: $err.msg()')
continue
}
spawn handle_conn[T](mut conn, mut request_app, routes)
go handle_conn<T>(mut conn, mut request_app, routes)
}
}
// handle_conn handles a connection
[manualfree]
fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
fn handle_conn<T>(mut conn net.TcpConn, mut app T, routes map[string]Route) {
conn.set_read_timeout(30 * time.second)
conn.set_write_timeout(30 * time.second)
@ -331,23 +319,6 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
app.logger.flush()
}
// Record how long request took to process
path := urllib.parse(app.req.url) or { urllib.URL{} }.path
labels := [
['method', app.req.method.str()]!,
['path', path]!,
// Not all methods properly set this value yet I think
['status', app.status.int().str()]!,
]
app.collector.counter_increment(name: 'http_requests_total', labels: labels)
// Prometheus prefers metrics containing base units, as defined here
// https://prometheus.io/docs/practices/naming/
app.collector.histogram_record(f64(time.ticks() - app.page_gen_start) / 1000,
name: 'http_requests_duration_seconds'
labels: labels
)
unsafe {
free(app)
}
@ -363,8 +334,8 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// Request parse
head := http.parse_request_head(mut reader) or {
// Prevents errors from being thrown when BufferedReader is empty
if '${err}' != 'none' {
eprintln('error parsing request head: ${err}')
if '$err' != 'none' {
eprintln('error parsing request head: $err')
}
return
}
@ -372,7 +343,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// The healthcheck spams the logs, which isn't very useful
if head.url != '/health' {
lock app.logger {
app.logger.debug('${head.method} ${head.url} ${head.version}')
app.logger.debug('$head.method $head.url $head.version')
}
}
@ -386,7 +357,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// URL Parse
url := urllib.parse(head.url) or {
eprintln('error parsing path: ${err}')
eprintln('error parsing path: $err')
return
}
@ -413,7 +384,6 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
static_mime_types: app.static_mime_types
reader: reader
logger: app.logger
collector: app.collector
api_key: app.api_key
}
@ -424,7 +394,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
$for method in T.methods {
$if method.return_type is Result {
route := routes[method.name] or {
eprintln('parsed attributes for the `${method.name}` are not found, skipping...')
eprintln('parsed attributes for the `$method.name` are not found, skipping...')
Route{}
}
@ -456,7 +426,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
method_args := params.clone()
if method_args.len != method.args.len {
eprintln('warning: uneven parameters count (${method.args.len}) in `${method.name}`, compared to the web route `${method.attrs}` (${method_args.len})')
eprintln('warning: uneven parameters count ($method.args.len) in `$method.name`, compared to the web route `$method.attrs` ($method_args.len)')
}
app.$method(method_args)
return

View File

@ -12,6 +12,4 @@ address = "http://localhost:8000"
api_update_frequency = 2
image_rebuild_frequency = 1
max_concurrent_builds = 3
# max_log_age = 64
log_removal_schedule = '* * *'
collect_metrics = true
max_log_age = 64