Compare commits

...

7 Commits

Author SHA1 Message Date
Jef Roosens cc061a38da
daemon: worked on daemon loop
ci/woodpecker/push/arch unknown status Details
ci/woodpecker/push/docker unknown status Details
ci/woodpecker/push/build_experimental Pipeline failed Details
ci/woodpecker/push/lint Pipeline failed Details
ci/woodpecker/push/build Pipeline was successful Details
ci/woodpecker/push/test Pipeline was successful Details
2022-04-30 12:03:14 +02:00
Jef Roosens ed02d82a0d
Added docs command & notice in README [CI SKIP] 2022-04-30 12:03:14 +02:00
Jef Roosens 488a172586
ci: run builds sequentially [CI SKIP] 2022-04-30 12:03:14 +02:00
Jef Roosens d5409201c7
Use dynamic_boehm everywhere 2022-04-30 12:03:14 +02:00
Jef Roosens ead4c5f4b7
ci: added dynamic boehm prod build to experimental builds 2022-04-30 12:03:14 +02:00
Jef Roosens 16151643e6
Add dynamic boehm compiler to tests as well 2022-04-30 12:03:14 +02:00
Jef Roosens 5ee6d553ac
Compile boehm gc dynamically for debug 2022-04-30 12:03:14 +02:00
7 changed files with 65 additions and 23 deletions

3
.gitignore vendored
View File

@@ -23,3 +23,6 @@ v/
# gdb log file # gdb log file
gdb.txt gdb.txt
# Generated docs
_docs/

View File

@@ -12,7 +12,6 @@ pipeline:
debug: debug:
image: 'chewingbever/vlang:latest' image: 'chewingbever/vlang:latest'
pull: true pull: true
group: 'build'
commands: commands:
- make - make
when: when:
@@ -25,7 +24,6 @@ pipeline:
pull: true pull: true
environment: environment:
- LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static
group: 'build'
commands: commands:
- make prod - make prod
# Make sure the binary is actually statically built # Make sure the binary is actually statically built

View File

@@ -3,13 +3,14 @@ SRC_DIR := src
SOURCES != find '$(SRC_DIR)' -iname '*.v' SOURCES != find '$(SRC_DIR)' -iname '*.v'
V_PATH ?= v V_PATH ?= v
V := $(V_PATH) -showcc -gc boehm V := $(V_PATH) -showcc -gc boehm -d dynamic_boehm
all: vieter all: vieter
# =====COMPILATION===== # =====COMPILATION=====
# Regular binary # We force the boehm gc to be compiled dynamically because otherwise, our CI
# build breaks.
vieter: $(SOURCES) vieter: $(SOURCES)
$(V) -g -o vieter $(SRC_DIR) $(V) -g -o vieter $(SRC_DIR)
@@ -74,8 +75,14 @@ v/v:
git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v
make -C v make -C v
.PHONY: clean
clean: clean:
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs'
.PHONY: docs
docs:
rm -rf '$(SRC_DIR)/_docs'
cd '$(SRC_DIR)' && v doc -all -f html -m -readme .
# =====EXPERIMENTAL===== # =====EXPERIMENTAL=====

View File

@@ -2,7 +2,9 @@
## Documentation ## Documentation
I host documentation for Vieter over at https://rustybever.be/docs/vieter/. I host documentation for Vieter over at https://rustybever.be/docs/vieter/. API
documentation for the current codebase can be found at
https://rustybever.be/api-docs/vieter/.
## Overview ## Overview

View File

@@ -11,8 +11,9 @@ pub:
address string address string
base_image string = 'archlinux:base-devel' base_image string = 'archlinux:base-devel'
max_concurrent_builds int = 1 max_concurrent_builds int = 1
api_update_frequency int = 60 api_update_frequency int = 15
global_schedule string // Replicates the behavior of the original cron system
global_schedule string = '0 3'
} }
// cmd returns the cli module that handles the cron daemon. // cmd returns the cli module that handles the cron daemon.

View File

@@ -4,15 +4,17 @@ import time
import sync.stdatomic import sync.stdatomic
const build_empty = 0 const build_empty = 0
const build_running = 1 const build_running = 1
const build_done = 2 const build_done = 2
// reschedule_builds looks for any builds with status code 2 & re-adds them to // reschedule_builds looks for any builds with status code 2 & re-adds them to
// the queue. // the queue.
fn (mut d Daemon) reschedule_builds() ? { fn (mut d Daemon) reschedule_builds() ? {
for i in 0..d.atomics.len { for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == build_done { if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
stdatomic.store_u64(&d.atomics[i], build_empty) stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
sb := d.builds[i] sb := d.builds[i]
d.schedule_build(sb.repo_id, sb.repo) ? d.schedule_build(sb.repo_id, sb.repo) ?
@@ -29,7 +31,8 @@ fn (mut d Daemon) update_builds() ? {
sb := d.queue.pop() ? sb := d.queue.pop() ?
// If this build couldn't be scheduled, no more will be possible. // If this build couldn't be scheduled, no more will be possible.
if !d.start_build(sb)? { // TODO a build that couldn't be scheduled should be re-added to the queue.
if !d.start_build(sb) {
break break
} }
} else { } else {
@@ -39,10 +42,10 @@ fn (mut d Daemon) update_builds() ? {
} }
// start_build starts a build for the given ScheduledBuild object. // start_build starts a build for the given ScheduledBuild object.
fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool { fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
for i in 0..d.atomics.len { for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == build_empty { if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
stdatomic.store_u64(&d.atomics[i], build_running) stdatomic.store_u64(&d.atomics[i], daemon.build_running)
d.builds[i] = sb d.builds[i] = sb
go d.run_build(i, sb) go d.run_build(i, sb)
@@ -56,8 +59,8 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool {
// run_build actually starts the build process for a given repo. // run_build actually starts the build process for a given repo.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? { fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
d.linfo('build $sb.repo.url')
time.sleep(10 * time.second) time.sleep(10 * time.second)
stdatomic.store_u64(&d.atomics[build_index], build_done) stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
} }

View File

@@ -5,6 +5,8 @@ import time
import log import log
import datatypes { MinHeap } import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression } import cron.expression { CronExpression, parse_expression }
import math
import arrays
struct ScheduledBuild { struct ScheduledBuild {
pub: pub:
@@ -62,23 +64,47 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
// periodically refreshes the list of repositories to ensure we stay in sync. // periodically refreshes the list of repositories to ensure we stay in sync.
pub fn (mut d Daemon) run() ? { pub fn (mut d Daemon) run() ? {
for { for {
println('1') // Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp {
d.renew_repos() ?
d.renew_queue() ?
}
// Cleans up finished builds, opening up spots for new builds // Cleans up finished builds, opening up spots for new builds
d.reschedule_builds() ? d.reschedule_builds() ?
println('2')
// TODO rebuild builder image when needed
// Schedules new builds when possible // Schedules new builds when possible
d.update_builds() ? d.update_builds() ?
println(d.queue) // Sleep either until we have to refresh the repos or when the next
println(d.atomics) // build has to start, with a minimum of 1 second.
now := time.now()
time.sleep(10 * time.second) mut delay := d.api_update_timestamp - now
if d.queue.len() > 0 {
time_until_next_job := d.queue.peek() ?.timestamp - now
delay = math.min(delay, time_until_next_job)
}
d.ldebug('Sleeping for ${delay}...')
// TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished.
// We sleep for at least one second. This is to prevent the program
// from looping aggressively when a cronjob can be scheduled, but
// there's no spots free for it to be started.
time.sleep(math.max(delay, 1 * time.second))
} }
} }
// schedule_build adds the next occurrence of the given repo build to the queue. // schedule_build adds the next occurrence of the given repo build to the queue.
fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? { fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
ce := parse_expression(repo.schedule) or { ce := parse_expression(repo.schedule) or {
// TODO This shouldn't return an error if the expression is empty.
d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()") d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
d.global_schedule d.global_schedule
@@ -94,6 +120,7 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
} }
fn (mut d Daemon) renew_repos() ? { fn (mut d Daemon) renew_repos() ? {
d.ldebug('Renewing repos...')
mut new_repos := git.get_repos(d.address, d.api_key) ? mut new_repos := git.get_repos(d.address, d.api_key) ?
d.repos_map = new_repos.move() d.repos_map = new_repos.move()
@@ -104,6 +131,7 @@ fn (mut d Daemon) renew_repos() ? {
// renew_queue replaces the old queue with a new one that reflects the newest // renew_queue replaces the old queue with a new one that reflects the newest
// values in repos_map. // values in repos_map.
fn (mut d Daemon) renew_queue() ? { fn (mut d Daemon) renew_queue() ? {
d.ldebug('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{} mut new_queue := MinHeap<ScheduledBuild>{}
// Move any jobs that should have already started from the old queue onto // Move any jobs that should have already started from the old queue onto