feature(daemon): added api renewal & calculated sleep time
ci/woodpecker/push/arch unknown status
ci/woodpecker/push/docker unknown status
ci/woodpecker/push/build_experimental Pipeline failed
ci/woodpecker/push/lint Pipeline failed
ci/woodpecker/push/build Pipeline was successful
ci/woodpecker/push/test Pipeline was successful
parent
f40cebad4f
commit
a5239ced1f
@@ -11,8 +11,9 @@ pub:
 	address string
 	base_image string = 'archlinux:base-devel'
 	max_concurrent_builds int = 1
-	api_update_frequency int = 60
-	global_schedule string
+	api_update_frequency int = 15
+	// Replicates the behavior of the original cron system
+	global_schedule string = '0 3'
 }
 
 // cmd returns the cli module that handles the cron daemon.
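Note (illustration only, not part of the diff): a minimal V sketch of how a refresh frequency like api_update_frequency could be turned into the next API update timestamp. The helper name and the assumption that the value is expressed in minutes are mine.

import time

// Hypothetical helper: compute the timestamp of the next API update from a
// frequency that is assumed to be given in minutes.
fn next_api_update(frequency int) time.Time {
	return time.now().add_seconds(frequency * 60)
}

fn main() {
	// With the new default of 15, repositories would be refreshed every 15 minutes.
	println(next_api_update(15))
}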
@@ -4,15 +4,17 @@ import time
 import sync.stdatomic
 
 const build_empty = 0
 
 const build_running = 1
 
 const build_done = 2
 
 // reschedule_builds looks for any builds with status code 2 & re-adds them to
 // the queue.
 fn (mut d Daemon) reschedule_builds() ? {
-	for i in 0..d.atomics.len {
-		if stdatomic.load_u64(&d.atomics[i]) == build_done {
-			stdatomic.store_u64(&d.atomics[i], build_empty)
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
 			sb := d.builds[i]
 
 			d.schedule_build(sb.repo_id, sb.repo) ?
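Note (illustration only): the daemon tracks each build slot with one atomic integer cycling through build_empty (0), build_running (1) and build_done (2). A self-contained sketch of the reset step that reschedule_builds performs on finished slots:

import sync.stdatomic

const build_empty = u64(0)
const build_done = u64(2)

fn main() {
	// One atomic status word per possible concurrent build slot.
	mut atomics := []u64{len: 3}

	// Pretend slot 1 just finished a build.
	stdatomic.store_u64(&atomics[1], build_done)

	// Free every finished slot so it can be reused; the real code also
	// re-adds the corresponding repo to the build queue here.
	for i in 0 .. atomics.len {
		if stdatomic.load_u64(&atomics[i]) == build_done {
			stdatomic.store_u64(&atomics[i], build_empty)
		}
	}

	println(atomics) // [0, 0, 0]
}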
@@ -29,7 +31,8 @@ fn (mut d Daemon) update_builds() ? {
 			sb := d.queue.pop() ?
 
 			// If this build couldn't be scheduled, no more will be possible.
-			if !d.start_build(sb)? {
+			// TODO a build that couldn't be scheduled should be re-added to the queue.
+			if !d.start_build(sb) {
 				break
 			}
 		} else {
@@ -39,10 +42,10 @@ fn (mut d Daemon) update_builds() ? {
 }
 
 // start_build starts a build for the given ScheduledBuild object.
-fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool {
-	for i in 0..d.atomics.len {
-		if stdatomic.load_u64(&d.atomics[i]) == build_empty {
-			stdatomic.store_u64(&d.atomics[i], build_running)
+fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_running)
 			d.builds[i] = sb
 
 			go d.run_build(i, sb)
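Note (illustration only): start_build now returns a plain bool, so update_builds can simply stop popping the queue once every slot is busy. A simplified, self-contained sketch of that slot-claiming scan:

import sync.stdatomic

const build_empty = u64(0)
const build_running = u64(1)

// Stand-in for start_build: claim the first free slot, or report that
// everything is busy so the caller can break out of its scheduling loop.
fn claim_free_slot(mut atomics []u64) bool {
	for i in 0 .. atomics.len {
		if stdatomic.load_u64(&atomics[i]) == build_empty {
			stdatomic.store_u64(&atomics[i], build_running)
			return true
		}
	}
	return false
}

fn main() {
	mut atomics := []u64{len: 2}

	println(claim_free_slot(mut atomics)) // true, slot 0 claimed
	println(claim_free_slot(mut atomics)) // true, slot 1 claimed
	println(claim_free_slot(mut atomics)) // false, no free slots left
}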
@@ -56,8 +59,8 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool {
 
 // run_build actually starts the build process for a given repo.
 fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
+	d.linfo('build $sb.repo.url')
 	time.sleep(10 * time.second)
 
-	stdatomic.store_u64(&d.atomics[build_index], build_done)
+	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
 }
@@ -5,6 +5,8 @@ import time
 import log
 import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
+import math
+import arrays
 
 struct ScheduledBuild {
 pub:
@@ -62,23 +64,47 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
 // periodically refreshes the list of repositories to ensure we stay in sync.
 pub fn (mut d Daemon) run() ? {
 	for {
-		println('1')
+		// Update the API's contents if needed & renew the queue
+		if time.now() >= d.api_update_timestamp {
+			d.renew_repos() ?
+			d.renew_queue() ?
+		}
+
 		// Cleans up finished builds, opening up spots for new builds
 		d.reschedule_builds() ?
-		println('2')
+
+		// TODO rebuild builder image when needed
 
 		// Schedules new builds when possible
 		d.update_builds() ?
 
-		println(d.queue)
-		println(d.atomics)
-
-		time.sleep(10 * time.second)
+		// Sleep either until we have to refresh the repos or when the next
+		// build has to start, with a minimum of 1 second.
+		now := time.now()
+
+		mut delay := d.api_update_timestamp - now
+
+		if d.queue.len() > 0 {
+			time_until_next_job := d.queue.peek() ?.timestamp - now
+
+			delay = math.min(delay, time_until_next_job)
+		}
+
+		d.ldebug('Sleeping for ${delay}...')
+
+		// TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished.
+
+		// We sleep for at least one second. This is to prevent the program
+		// from looping agressively when a cronjob can be scheduled, but
+		// there's no spots free for it to be started.
+		time.sleep(math.max(delay, 1 * time.second))
 	}
 }
 
 // schedule_build adds the next occurence of the given repo build to the queue.
 fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
 	ce := parse_expression(repo.schedule) or {
+		// TODO This shouldn't return an error if the expression is empty.
 		d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
 
 		d.global_schedule
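Note (illustration only): the new sleep logic picks the smaller of "time until the next API refresh" and "time until the next queued build", clamped to at least one second. A standalone sketch with made-up timestamps:

import math
import time

fn main() {
	now := time.now()

	// Hypothetical example values: next API refresh in 10 minutes,
	// next queued build in 3 minutes.
	api_update_timestamp := now.add_seconds(10 * 60)
	next_job_timestamp := now.add_seconds(3 * 60)

	mut delay := api_update_timestamp - now
	time_until_next_job := next_job_timestamp - now

	delay = math.min(delay, time_until_next_job)

	// Never sleep less than a second, to avoid busy-looping when a job is
	// due but no build slot is free.
	delay = math.max(delay, 1 * time.second)

	println('would sleep for ${delay}') // roughly 3 minutes here
}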
@@ -94,6 +120,7 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
 }
 
 fn (mut d Daemon) renew_repos() ? {
+	d.ldebug('Renewing repos...')
 	mut new_repos := git.get_repos(d.address, d.api_key) ?
 
 	d.repos_map = new_repos.move()
@@ -104,6 +131,7 @@ fn (mut d Daemon) renew_repos() ? {
 // renew_queue replaces the old queue with a new one that reflects the newest
 // values in repos_map.
 fn (mut d Daemon) renew_queue() ? {
+	d.ldebug('Renewing queue...')
 	mut new_queue := MinHeap<ScheduledBuild>{}
 
 	// Move any jobs that should have already started from the old queue onto
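Note (illustration only): the queue is a MinHeap keyed on each build's next run time, which is why the run loop can peek at the earliest job when calculating its sleep delay. A small self-contained sketch (ScheduledBuild reduced to an id and a timestamp; the real struct also carries the repo) of how such a heap orders its entries:

import datatypes { MinHeap }
import time

struct ScheduledBuild {
	repo_id   string
	timestamp time.Time
}

// The heap uses `<` to decide which build comes first.
fn (b1 ScheduledBuild) < (b2 ScheduledBuild) bool {
	return b1.timestamp < b2.timestamp
}

fn main() {
	mut queue := MinHeap<ScheduledBuild>{}
	now := time.now()

	queue.insert(ScheduledBuild{
		repo_id: 'later'
		timestamp: now.add_seconds(3600)
	})
	queue.insert(ScheduledBuild{
		repo_id: 'sooner'
		timestamp: now.add_seconds(60)
	})

	// peek returns the earliest scheduled build without removing it.
	next := queue.peek() or { return }
	println('next build: $next.repo_id')
}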