refactor: renamed cron & build code to use "target" naming
ci/woodpecker/pr/docs Pipeline was successful Details
ci/woodpecker/pr/lint Pipeline was successful Details
ci/woodpecker/pr/build Pipeline was successful Details
ci/woodpecker/pr/docker Pipeline was successful Details
ci/woodpecker/pr/man Pipeline was successful Details
ci/woodpecker/pr/test Pipeline was successful Details

pull/254/head
Jef Roosens 2022-06-17 16:23:47 +02:00
parent 449656eb97
commit 5e11a91f3d
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
4 changed files with 51 additions and 48 deletions

View File

@ -90,10 +90,10 @@ pub:
logs string logs string
} }
// build_repo builds, packages & publishes a given Arch package based on the // build_target builds, packages & publishes a given Arch package based on the
// provided target. The base image ID should be of an image previously created // provided target. The base image ID should be of an image previously created
// by create_build_image. It returns the logs of the container. // by create_build_image. It returns the logs of the container.
pub fn build_repo(address string, api_key string, base_image_id string, repo &Target) ?BuildResult { pub fn build_target(address string, api_key string, base_image_id string, target &Target) ?BuildResult {
mut dd := docker.new_conn()? mut dd := docker.new_conn()?
defer { defer {
@ -101,7 +101,7 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &Ta
} }
build_arch := os.uname().machine build_arch := os.uname().machine
build_script := create_build_script(address, repo, build_arch) build_script := create_build_script(address, target, build_arch)
// We convert the build script into a base64 string, which then gets passed // We convert the build script into a base64 string, which then gets passed
// to the container as an env var // to the container as an env var

View File

@ -6,9 +6,9 @@ import os
import build import build
// build locally builds the target with the given id. // build locally builds the target with the given id.
fn build(conf Config, repo_id int) ? { fn build(conf Config, target_id int) ? {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
repo := c.get_target(repo_id)? target := c.get_target(target_id)?
build_arch := os.uname().machine build_arch := os.uname().machine
@ -16,7 +16,7 @@ fn build(conf Config, repo_id int) ? {
image_id := build.create_build_image(conf.base_image)? image_id := build.create_build_image(conf.base_image)?
println('Running build...') println('Running build...')
res := build.build_repo(conf.address, conf.api_key, image_id, repo)? res := build.build_target(conf.address, conf.api_key, image_id, target)?
println('Removing build image...') println('Removing build image...')
@ -29,6 +29,6 @@ fn build(conf Config, repo_id int) ? {
dd.remove_image(image_id)? dd.remove_image(image_id)?
println('Uploading logs to Vieter...') println('Uploading logs to Vieter...')
c.add_build_log(repo.id, res.start_time, res.end_time, build_arch, res.exit_code, c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,
res.logs)? res.logs)?
} }

View File

@ -71,29 +71,31 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
return false return false
} }
// run_build actually starts the build process for a given repo. // run_build actually starts the build process for a given target.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) { fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
d.linfo('started build: $sb.repo.url $sb.repo.branch') d.linfo('started build: $sb.target.url -> $sb.target.repo')
// 0 means success, 1 means failure // 0 means success, 1 means failure
mut status := 0 mut status := 0
res := build.build_repo(d.client.address, d.client.api_key, d.builder_images.last(), res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.repo) or { &sb.target) or {
d.ldebug('build_repo error: $err.msg()') d.ldebug('build_target error: $err.msg()')
status = 1 status = 1
build.BuildResult{} build.BuildResult{}
} }
if status == 0 { if status == 0 {
d.linfo('finished build: $sb.repo.url $sb.repo.branch; uploading logs...') d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
build_arch := os.uname().machine build_arch := os.uname().machine
d.client.add_build_log(sb.repo.id, res.start_time, res.end_time, build_arch, res.exit_code, d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
res.logs) or { d.lerror('Failed to upload logs for $sb.repo.url $sb.repo.arch') } res.exit_code, res.logs) or {
d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
}
} else { } else {
d.linfo('failed build: $sb.repo.url $sb.repo.branch') d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo')
} }
stdatomic.store_u64(&d.atomics[build_index], daemon.build_done) stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)

View File

@ -20,7 +20,7 @@ const (
struct ScheduledBuild { struct ScheduledBuild {
pub: pub:
repo Target target Target
timestamp time.Time timestamp time.Time
} }
@ -37,9 +37,9 @@ mut:
global_schedule CronExpression global_schedule CronExpression
api_update_frequency int api_update_frequency int
image_rebuild_frequency int image_rebuild_frequency int
// Repos currently loaded from API. // Targets currently loaded from API.
repos []Target targets []Target
// At what point to update the list of repositories. // At what point to update the list of targets.
api_update_timestamp time.Time api_update_timestamp time.Time
image_build_timestamp time.Time image_build_timestamp time.Time
queue MinHeap<ScheduledBuild> queue MinHeap<ScheduledBuild>
@ -51,7 +51,7 @@ mut:
logger shared log.Log logger shared log.Log
} }
// init_daemon initializes a new Daemon object. It renews the repositories & // init_daemon initializes a new Daemon object. It renews the targets &
// populates the build queue for the first time. // populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon { pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
mut d := Daemon{ mut d := Daemon{
@ -65,8 +65,8 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
logger: logger logger: logger
} }
// Initialize the repos & queue // Initialize the targets & queue
d.renew_repos() d.renew_targets()
d.renew_queue() d.renew_queue()
if !d.rebuild_base_image() { if !d.rebuild_base_image() {
return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.') return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
@ -76,21 +76,21 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
} }
// run starts the actual daemon process. It runs builds when possible & // run starts the actual daemon process. It runs builds when possible &
// periodically refreshes the list of repositories to ensure we stay in sync. // periodically refreshes the list of targets to ensure we stay in sync.
pub fn (mut d Daemon) run() { pub fn (mut d Daemon) run() {
for { for {
finished_builds := d.clean_finished_builds() finished_builds := d.clean_finished_builds()
// Update the API's contents if needed & renew the queue // Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp { if time.now() >= d.api_update_timestamp {
d.renew_repos() d.renew_targets()
d.renew_queue() d.renew_queue()
} }
// The finished builds should only be rescheduled if the API contents // The finished builds should only be rescheduled if the API contents
// haven't been renewed. // haven't been renewed.
else { else {
for sb in finished_builds { for sb in finished_builds {
d.schedule_build(sb.repo) d.schedule_build(sb.target)
} }
} }
@ -114,7 +114,7 @@ pub fn (mut d Daemon) run() {
// every second to clean up any finished builds & start new ones. // every second to clean up any finished builds & start new ones.
mut delay := time.Duration(1 * time.second) mut delay := time.Duration(1 * time.second)
// Sleep either until we have to refresh the repos or when the next // Sleep either until we have to refresh the targets or when the next
// build has to start, with a minimum of 1 second. // build has to start, with a minimum of 1 second.
if d.current_build_count() == 0 { if d.current_build_count() == 0 {
now := time.now() now := time.now()
@ -148,12 +148,13 @@ pub fn (mut d Daemon) run() {
} }
} }
// schedule_build adds the next occurrence of the given repo build to the queue. // schedule_build adds the next occurrence of the given target's build to the
fn (mut d Daemon) schedule_build(repo Target) { // queue.
ce := if repo.schedule != '' { fn (mut d Daemon) schedule_build(target Target) {
parse_expression(repo.schedule) or { ce := if target.schedule != '' {
parse_expression(target.schedule) or {
// TODO This shouldn't return an error if the expression is empty. // TODO This shouldn't return an error if the expression is empty.
d.lerror("Error while parsing cron expression '$repo.schedule' (id $repo.id): $err.msg()") d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
d.global_schedule d.global_schedule
} }
@ -161,41 +162,41 @@ fn (mut d Daemon) schedule_build(repo Target) {
d.global_schedule d.global_schedule
} }
// A repo that can't be scheduled will just be skipped for now // A target that can't be scheduled will just be skipped for now
timestamp := ce.next_from_now() or { timestamp := ce.next_from_now() or {
d.lerror("Couldn't calculate next timestamp from '$repo.schedule'; skipping") d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping")
return return
} }
d.queue.insert(ScheduledBuild{ d.queue.insert(ScheduledBuild{
repo: repo target: target
timestamp: timestamp timestamp: timestamp
}) })
} }
// renew_repos requests the newest list of Git repos from the server & replaces // renew_targets requests the newest list of targets from the server & replaces
// the old one. // the old one.
fn (mut d Daemon) renew_repos() { fn (mut d Daemon) renew_targets() {
d.linfo('Renewing repos...') d.linfo('Renewing targets...')
mut new_repos := d.client.get_all_targets() or { mut new_targets := d.client.get_all_targets() or {
d.lerror('Failed to renew repos. Retrying in ${daemon.api_update_retry_timeout}s...') d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...')
d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout) d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)
return return
} }
// Filter out any repos that shouldn't run on this architecture // Filter out any targets that shouldn't run on this architecture
cur_arch := os.uname().machine cur_arch := os.uname().machine
new_repos = new_repos.filter(it.arch.any(it.value == cur_arch)) new_targets = new_targets.filter(it.arch.any(it.value == cur_arch))
d.repos = new_repos d.targets = new_targets
d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency) d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
} }
// renew_queue replaces the old queue with a new one that reflects the newest // renew_queue replaces the old queue with a new one that reflects the newest
// values in repos_map. // values in targets.
fn (mut d Daemon) renew_queue() { fn (mut d Daemon) renew_queue() {
d.linfo('Renewing queue...') d.linfo('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{} mut new_queue := MinHeap<ScheduledBuild>{}
@ -225,10 +226,10 @@ fn (mut d Daemon) renew_queue() {
d.queue = new_queue d.queue = new_queue
// For each repository in repos_map, parse their cron expression (or use // For each target in targets, parse their cron expression (or use the
// the default one if not present) & add them to the queue // default one if not present) & add them to the queue
for repo in d.repos { for target in d.targets {
d.schedule_build(repo) d.schedule_build(target)
} }
} }