Compare commits

...

3 Commits

11 changed files with 166 additions and 64 deletions

View File

@@ -24,6 +24,7 @@ pub:
 	branch     string
 	repo       string
 	base_image string
+	force      bool
 }
 
 // create_build_image creates a builder image given some base image which can
@@ -104,7 +105,7 @@ pub:
 }
 
 // build_target builds the given target. Internally it calls `build_config`.
-pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult {
+pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult {
 	config := BuildConfig{
 		target_id: target.id
 		kind: target.kind
@@ -112,6 +113,7 @@ pub fn build_target(address string, api_key string, base_image_id string, target
 		branch: target.branch
 		repo: target.repo
 		base_image: base_image_id
+		force: force
 	}
 
 	return build_config(address, api_key, config)
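
Taken together, the new `force` field threads from the caller straight into the build config. A minimal call-site sketch in V, reusing names that appear in the CLI change further down (`conf`, `image_id`, and `target` are assumed to be in scope):

	// Rebuild and publish even if the package is already in the repository.
	res := build.build_target(conf.address, conf.api_key, image_id, target, true)!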

View File

@@ -7,7 +7,7 @@ import datatypes { MinHeap }
 import util
 
 struct BuildJob {
-pub:
+pub mut:
 	// Time at which this build job was created/queued
 	created time.Time
 	// Next timestamp from which point this job is allowed to be executed
@@ -16,6 +16,8 @@ pub:
 	ce CronExpression
 	// Actual build config sent to the agent
 	config BuildConfig
+	// Whether this is a one-time job
+	single bool
 }
 
 // Allows BuildJob structs to be sorted according to their timestamp in
@@ -53,45 +55,59 @@ pub fn new_job_queue(default_schedule CronExpression, default_base_image string)
 // insert_all executes insert for each architecture of the given Target.
 pub fn (mut q BuildJobQueue) insert_all(target Target) ! {
 	for arch in target.arch {
-		q.insert(target, arch.value)!
+		q.insert(target: target, arch: arch.value)!
 	}
 }
 
+[params]
+pub struct InsertConfig {
+	target Target [required]
+	arch   string [required]
+	single bool
+	force  bool
+	now    bool
+}
+
 // insert a new target's job into the queue for the given architecture. This
 // job will then be endlessly rescheduled after being popped, unless removed
 // explicitly.
-pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! {
+pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
 	lock q.mutex {
-		if arch !in q.queues {
-			q.queues[arch] = MinHeap<BuildJob>{}
+		if input.arch !in q.queues {
+			q.queues[input.arch] = MinHeap<BuildJob>{}
 		}
 
-		ce := if target.schedule != '' {
-			parse_expression(target.schedule) or {
-				return error("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
-			}
-		} else {
-			q.default_schedule
-		}
-
-		timestamp := ce.next_from_now()!
-
-		job := BuildJob{
+		mut job := BuildJob{
 			created: time.now()
-			timestamp: timestamp
-			ce: ce
+			single: input.single
 			config: BuildConfig{
-				target_id: target.id
-				kind: target.kind
-				url: target.url
-				branch: target.branch
-				repo: target.repo
+				target_id: input.target.id
+				kind: input.target.kind
+				url: input.target.url
+				branch: input.target.branch
+				repo: input.target.repo
 				// TODO make this configurable
 				base_image: q.default_base_image
+				force: input.force
 			}
 		}
 
-		q.queues[arch].insert(job)
+		if !input.now {
+			ce := if input.target.schedule != '' {
+				parse_expression(input.target.schedule) or {
+					return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()")
+				}
+			} else {
+				q.default_schedule
+			}
+
+			job.timestamp = ce.next_from_now()!
+			job.ce = ce
+		} else {
+			job.timestamp = time.now()
+		}
+
+		q.queues[input.arch].insert(job)
 	}
 }
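
Because `InsertConfig` carries the `[params]` attribute, callers pass the arguments as a trailing struct literal, and omitted fields default to their zero values (`false` for `single`, `force`, and `now`). The two call shapes used in this changeset, assuming a queue `q` and a `target` loaded from the database:

	// Recurring job, rescheduled according to its cron expression.
	q.insert(target: target, arch: 'x86_64')!

	// One-time forced job that becomes available immediately.
	q.insert(target: target, arch: 'x86_64', single: true, force: true, now: true)!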
@@ -158,10 +174,12 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
 		if job.timestamp < time.now() {
 			job = q.queues[arch].pop()?
 
-			// TODO how do we handle this properly? Is it even possible for a
-			// cron expression to not return a next time if it's already been
-			// used before?
-			q.reschedule(job, arch) or {}
+			if !job.single {
+				// TODO how do we handle this properly? Is it even possible for a
+				// cron expression to not return a next time if it's already been
+				// used before?
+				q.reschedule(job, arch) or {}
+			}
 
 			return job
 		}
@@ -186,8 +204,10 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
 			if job.timestamp < time.now() {
 				job = q.queues[arch].pop() or { break }
 
-				// TODO idem
-				q.reschedule(job, arch) or {}
+				if !job.single {
+					// TODO idem
+					q.reschedule(job, arch) or {}
+				}
 
 				out << job
 			} else {
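
In both `pop` and `pop_n`, a job flagged `single` is handed out once and never rescheduled; only recurring jobs are re-inserted with their next timestamp. A small sketch, assuming a populated queue `q`:

	// A one-time job leaves the queue permanently after this pop.
	if job := q.pop('x86_64') {
		println('popped build for target $job.config.target_id')
	}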

View File

@@ -63,14 +63,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
 		'cd repo',
 		'makepkg --nobuild --syncdeps --needed --noconfirm',
 		'source PKGBUILD',
+	]
+
+	if !config.force {
-		// The build container checks whether the package is already present on
-		// the server.
-		'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
-		// If the above curl command succeeds, we don't need to rebuild the
-		// package. However, because we're in a su shell, the exit command will
-		// drop us back into the root shell. Therefore, we must check whether
-		// we're in root so we don't proceed.
-		'[ "\$(id -u)" == 0 ] && exit 0',
+		commands << [
+			// The build container checks whether the package is already present on
+			// the server.
+			'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+			// If the above curl command succeeds, we don't need to rebuild the
+			// package. However, because we're in a su shell, the exit command will
+			// drop us back into the root shell. Therefore, we must check whether
+			// we're in root so we don't proceed.
+			'[ "\$(id -u)" == 0 ] && exit 0',
+		]
+	}
+
+	commands << [
 		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
 	]
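
The net effect: without `force`, the generated script still exits early when the exact `$pkgname-$pkgver-$pkgrel` file is already published; with `force`, those lines are never emitted, so `makepkg` always runs. A hypothetical call producing a forcing script (V struct-update syntax, `config` assumed in scope):

	// Build script that skips the 'already published?' check.
	script := create_build_script(address, BuildConfig{ ...config, force: true }, 'x86_64')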

View File

@@ -1,6 +1,7 @@
 module client
 
 import build { BuildConfig }
+import web.response { Response }
 
 // poll_jobs requests a list of new build jobs from the server.
 pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
@@ -11,3 +12,13 @@ pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
 
 	return data.data
 }
+
+pub fn (c &Client) queue_job(target_id int, arch string, force bool) !Response<string> {
+	data := c.send_request<string>(.post, '/api/v1/jobs/queue', {
+		'target': target_id.str()
+		'arch': arch
+		'force': force.str()
+	})!
+
+	return data
+}
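
`queue_job` is a thin wrapper over the new POST route; the booleans are stringified because the parameters travel as query values. Hypothetical usage, assuming a loaded `conf`:

	// Queue a forced one-time build of target 10 on x86_64.
	c := client.new(conf.address, conf.api_key)
	res := c.queue_job(10, 'x86_64', true)!
	println(res.message)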

View File

@@ -6,7 +6,7 @@ import os
 import build
 
 // build locally builds the target with the given id.
-fn build(conf Config, target_id int) ! {
+fn build(conf Config, target_id int, force bool) ! {
 	c := client.new(conf.address, conf.api_key)
 	target := c.get_target(target_id)!
@@ -16,7 +16,7 @@ fn build(conf Config, target_id int) ! {
 	image_id := build.create_build_image(conf.base_image)!
 
 	println('Running build...')
-	res := build.build_target(conf.address, conf.api_key, image_id, target)!
+	res := build.build_target(conf.address, conf.api_key, image_id, target, force)!
 
 	println('Removing build image...')

View File

@@ -182,11 +182,45 @@ pub fn cmd() cli.Command {
 			required_args: 1
 			usage: 'id'
 			description: 'Build the target with the given id & publish it.'
+			flags: [
+				cli.Flag{
+					name: 'force'
+					description: 'Build the target without checking whether it needs to be renewed.'
+					flag: cli.FlagType.bool
+				},
+				cli.Flag{
+					name: 'remote'
+					description: 'Schedule the build on the server instead of running it locally.'
+					flag: cli.FlagType.bool
+				},
+				cli.Flag{
+					name: 'arch'
+					description: 'Architecture to schedule build for. Required when using -remote.'
+					flag: cli.FlagType.string
+				},
+			]
 			execute: fn (cmd cli.Command) ! {
 				config_file := cmd.flags.get_string('config-file')!
 				conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
 
-				build(conf, cmd.args[0].int())!
+				remote := cmd.flags.get_bool('remote')!
+				force := cmd.flags.get_bool('force')!
+				target_id := cmd.args[0].int()
+
+				if remote {
+					arch := cmd.flags.get_string('arch')!
+
+					if arch == '' {
+						println('When scheduling the build remotely, you have to specify an architecture.')
+						exit(1)
+					}
+
+					c := client.new(conf.address, conf.api_key)
+					res := c.queue_job(target_id, arch, force)!
+
+					println(res.message)
+				} else {
+					build(conf, target_id, force)!
+				}
 			}
 		},
 	]
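
With these flags the subcommand either builds locally (optionally forced) or, with `-remote`, only queues the job over the API. Assuming the command is mounted as `vieter targets build`, a remote invocation would look something like:

	vieter targets build -remote -arch x86_64 -force 10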

View File

@@ -79,7 +79,7 @@ fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
 	mut status := 0
 
 	res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
-		&sb.target) or {
+		&sb.target, false) or {
 		d.ldebug('build_target error: $err.msg()')
 		status = 1

View File

@@ -1,21 +0,0 @@
-module server
-
-import web
-import web.response { new_data_response, new_response }
-
-// v1_poll_job_queue allows agents to poll for new build jobs.
-['/api/v1/jobs/poll'; auth; get]
-fn (mut app App) v1_poll_job_queue() web.Result {
-	arch := app.query['arch'] or {
-		return app.json(.bad_request, new_response('Missing arch query arg.'))
-	}
-
-	max_str := app.query['max'] or {
-		return app.json(.bad_request, new_response('Missing max query arg.'))
-	}
-	max := max_str.int()
-
-	mut out := app.job_queue.pop_n(arch, max).map(it.config)
-
-	return app.json(.ok, new_data_response(out))
-}

View File

@@ -0,0 +1,48 @@
+module server
+
+import web
+import web.response { new_data_response, new_response }
+
+// v1_poll_job_queue allows agents to poll for new build jobs.
+['/api/v1/jobs/poll'; auth; get]
+fn (mut app App) v1_poll_job_queue() web.Result {
+	arch := app.query['arch'] or {
+		return app.json(.bad_request, new_response('Missing arch query arg.'))
+	}
+
+	max_str := app.query['max'] or {
+		return app.json(.bad_request, new_response('Missing max query arg.'))
+	}
+	max := max_str.int()
+
+	mut out := app.job_queue.pop_n(arch, max).map(it.config)
+
+	return app.json(.ok, new_data_response(out))
+}
+
+['/api/v1/jobs/queue'; auth; post]
+fn (mut app App) v1_queue_job() web.Result {
+	target_id := app.query['target'] or {
+		return app.json(.bad_request, new_response('Missing target query arg.'))
+	}.int()
+
+	arch := app.query['arch'] or {
+		return app.json(.bad_request, new_response('Missing arch query arg.'))
+	}
+
+	if arch == '' {
+		return app.json(.bad_request, new_response('Empty arch query arg.'))
+	}
+
+	force := 'force' in app.query
+
+	target := app.db.get_target(target_id) or {
+		return app.json(.bad_request, new_response('Unknown target id.'))
+	}
+
+	app.job_queue.insert(target: target, arch: arch, single: true, now: true, force: force) or {
+		return app.status(.internal_server_error)
+	}
+
+	return app.status(.ok)
+}
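
Note that the route treats `force` as a presence flag (`'force' in app.query`), while the client above always sends the key with a stringified value, so even `force=false` registers as forced. An equivalent hand-rolled request, with a hypothetical host and key and the header name taken from the build script above:

	curl -XPOST -H 'X-API-KEY: secret' 'https://example.com/api/v1/jobs/queue?target=10&arch=x86_64&force'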

View File

@@ -37,7 +37,7 @@ fn (mut app App) init_job_queue() ! {
 	for targets.len > 0 {
 		for target in targets {
 			for arch in target.arch {
-				app.job_queue.insert(target, arch.value)!
+				app.job_queue.insert(target: target, arch: arch.value)!
 			}
 		}

View File

@@ -8,7 +8,7 @@ arch = "x86_64"
 
 address = "http://localhost:8000"
 
-global_schedule = '* *'
+# global_schedule = '* *'
 
 api_update_frequency = 2
 image_rebuild_frequency = 1
 max_concurrent_builds = 3