Compare commits

..

1 Commit

Author SHA1 Message Date
Jef Roosens 6edd1b475d WIP: write agent 2022-12-12 22:09:57 +01:00
8 changed files with 40 additions and 74 deletions

View File

@ -5,9 +5,7 @@ import conf as vconf
struct Config {
pub:
log_level string = 'WARN'
// Architecture that the agent represents
arch string
log_level string = 'WARN'
api_key string
address string
data_dir string

View File

@ -5,7 +5,6 @@ import sync.stdatomic
import build { BuildConfig }
import client
import time
import os
const (
build_empty = 0
@ -41,10 +40,6 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
}
pub fn (mut d AgentDaemon) run() {
// This is just so that the very first time the loop is ran, the jobs are
// always polled
mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
for {
free_builds := d.update_atomics()
@ -58,37 +53,16 @@ pub fn (mut d AgentDaemon) run() {
d.images.clean_old_images()
// Poll for new jobs
if time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) {
new_configs := d.client.poll_jobs(d.conf.arch, free_builds) or {
d.lerror('Failed to poll jobs: $err.msg()')
time.sleep(5 * time.second)
continue
}
last_poll_time = time.now()
// Schedule new jobs
for config in new_configs {
// TODO handle this better than to just skip the config
// Make sure a recent build base image is available for building the config
d.images.refresh_image(config.base_image) or {
d.lerror(err.msg())
continue
}
d.start_build(config)
}
new_configs := d.client.poll_jobs(free_builds) or {
d.lerror('Failed to poll jobs: $err.msg()')
time.sleep(1 * time.second)
continue
}
// Builds are running, so check again after one second
else if free_builds < d.conf.max_concurrent_builds {
time.sleep(1 * time.second)
}
// The agent is not doing anything, so we just wait until the next poll
// time
else {
time_until_next_poll := time.now() - last_poll_time
time.sleep(time_until_next_poll)
// Schedule new jobs
for config in new_configs {
d.start_build(config)
}
}
}
@ -134,28 +108,24 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
// 0 means success, 1 means failure
mut status := 0
new_config := BuildConfig{
...config
base_image: d.images.get(config.base_image)
}
res := build.build_config(d.client.address, d.client.api_key, new_config) or {
d.ldebug('build_config error: $err.msg()')
res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.target) or {
d.ldebug('build_target error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('finished build: $config.url -> $config.repo; uploading logs...')
d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
build_arch := os.uname().machine
d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or {
d.lerror('Failed to upload logs for build: $config.url -> $config.repo')
d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
}
} else {
d.linfo('an error occured during build: $config.url -> $config.repo')
d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo')
}
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)

View File

@ -19,10 +19,6 @@ fn new_image_manager(refresh_frequency int) ImageManager {
}
}
pub fn (m &ImageManager) get(base_image string) string {
return m.images[base_image].last()
}
fn (mut m ImageManager) refresh_image(base_image string) ! {
// No need to refresh the image if the previous one is still new enough
if base_image in m.timestamps

View File

@ -104,16 +104,16 @@ pub:
}
pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult {
config := BuildConfig{
target_id: target.id
kind: target.kind
url: target.url
branch: target.branch
repo: target.repo
base_image: base_image_id
}
config := BuildConfig{
target_id: target.id
kind: target.kind
url: target.url
branch: target.branch
repo: target.repo
base_image: base_image_id
}
return build_config(address, api_key, config)
return build_config(address, api_key, config)
}
// build_target builds, packages & publishes a given Arch package based on the
@ -127,14 +127,14 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
}
build_arch := os.uname().machine
build_script := create_build_script(address, config, build_arch)
build_script := create_build_script(address, target, build_arch)
// We convert the build script into a base64 string, which then gets passed
// to the container as an env var
base64_script := base64.encode_str(build_script)
c := docker.NewContainer{
image: '$config.base_image'
image: '$base_image_id'
env: [
'BUILD_SCRIPT=$base64_script',
'API_KEY=$api_key',

View File

@ -76,6 +76,7 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! {
}
}
dump(job)
q.queues[arch].insert(job)
}
}

View File

@ -1,5 +1,7 @@
module build
import models { Target }
// escape_shell_string escapes any characters that could be interpreted
// incorrectly by a shell. The resulting value should be safe to use inside an
// echo statement.
@ -21,13 +23,13 @@ pub fn echo_commands(cmds []string) []string {
}
// create_build_script generates a shell script that builds a given Target.
fn create_build_script(address string, config BuildConfig, build_arch string) string {
repo_url := '$address/$config.repo'
fn create_build_script(address string, target &Target, build_arch string) string {
repo_url := '$address/$target.repo'
mut commands := [
// This will later be replaced by a proper setting for changing the
// mirrorlist
"echo -e '[$config.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
"echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
// We need to update the package list of the repo we just added above.
// This should however not pull in a lot of packages as long as the
// builder image is rebuilt frequently.
@ -36,22 +38,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
'su builder',
]
commands << match config.kind {
commands << match target.kind {
'git' {
if config.branch == '' {
if target.branch == '' {
[
"git clone --single-branch --depth 1 '$config.url' repo",
"git clone --single-branch --depth 1 '$target.url' repo",
]
} else {
[
"git clone --single-branch --depth 1 --branch $config.branch '$config.url' repo",
"git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo",
]
}
}
'url' {
[
'mkdir repo',
"curl -o repo/PKGBUILD -L '$config.url'",
"curl -o repo/PKGBUILD -L '$target.url'",
]
}
else {

View File

@ -2,10 +2,9 @@ module client
import build { BuildConfig }
pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
pub fn (c &Client) poll_jobs(max int) ![]BuildConfig {
data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', {
'arch': arch
'max': max.str()
'max': max.str()
})!
return data.data

View File

@ -4,7 +4,6 @@ data_dir = "data"
pkg_dir = "data/pkgs"
log_level = "DEBUG"
default_arch = "x86_64"
arch = "x86_64"
address = "http://localhost:8000"
@ -12,3 +11,4 @@ global_schedule = '* *'
api_update_frequency = 2
image_rebuild_frequency = 1
max_concurrent_builds = 3