Compare commits


1 Commit

Author       SHA1        Message           Date
Jef Roosens  6edd1b475d  WIP: write agent  2022-12-12 22:09:57 +01:00
8 changed files with 40 additions and 74 deletions
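Taken together, the changes drop the agent's dedicated arch setting and its client-side polling timer: poll_jobs no longer sends an architecture, the run() loop now simply polls on every iteration, and the BuildConfig-based indirection is collapsed back into a Target-based build_target.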

View File

@@ -5,9 +5,7 @@ import conf as vconf
 struct Config {
 pub:
 	log_level string = 'WARN'
-	// Architecture that the agent represents
-	arch      string
 	api_key   string
 	address   string
 	data_dir  string

View File

@@ -5,7 +5,6 @@ import sync.stdatomic
 import build { BuildConfig }
 import client
 import time
-import os

 const (
 	build_empty = 0
@@ -41,10 +40,6 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
 }

 pub fn (mut d AgentDaemon) run() {
-	// This is just so that the very first time the loop is ran, the jobs are
-	// always polled
-	mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
-
 	for {
 		free_builds := d.update_atomics()
@@ -58,37 +53,16 @@ pub fn (mut d AgentDaemon) run() {
 		d.images.clean_old_images()

 		// Poll for new jobs
-		if time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) {
-			new_configs := d.client.poll_jobs(d.conf.arch, free_builds) or {
-				d.lerror('Failed to poll jobs: $err.msg()')
-
-				time.sleep(5 * time.second)
-				continue
-			}
-			last_poll_time = time.now()
-
-			// Schedule new jobs
-			for config in new_configs {
-				// TODO handle this better than to just skip the config
-				// Make sure a recent build base image is available for building the config
-				d.images.refresh_image(config.base_image) or {
-					d.lerror(err.msg())
-					continue
-				}
-				d.start_build(config)
-			}
-
-			time.sleep(1 * time.second)
-		}
-		// Builds are running, so check again after one second
-		else if free_builds < d.conf.max_concurrent_builds {
-			time.sleep(1 * time.second)
-		}
-		// The agent is not doing anything, so we just wait until the next poll
-		// time
-		else {
-			time_until_next_poll := time.now() - last_poll_time
-			time.sleep(time_until_next_poll)
+		new_configs := d.client.poll_jobs(free_builds) or {
+			d.lerror('Failed to poll jobs: $err.msg()')
+
+			time.sleep(1 * time.second)
+			continue
+		}
+
+		// Schedule new jobs
+		for config in new_configs {
+			d.start_build(config)
 		}
 	}
 }
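The new loop is easier to read in isolation. Below is a minimal, runnable sketch of its shape in V; the Job struct and the poll_jobs and start_build stubs are hypothetical stand-ins for the real client and daemon methods, not Vieter's API.

module main

import time

struct Job {
	url  string
	repo string
}

// Stand-in for Client.poll_jobs: the real client asks the server for at
// most max jobs via GET /api/v1/jobs/poll.
fn poll_jobs(max int) ![]Job {
	return [Job{
		url: 'https://example.com/pkg.git'
		repo: 'vieter'
	}]
}

// Stand-in for AgentDaemon.start_build.
fn start_build(job Job) {
	println('building $job.url -> $job.repo')
}

fn main() {
	for {
		// Poll every iteration; on failure, wait a second and try again.
		new_jobs := poll_jobs(3) or {
			eprintln('Failed to poll jobs: $err.msg()')

			time.sleep(1 * time.second)
			continue
		}

		for job in new_jobs {
			start_build(job)
		}

		// Not part of the diff: throttle the sketch so it does not busy-poll.
		time.sleep(1 * time.second)
	}
}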
@@ -134,28 +108,24 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
 	// 0 means success, 1 means failure
 	mut status := 0

-	new_config := BuildConfig{
-		...config
-		base_image: d.images.get(config.base_image)
-	}
-
-	res := build.build_config(d.client.address, d.client.api_key, new_config) or {
-		d.ldebug('build_config error: $err.msg()')
+	res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
+		&sb.target) or {
+		d.ldebug('build_target error: $err.msg()')
 		status = 1

 		build.BuildResult{}
 	}

 	if status == 0 {
-		d.linfo('finished build: $config.url -> $config.repo; uploading logs...')
+		d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')

 		build_arch := os.uname().machine
-		d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
+		d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
 			res.exit_code, res.logs) or {
-			d.lerror('Failed to upload logs for build: $config.url -> $config.repo')
+			d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
 		}
 	} else {
-		d.linfo('an error occured during build: $config.url -> $config.repo')
+		d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo')
 	}

 	stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
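The atomics array is how the daemon tracks its build slots; the hunk above ends by marking the slot done. A self-contained sketch of that pattern follows, assuming build_running and build_done values that this diff does not show, and with a hypothetical reconstruction of what an update_atomics-style scan could look like.

module main

import sync.stdatomic

// build_empty = 0 matches the constant visible earlier in this file; the
// other two values are assumptions made for this sketch.
const (
	build_empty   = 0
	build_running = 1
	build_done    = 2
)

fn main() {
	// One u64 slot per allowed concurrent build, like the daemon's atomics array.
	mut slots := []u64{len: 3, init: u64(build_empty)}

	// A build claims slot 0 and later marks it finished, mirroring the
	// stdatomic.store_u64 call at the end of run_build.
	stdatomic.store_u64(&slots[0], u64(build_running))
	stdatomic.store_u64(&slots[0], u64(build_done))

	// Hypothetical update_atomics-style pass: recycle finished slots and
	// count the free ones, which bounds the next poll_jobs request.
	mut free_builds := 0
	for i in 0 .. slots.len {
		if stdatomic.load_u64(&slots[i]) == u64(build_done) {
			stdatomic.store_u64(&slots[i], u64(build_empty))
		}
		if stdatomic.load_u64(&slots[i]) == u64(build_empty) {
			free_builds++
		}
	}
	println('free build slots: $free_builds')
}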

View File

@@ -19,10 +19,6 @@ fn new_image_manager(refresh_frequency int) ImageManager {
 	}
 }

-pub fn (m &ImageManager) get(base_image string) string {
-	return m.images[base_image].last()
-}
-
 fn (mut m ImageManager) refresh_image(base_image string) ! {
 	// No need to refresh the image if the previous one is still new enough
 	if base_image in m.timestamps
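With the get method gone, the agent no longer looks up a builder image per base image; the run_build hunk above passes d.builder_images.last() to build_target instead.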

View File

@@ -104,16 +104,16 @@ pub:
 }

 pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult {
 	config := BuildConfig{
 		target_id: target.id
 		kind: target.kind
 		url: target.url
 		branch: target.branch
 		repo: target.repo
 		base_image: base_image_id
 	}

 	return build_config(address, api_key, config)
 }

 // build_target builds, packages & publishes a given Arch package based on the
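On the left-hand side build_target is a thin wrapper: it copies the relevant Target fields into a BuildConfig and hands off to build_config. The next hunk shows the other half of that relationship: the function that generates the build script and creates the container, which now works with the Target and base image id directly.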
@@ -127,14 +127,14 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
 	}

 	build_arch := os.uname().machine
-	build_script := create_build_script(address, config, build_arch)
+	build_script := create_build_script(address, target, build_arch)

 	// We convert the build script into a base64 string, which then gets passed
 	// to the container as an env var
 	base64_script := base64.encode_str(build_script)

 	c := docker.NewContainer{
-		image: '$config.base_image'
+		image: '$base_image_id'
 		env: [
 			'BUILD_SCRIPT=$base64_script',
 			'API_KEY=$api_key',
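The base64 step exists only so the script survives the trip through an environment variable. A small round-trip sketch with V's encoding.base64 follows; the decode half is an assumption about what the builder image's entrypoint does with BUILD_SCRIPT, which this diff does not show.

module main

import encoding.base64

fn main() {
	build_script := 'echo hello'

	// What the build function does before creating the container.
	base64_script := base64.encode_str(build_script)
	println('BUILD_SCRIPT=$base64_script')

	// Assumed: the container entrypoint decodes the variable back into a
	// script before executing it.
	println(base64.decode_str(base64_script))
}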

View File

@@ -76,6 +76,7 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! {
 		}
 	}

+		dump(job)
 		q.queues[arch].insert(job)
 	}
 }
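dump() is V's built-in debug helper: it prints the file, line, expression and value, and returns the value unchanged, so adding it to insert like this is a typical temporary WIP tracing aid.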

View File

@@ -1,5 +1,7 @@
 module build

+import models { Target }
+
 // escape_shell_string escapes any characters that could be interpreted
 // incorrectly by a shell. The resulting value should be safe to use inside an
 // echo statement.
@@ -21,13 +23,13 @@ pub fn echo_commands(cmds []string) []string {
 }

 // create_build_script generates a shell script that builds a given Target.
-fn create_build_script(address string, config BuildConfig, build_arch string) string {
-	repo_url := '$address/$config.repo'
+fn create_build_script(address string, target &Target, build_arch string) string {
+	repo_url := '$address/$target.repo'

 	mut commands := [
 		// This will later be replaced by a proper setting for changing the
 		// mirrorlist
-		"echo -e '[$config.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
+		"echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
 		// We need to update the package list of the repo we just added above.
 		// This should however not pull in a lot of packages as long as the
 		// builder image is rebuilt frequently.
@@ -36,22 +38,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) string {
 		'su builder',
 	]

-	commands << match config.kind {
+	commands << match target.kind {
 		'git' {
-			if config.branch == '' {
+			if target.branch == '' {
 				[
-					"git clone --single-branch --depth 1 '$config.url' repo",
+					"git clone --single-branch --depth 1 '$target.url' repo",
 				]
 			} else {
 				[
-					"git clone --single-branch --depth 1 --branch $config.branch '$config.url' repo",
+					"git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo",
 				]
 			}
 		}
 		'url' {
 			[
 				'mkdir repo',
-				"curl -o repo/PKGBUILD -L '$config.url'",
+				"curl -o repo/PKGBUILD -L '$target.url'",
 			]
 		}
 		else {
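In both versions the generated script has the same shape: register the target repository in pacman.conf, refresh the package database, drop to the builder user, and then either shallow-clone the git repository (optionally pinning a branch) or fetch a single PKGBUILD with curl. Only the source of the metadata changes, from a BuildConfig to a Target.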

View File

@@ -2,10 +2,9 @@ module client

 import build { BuildConfig }

-pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
+pub fn (c &Client) poll_jobs(max int) ![]BuildConfig {
 	data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', {
-		'arch': arch
-		'max':  max.str()
+		'max': max.str()
 	})!

 	return data.data
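On the wire this presumably becomes a plain GET with a single query parameter. A rough sketch with V's net.http follows; the real send_request also attaches the API key header, which the bare http.get call below does not.

module main

import net.http

fn main() {
	max := 3

	// After this change the only query parameter is max; the agent's
	// architecture is no longer sent along.
	resp := http.get('http://localhost:8000/api/v1/jobs/poll?max=$max') or {
		eprintln('Failed to poll jobs: $err.msg()')
		return
	}
	println(resp.body)
}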

View File

@@ -4,7 +4,6 @@ data_dir = "data"
 pkg_dir = "data/pkgs"
 log_level = "DEBUG"
 default_arch = "x86_64"
-arch = "x86_64"

 address = "http://localhost:8000"
@@ -12,3 +11,4 @@ global_schedule = '* *'
 api_update_frequency = 2
 image_rebuild_frequency = 1
 max_concurrent_builds = 3
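The example config mirrors the code change: the agent-specific arch key disappears along with the struct field it configured, while default_arch stays, since that is presumably a server-side default for repositories rather than an agent property.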