Compare commits


2 Commits

Author SHA1 Message Date
Jef Roosens 6edd1b475d WIP: write agent 2022-12-12 22:09:57 +01:00
Jef Roosens 18c94c8d0b feat(agent): wrote ImageManager 2022-12-12 21:50:34 +01:00
5 changed files with 140 additions and 37 deletions

View File

@@ -13,7 +13,7 @@ pub:
polling_frequency int = 30
// Architecture of agent
// arch string
// image_rebuild_frequency int = 1440
image_rebuild_frequency int = 1440
}
// cmd returns the cli module that handles the agent daemon.

View File

@@ -4,6 +4,7 @@ import log
import sync.stdatomic
import build { BuildConfig }
import client
import time
const (
build_empty = 0
@@ -14,8 +15,8 @@ const (
struct AgentDaemon {
logger shared log.Log
conf Config
// List of last built builder images
builder_images []string
mut:
images ImageManager
// Which builds are currently running; length is same as
// conf.max_concurrent_builds
builds []BuildConfig
@@ -30,6 +31,7 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
logger: logger
client: client.new(conf.address, conf.api_key)
conf: conf
	images: new_image_manager(conf.image_rebuild_frequency * 60) // config value is in minutes; the manager compares timestamps in seconds
builds: []BuildConfig{len: conf.max_concurrent_builds}
atomics: []u64{len: conf.max_concurrent_builds}
}
@@ -41,13 +43,33 @@ pub fn (mut d AgentDaemon) run() {
for {
free_builds := d.update_atomics()
if free_builds > 0 {
// All build slots are taken, so there's nothing to be done
if free_builds == 0 {
time.sleep(1 * time.second)
continue
}
// Builds have finished, so old builder images might have freed up.
d.images.clean_old_images()
// Poll for new jobs
new_configs := d.client.poll_jobs(free_builds) or {
d.lerror('Failed to poll jobs: $err.msg()')
time.sleep(1 * time.second)
continue
}
// Schedule new jobs
for config in new_configs {
d.start_build(config)
}
}
}
// update_atomics checks for each build whether it's completed, and sets it to
// free again if so. The return value is how many fields are now set to free.
// free again if so. The return value is how many build slots are currently
// free.
fn (mut d AgentDaemon) update_atomics() int {
mut count := 0
@@ -62,3 +84,49 @@ fn (mut d AgentDaemon) update_atomics() int {
return count
}
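The body of update_atomics falls almost entirely outside this hunk. Based on the build_empty/build_running/build_done constants and how start_build and run_build use the atomics array below, the elided part presumably looks something like the following sketch (an assumption, not the actual commit contents):

fn (mut d AgentDaemon) update_atomics() int {
	mut count := 0

	for i in 0 .. d.atomics.len {
		// A slot whose build has marked itself as done can be reclaimed.
		if stdatomic.load_u64(&d.atomics[i]) == agent.build_done {
			stdatomic.store_u64(&d.atomics[i], agent.build_empty)
			count++
		} else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
			// Slots that were already empty are also free.
			count++
		}
	}

	return count
}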
// start_build starts a build for the given BuildConfig object.
fn (mut d AgentDaemon) start_build(config BuildConfig) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
stdatomic.store_u64(&d.atomics[i], agent.build_running)
d.builds[i] = config
go d.run_build(i, config)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
d.linfo('started build: $config.url -> $config.repo')
// 0 means success, 1 means failure
mut status := 0
	res := build.build_config(d.client.address, d.client.api_key, config) or {
		d.ldebug('build_config error: $err.msg()')
		status = 1

		build.BuildResult{}
	}

	if status == 0 {
		d.linfo('finished build: $config.url -> $config.repo; uploading logs...')

		build_arch := os.uname().machine
		d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
			res.exit_code, res.logs) or {
			d.lerror('Failed to upload logs for build: $config.url -> $config.repo')
		}
	} else {
		d.linfo('an error occurred during build: $config.url -> $config.repo')
}
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
}
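Note that while agent_init now constructs an ImageManager, run_build does not yet consult it: the polled config's base_image is used as-is. A possible way to wire the two together, sketched here as an assumption about where this WIP is heading (the commit exposes no getter on ImageManager, so the sketch reads its images map directly, which works because AgentDaemon and ImageManager live in the same module):

// builder_image is a hypothetical helper (not part of this commit) showing how
// the ImageManager could be consulted before a build.
fn (mut d AgentDaemon) builder_image(base_image string) !string {
	// Build a fresh builder image if the tracked one is too old.
	d.images.refresh_image(base_image)!

	// After a successful refresh there is at least one entry for this base
	// image, and the most recently built one is last.
	return d.images.images[base_image].last()
}

run_build could then substitute this value into the config's base_image field before handing it to build.build_config.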

View File

@@ -2,48 +2,59 @@ module agent
import time
import docker
import build
struct ImageManager {
images map[string]string
timestamps map[string]time.Time
mut:
refresh_frequency int
images map[string][]string [required]
timestamps map[string]time.Time [required]
}
// clean_old_base_images tries to remove any old but still present builder
// images.
fn (mut d AgentDaemon) clean_old_base_images() {
mut i := 0
fn new_image_manager(refresh_frequency int) ImageManager {
return ImageManager{
refresh_frequency: refresh_frequency
images: map[string][]string{}
timestamps: map[string]time.Time{}
}
}
mut dd := docker.new_conn() or {
d.lerror('Failed to connect to Docker socket.')
fn (mut m ImageManager) refresh_image(base_image string) ! {
// No need to refresh the image if the previous one is still new enough
if base_image in m.timestamps
&& m.timestamps[base_image].add_seconds(m.refresh_frequency) > time.now() {
return
}
// TODO use better image tags for built images
new_image := build.create_build_image(base_image) or {
return error('Failed to build builder image from base image $base_image')
}
m.images[base_image] << new_image
m.timestamps[base_image] = time.now()
}
// clean_old_images tries to remove any old but still present builder images.
fn (mut m ImageManager) clean_old_images() {
mut dd := docker.new_conn() or { return }
defer {
dd.close() or {}
}
for i < d.builder_images.len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(d.builder_images[i]) or { i += 1 }
mut i := 0
for image in m.images.keys() {
i = 0
for i < m.images[image].len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(m.images[image][i]) or { i += 1 }
}
}
}
// rebuild_base_image builds a builder image from the given base image.
/* fn (mut d AgentDaemon) build_base_image(base_image string) bool { */
/* d.linfo('Rebuilding builder image....') */
/* d.builder_images << build.create_build_image(d.base_image) or { */
/* d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timout}s...') */
/* d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout) */
/* return false */
/* } */
/* d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency) */
/* return true */
/* } */

View File

@@ -103,10 +103,23 @@ pub:
logs string
}
pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult {
config := BuildConfig{
target_id: target.id
kind: target.kind
url: target.url
branch: target.branch
repo: target.repo
base_image: base_image_id
}
return build_config(address, api_key, config)
}
// build_config builds, packages & publishes a given Arch package based on the
// provided build config. The base image ID should be of an image previously
// created by create_build_image. It returns the logs of the container.
pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult {
pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
mut dd := docker.new_conn()!
defer {
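The rest of build_config is cut off in this view; it carries over the old build_target body, which drives the build container through the Docker API. The wrapper above means both call styles below remain available (all variable names here are placeholders): the server-side cron code keeps passing a Target, while the agent passes a BuildConfig it polled.

// Existing call sites keep working through the wrapper:
res_from_target := build.build_target(address, api_key, builder_image_id, &target)!

// The agent, which only has a BuildConfig from poll_jobs, calls the new function directly:
res_from_config := build.build_config(address, api_key, config)!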

src/client/jobs.v 100644 (11 additions)
View File

@@ -0,0 +1,11 @@
module client
import build { BuildConfig }
pub fn (c &Client) poll_jobs(max int) ![]BuildConfig {
data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', {
'max': max.str()
})!
return data.data
}
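A short usage sketch of the new client method from the agent's point of view; the address and API key below are placeholders, and the request itself goes through the generic send_request helper with max passed as a query parameter:

import client

fn main() {
	c := client.new('https://vieter.example.com', 'secret-api-key') // placeholder address & key

	// Ask the server for at most three jobs to run.
	configs := c.poll_jobs(3) or {
		eprintln('polling failed: $err.msg()')
		exit(1)
	}

	for config in configs {
		println('received job: $config.url -> $config.repo')
	}
}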