forked from vieter-v/vieter

feat(cron): added automatic rebuilding of image; implemented builds

parent 98c0e52b08
commit 369b4458c5
@@ -10,7 +10,7 @@ const container_build_dir = '/build'
 const build_image_repo = 'vieter-build'

-fn create_build_image(base_image string) ?string {
+pub fn create_build_image(base_image string) ?string {
 	commands := [
 		// Update repos & install required packages
 		'pacman -Syu --needed --noconfirm base-devel git'
@@ -53,12 +53,13 @@ fn create_build_image(base_image string) ?string {
 			break
 		}

-		// Wait for 5 seconds
-		time.sleep(5000000000)
+		time.sleep(1 * time.second)
 	}

 	// Finally, we create the image from the container
 	// As the tag, we use the epoch value
+	// TODO also add the base image's name into the image name to prevent
+	// conflicts.
 	tag := time.sys_mono_now().str()
 	image := docker.create_image_from_container(id, 'vieter-build', tag) ?
 	docker.remove_container(id) ?
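The new sleep line replaces the raw nanosecond literal 5000000000 with V's duration constants: time.second is a time.Duration counted in nanoseconds, so 1 * time.second reads as intended while also shortening the poll interval from five seconds to one. A self-contained sketch of the same poll-and-sleep pattern, with a hypothetical container_running helper standing in for docker.inspect_container:

import time

// container_running is a hypothetical stand-in for the Docker API call;
// here it simply reports "running" for the first three polls.
fn container_running(polls int) bool {
	return polls < 3
}

fn main() {
	mut polls := 0
	for container_running(polls) {
		polls++
		// 1 * time.second is a time.Duration of 1_000_000_000 ns
		time.sleep(1 * time.second)
	}
	println('container exited after $polls polls')
}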
@@ -66,6 +67,52 @@ fn create_build_image(base_image string) ?string {
 	return image.id
 }

+pub fn build_repo(address string, api_key string, base_image_id string, repo &git.GitRepo) ? {
+	build_arch := os.uname().machine
+
+	// TODO what to do with PKGBUILDs that build multiple packages?
+	commands := [
+		'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
+		'cd repo',
+		'makepkg --nobuild --nodeps',
+		'source PKGBUILD',
+		// The build container checks whether the package is already
+		// present on the server
+		'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
+	]
+
+	// We convert the list of commands into a base64 string, which then gets
+	// passed to the container as an env var
+	cmds_str := base64.encode_str(commands.join('\n'))
+
+	c := docker.NewContainer{
+		image: '$base_image_id'
+		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$api_key']
+		entrypoint: ['/bin/sh', '-c']
+		cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
+		work_dir: '/build'
+		user: 'builder:builder'
+	}
+
+	id := docker.create_container(c) ?
+	docker.start_container(id) ?
+
+	// This loop waits until the container has stopped, so we can remove it after
+	for {
+		data := docker.inspect_container(id) ?
+
+		if !data.state.running {
+			break
+		}
+
+		// Sleep for a second before polling again
+		time.sleep(1 * time.second)
+	}
+
+	docker.remove_container(id) ?
+}
+
 fn build(conf Config) ? {
 	build_arch := os.uname().machine
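build_repo serializes the whole shell script into one base64 blob so an arbitrary multi-line script can travel through a single environment variable; the container's entrypoint then decodes it and pipes it into bash -e. A minimal round-trip sketch of that encoding (the decode side is shown with V's own decoder rather than the container's base64 -d, and the command list is illustrative):

import encoding.base64

fn main() {
	commands := [
		'git clone --single-branch --depth 1 --branch main https://example.com/some-repo repo',
		'cd repo',
		'makepkg -s --noconfirm --needed',
	]
	// joined with newlines & encoded, exactly as build_repo prepares BUILD_SCRIPT
	cmds_str := base64.encode_str(commands.join('\n'))
	println(cmds_str)

	// the container side effectively does: echo $BUILD_SCRIPT | base64 -d | /bin/bash -e
	println(base64.decode_str(cmds_str))
}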
@@ -85,47 +132,7 @@ fn build(conf Config) ? {
 	image_id := create_build_image(conf.base_image) ?

 	for repo in filtered_repos {
-		// TODO what to do with PKGBUILDs that build multiple packages?
-		commands := [
-			'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
-			'cd repo',
-			'makepkg --nobuild --nodeps',
-			'source PKGBUILD',
-			// The build container checks whether the package is already
-			// present on the server
-			'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
-			'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
-		]
-
-		// We convert the list of commands into a base64 string, which then gets
-		// passed to the container as an env var
-		cmds_str := base64.encode_str(commands.join('\n'))
-
-		c := docker.NewContainer{
-			image: '$image_id'
-			env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
-			entrypoint: ['/bin/sh', '-c']
-			cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
-			work_dir: '/build'
-			user: 'builder:builder'
-		}
-
-		id := docker.create_container(c) ?
-		docker.start_container(id) ?
-
-		// This loop waits until the container has stopped, so we can remove it after
-		for {
-			data := docker.inspect_container(id) ?
-
-			if !data.state.running {
-				break
-			}
-
-			// Wait for 5 seconds
-			time.sleep(5000000000)
-		}
-
-		docker.remove_container(id) ?
+		build_repo(conf.address, conf.api_key, image_id, repo) ?
 	}

 	// Finally, we remove the builder image
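The per-repo build logic removed here lives on in build_repo above. Its curl probe is the freshness check: --head sends a HEAD request and --fail makes curl exit non-zero on an HTTP error, so the trailing && exit 0 aborts the script only when the exact $pkgname-$pkgver-$pkgrel is already published. A rough V equivalent of that probe (server URL and package name are made up for illustration; the real check runs as curl inside the container):

import net.http

fn main() {
	// hypothetical stand-in for $address/$repo.repo/$build_arch/$pkgname-$pkgver-$pkgrel
	url := 'https://example.com/vieter/x86_64/mypkg-1.0.0-1'
	resp := http.get(url) or {
		println('request failed: $err')
		return
	}
	if resp.status_code == 200 {
		println('package already present, nothing to build')
	} else {
		println('package missing, proceeding with build')
	}
}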
@@ -5,13 +5,14 @@ import env

 struct Config {
 pub:
 	log_level string = 'WARN'
 	log_file string = 'vieter.log'
 	api_key string
 	address string
 	base_image string = 'archlinux:base-devel'
 	max_concurrent_builds int = 1
 	api_update_frequency int = 15
+	image_rebuild_frequency int = 1440
 	// Replicates the behavior of the original cron system
 	global_schedule string = '0 3'
 }
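image_rebuild_frequency is expressed in minutes, so the default of 1440 rebuilds the builder image once a day. A small sketch of how the daemon turns it into the next rebuild deadline (the field name comes from this diff; the rest is illustrative):

import time

fn main() {
	image_rebuild_frequency := 1440 // minutes between image rebuilds
	next_rebuild := time.now().add_seconds(60 * image_rebuild_frequency)
	println('next builder image rebuild at $next_rebuild')
}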
@@ -23,7 +23,7 @@ pub fn cron(conf Config) ? {
 	}

 	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
-		conf.max_concurrent_builds, conf.api_update_frequency) ?
+		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?

 	d.run() ?
 }
@@ -3,6 +3,7 @@ module daemon
 import time
 import sync.stdatomic
 import rand
+import build

 const build_empty = 0
@@ -62,8 +63,9 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool {

 // run_build actually starts the build process for a given repo.
 fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
-	d.linfo('build $sb.repo.url')
-	time.sleep(rand.int_in_range(1, 6) ? * time.second)
+	d.linfo('started build: ${sb.repo.url} ${sb.repo.branch}')
+	build.build_repo(d.address, d.api_key, d.builder_image, &sb.repo) ?

 	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
 }
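run_build now performs a real build instead of the placeholder random sleep, and still signals completion through the daemon's atomics array, which the scheduler polls to find free build slots. A compact sketch of that signalling mechanism (the slot layout and the build_done convention mirror the daemon; the constant's value here is illustrative):

import sync.stdatomic

const build_done = u64(2)

fn main() {
	// one u64 slot per concurrent build, as in the Daemon struct
	mut atomics := []u64{len: 2}

	// a finished worker flips its slot, just like run_build does
	stdatomic.store_u64(&atomics[0], build_done)

	for i, _ in atomics {
		if stdatomic.load_u64(&atomics[i]) == build_done {
			println('build slot $i is done & can be reused')
		}
	}
}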
@@ -6,6 +6,7 @@ import log
 import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
 import math
+import build

 struct ScheduledBuild {
 pub:
@@ -20,16 +21,19 @@ fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {

 pub struct Daemon {
 mut:
 	address string
 	api_key string
 	base_image string
+	builder_image string
 	global_schedule CronExpression
 	api_update_frequency int
+	image_rebuild_frequency int
 	// Repos currently loaded from API.
 	repos_map map[string]git.GitRepo
 	// At what point to update the list of repositories.
 	api_update_timestamp time.Time
+	image_build_timestamp time.Time
 	queue MinHeap<ScheduledBuild>
 	// Which builds are currently running
 	builds []ScheduledBuild
 	// Atomic variables used to detect when a build has finished; length is the
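The queue field is a MinHeap of ScheduledBuild, ordered by the < operator overload named in the hunk header, so the earliest-scheduled build always sits at the top. A minimal sketch of that pattern with a hypothetical Job type:

import datatypes { MinHeap }

struct Job {
	timestamp int // stand-in for the scheduled build time
}

fn (j1 Job) < (j2 Job) bool {
	return j1.timestamp < j2.timestamp
}

fn main() {
	mut queue := MinHeap<Job>{}
	queue.insert(Job{ timestamp: 30 })
	queue.insert(Job{ timestamp: 10 })
	queue.insert(Job{ timestamp: 20 })

	// pops in ascending timestamp order: 10, 20, 30
	for {
		job := queue.pop() or { break }
		println(job.timestamp)
	}
}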
@@ -40,13 +44,14 @@ mut:

 // init_daemon initializes a new Daemon object. It renews the repositories &
 // populates the build queue for the first time.
-pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int) ?Daemon {
+pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
 	mut d := Daemon{
 		address: address
 		api_key: api_key
 		base_image: base_image
 		global_schedule: global_schedule
 		api_update_frequency: api_update_frequency
+		image_rebuild_frequency: image_rebuild_frequency
 		atomics: []u64{len: max_concurrent_builds}
 		builds: []ScheduledBuild{len: max_concurrent_builds}
 		logger: logger
@@ -55,6 +60,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
 	// Initialize the repos & queue
 	d.renew_repos() ?
 	d.renew_queue() ?
+	d.rebuild_base_image() ?

 	return d
 }
@@ -78,7 +84,15 @@ pub fn (mut d Daemon) run() ? {
 			}
 		}

-		// TODO rebuild builder image when needed
+		// TODO remove old builder images.
+		// This issue is less trivial than it sounds, because a build could
+		// still be running when the image has to be rebuilt. That would
+		// prevent the image from being removed. Therefore, we will need to
+		// keep track of a list or something & remove an image once we have
+		// made sure it isn't being used anymore.
+		if time.now() >= d.image_build_timestamp {
+			d.rebuild_base_image() ?
+		}

 		// Schedules new builds when possible
 		d.start_new_builds() ?
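Each pass through the daemon's main loop now checks the clock against image_build_timestamp and rebuilds the builder image once the deadline has passed; rebuild_base_image (added below) then pushes the deadline forward again. The same gate in a self-contained form (iteration count and sleep interval are illustrative):

import time

fn main() {
	frequency := 1440 // image_rebuild_frequency, in minutes
	mut deadline := time.now() // first iteration triggers a rebuild right away

	for i := 0; i < 3; i++ {
		if time.now() >= deadline {
			println('rebuilding builder image')
			deadline = time.now().add_seconds(60 * frequency)
		}
		time.sleep(10 * time.millisecond)
	}
}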
@@ -170,3 +184,10 @@ fn (mut d Daemon) renew_queue() ? {
 			d.schedule_build(id, repo) ?
 		}
 	}
+
+fn (mut d Daemon) rebuild_base_image() ? {
+	d.linfo("Rebuilding builder image....")
+
+	d.builder_image = build.create_build_image(d.base_image) ?
+	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
+}