Compare commits

...

4 Commits

9 changed files with 172 additions and 95 deletions

View File

@@ -7,7 +7,5 @@ pipeline:
lint:
image: 'chewingbever/vlang:latest'
pull: true
group: lint
commands:
- make lint
- make vet

View File

@@ -53,16 +53,15 @@ run-prod: prod
.PHONY: lint
lint:
$(V) fmt -verify $(SRC_DIR)
$(V) vet -W $(SRC_DIR)
$(V_PATH) missdoc -p $(SRC_DIR)
@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
# Format the V codebase
.PHONY: fmt
fmt:
$(V) fmt -w $(SRC_DIR)
.PHONY: vet
vet:
$(V) vet -W $(SRC_DIR)
.PHONY: test
test:
$(V) test $(SRC_DIR)

View File

@@ -10,7 +10,7 @@ const container_build_dir = '/build'
const build_image_repo = 'vieter-build'
fn create_build_image(base_image string) ?string {
pub fn create_build_image(base_image string) ?string {
commands := [
// Update repos & install required packages
'pacman -Syu --needed --noconfirm base-devel git'
@@ -53,12 +53,13 @@ fn create_build_image(base_image string) ?string {
break
}
// Wait a bit before polling the container again
time.sleep(5000000000)
time.sleep(1 * time.second)
}
// Finally, we create the image from the container
// As the tag, we use the current monotonic clock value
// TODO also add the base image's name into the image name to prevent
// conflicts.
tag := time.sys_mono_now().str()
image := docker.create_image_from_container(id, 'vieter-build', tag) ?
docker.remove_container(id) ?
@@ -66,6 +67,52 @@ fn create_build_image(base_image string) ?string {
return image.id
}
pub fn build_repo(address string, api_key string, base_image_id string, repo &git.GitRepo) ? {
build_arch := os.uname().machine
// TODO what to do with PKGBUILDs that build multiple packages?
commands := [
'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
'cd repo',
'makepkg --nobuild --nodeps',
'source PKGBUILD',
// The build container checks whether the package is already
// present on the server
'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
]
// We convert the list of commands into a base64 string, which then gets
// passed to the container as an env var
cmds_str := base64.encode_str(commands.join('\n'))
c := docker.NewContainer{
image: '$base_image_id'
env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$api_key']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
work_dir: '/build'
user: 'builder:builder'
}
id := docker.create_container(c) ?
docker.start_container(id) ?
// This loop waits until the container has stopped, so we can remove it after
for {
data := docker.inspect_container(id) ?
if !data.state.running {
break
}
// Wait for 1 second
time.sleep(1 * time.second)
}
docker.remove_container(id) ?
}
fn build(conf Config) ? {
build_arch := os.uname().machine
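The build script reaches the container without any bind mounts: the command list is joined with newlines, base64-encoded on the host, and handed over as the BUILD_SCRIPT env var, which the entrypoint decodes and pipes into bash. A minimal sketch of that round trip, using V's encoding.base64 and a hypothetical command list:

import encoding.base64

fn main() {
	// Hypothetical build steps, joined into one shell script
	commands := ['echo building', 'makepkg --noconfirm']
	// Encoding keeps the newlines intact across the env-var boundary
	cmds_str := base64.encode_str(commands.join('\n'))
	// Inside the container, the entrypoint reverses this:
	//   /bin/sh -c 'echo $BUILD_SCRIPT | base64 -d | /bin/bash -e'
	assert base64.decode_str(cmds_str) == commands.join('\n')
	println(cmds_str)
}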
@@ -85,47 +132,7 @@ fn build(conf Config) ? {
image_id := create_build_image(conf.base_image) ?
for repo in filtered_repos {
// TODO what to do with PKGBUILDs that build multiple packages?
commands := [
'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
'cd repo',
'makepkg --nobuild --nodeps',
'source PKGBUILD',
// The build container checks whether the package is already
// present on the server
'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
]
// We convert the list of commands into a base64 string, which then gets
// passed to the container as an env var
cmds_str := base64.encode_str(commands.join('\n'))
c := docker.NewContainer{
image: '$image_id'
env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
work_dir: '/build'
user: 'builder:builder'
}
id := docker.create_container(c) ?
docker.start_container(id) ?
// This loop waits until the container has stopped, so we can remove it after
for {
data := docker.inspect_container(id) ?
if !data.state.running {
break
}
// Wait for 5 seconds
time.sleep(5000000000)
}
docker.remove_container(id) ?
build_repo(conf.address, conf.api_key, image_id, repo) ?
}
// Finally, we remove the builder image
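With build_repo extracted, the one-shot flow above reduces to: create the builder image once, call build_repo for every repo, then drop the image. A condensed sketch of the resulting sequence (the conf fields come from the Config struct in the next file):

image_id := create_build_image(conf.base_image) ?
for repo in filtered_repos {
	build_repo(conf.address, conf.api_key, image_id, repo) ?
}
// The builder image itself is removed afterwards (elided in the diff)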

View File

@@ -5,13 +5,14 @@ import env
struct Config {
pub:
log_level string = 'WARN'
log_file string = 'vieter.log'
api_key string
address string
base_image string = 'archlinux:base-devel'
max_concurrent_builds int = 1
api_update_frequency int = 15
log_level string = 'WARN'
log_file string = 'vieter.log'
api_key string
address string
base_image string = 'archlinux:base-devel'
max_concurrent_builds int = 1
api_update_frequency int = 15
image_rebuild_frequency int = 1440
// Replicates the behavior of the original cron system
global_schedule string = '0 3'
}

View File

@@ -23,7 +23,7 @@ pub fn cron(conf Config) ? {
}
mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
conf.max_concurrent_builds, conf.api_update_frequency) ?
conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?
d.run() ?
}

View File

@@ -2,6 +2,7 @@ module daemon
import time
import sync.stdatomic
import build
const build_empty = 0
@@ -9,21 +10,23 @@ const build_running = 1
const build_done = 2
// reschedule_builds looks for any builds with status code 2 & re-adds them to
// the queue.
fn (mut d Daemon) reschedule_builds() ? {
// clean_finished_builds removes finished builds from the build slots & returns
// them.
fn (mut d Daemon) clean_finished_builds() ?[]ScheduledBuild {
mut out := []ScheduledBuild{}
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
sb := d.builds[i]
d.schedule_build(sb.repo_id, sb.repo) ?
out << d.builds[i]
}
}
return out
}
// start_new_builds starts as many builds as possible.
fn (mut d Daemon) update_builds() ? {
fn (mut d Daemon) start_new_builds() ? {
now := time.now()
for d.queue.len() > 0 {
@@ -31,8 +34,8 @@ fn (mut d Daemon) update_builds() ? {
sb := d.queue.pop() ?
// If this build couldn't be scheduled, no more will be possible.
// TODO a build that couldn't be scheduled should be re-added to the queue.
if !d.start_build(sb) {
d.queue.insert(sb)
break
}
} else {
@@ -59,8 +62,22 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
// run_build actually starts the build process for a given repo.
fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
d.linfo('build $sb.repo.url')
time.sleep(10 * time.second)
d.linfo('started build: $sb.repo.url $sb.repo.branch')
build.build_repo(d.address, d.api_key, d.builder_images.last(), &sb.repo) ?
stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
}
// current_build_count returns how many builds are currently running.
fn (mut d Daemon) current_build_count() int {
mut res := 0
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
res += 1
}
}
return res
}
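Each build slot is guarded by a plain u64 that is only ever touched through sync.stdatomic, so the main loop and the build threads never need a mutex. A minimal sketch of the slot lifecycle, assuming the same three status constants:

import sync.stdatomic

const (
	build_empty   = u64(0)
	build_running = u64(1)
	build_done    = u64(2)
)

fn main() {
	// One status word per concurrent build slot
	mut atomics := []u64{len: 3}
	// The main loop claims a free slot before spawning run_build
	stdatomic.store_u64(&atomics[0], build_running)
	// run_build flips the slot once the build finishes...
	stdatomic.store_u64(&atomics[0], build_done)
	// ...and clean_finished_builds reclaims it on the next pass
	if stdatomic.load_u64(&atomics[0]) == build_done {
		stdatomic.store_u64(&atomics[0], build_empty)
	}
}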

View File

@@ -6,7 +6,8 @@ import log
import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression }
import math
import arrays
import build
import docker
struct ScheduledBuild {
pub:
@@ -21,16 +22,19 @@ fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
pub struct Daemon {
mut:
address string
api_key string
base_image string
global_schedule CronExpression
api_update_frequency int
address string
api_key string
base_image string
builder_images []string
global_schedule CronExpression
api_update_frequency int
image_rebuild_frequency int
// Repos currently loaded from API.
repos_map map[string]git.GitRepo
// At what point to update the list of repositories.
api_update_timestamp time.Time
queue MinHeap<ScheduledBuild>
api_update_timestamp time.Time
image_build_timestamp time.Time
queue MinHeap<ScheduledBuild>
// Which builds are currently running
builds []ScheduledBuild
// Atomic variables used to detect when a build has finished; length is the
@@ -41,13 +45,14 @@ mut:
// init_daemon initializes a new Daemon object. It renews the repositories &
// populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int) ?Daemon {
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
mut d := Daemon{
address: address
api_key: api_key
base_image: base_image
global_schedule: global_schedule
api_update_frequency: api_update_frequency
image_rebuild_frequency: image_rebuild_frequency
atomics: []u64{len: max_concurrent_builds}
builds: []ScheduledBuild{len: max_concurrent_builds}
logger: logger
@@ -56,6 +61,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
// Initialize the repos & queue
d.renew_repos() ?
d.renew_queue() ?
d.rebuild_base_image() ?
return d
}
@@ -64,40 +70,62 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
// periodically refreshes the list of repositories to ensure we stay in sync.
pub fn (mut d Daemon) run() ? {
for {
finished_builds := d.clean_finished_builds() ?
// Update the API's contents if needed & renew the queue
if time.now() >= d.api_update_timestamp {
d.renew_repos() ?
d.renew_queue() ?
}
// The finished builds should only be rescheduled if the API contents
// haven't been renewed.
else {
for sb in finished_builds {
d.schedule_build(sb.repo_id, sb.repo) ?
}
}
// Cleans up finished builds, opening up spots for new builds
d.reschedule_builds() ?
// TODO rebuild builder image when needed
// TODO remove old builder images.
// This issue is less trivial than it sounds, because a build could
// still be running when the image has to be rebuilt. That would
// prevent the image from being removed. Therefore, we will need to
// keep track of a list or something & remove an image once we have
// made sure it isn't being used anymore.
if time.now() >= d.image_build_timestamp {
d.rebuild_base_image() ?
// In theory, executing this function here allows an old builder
// image to exist for at most image_rebuild_frequency minutes.
d.clean_old_base_images()
}
// Schedules new builds when possible
d.update_builds() ?
d.start_new_builds() ?
// If there are builds currently running, the daemon should refresh
// every second to clean up any finished builds & start new ones.
mut delay := time.Duration(1 * time.second)
// Sleep either until we have to refresh the repos or until the next
// build has to start, with a minimum of 1 second.
now := time.now()
if d.current_build_count() == 0 {
now := time.now()
delay = d.api_update_timestamp - now
mut delay := d.api_update_timestamp - now
if d.queue.len() > 0 {
time_until_next_job := d.queue.peek() ?.timestamp - now
if d.queue.len() > 0 {
time_until_next_job := d.queue.peek() ?.timestamp - now
delay = math.min(delay, time_until_next_job)
delay = math.min(delay, time_until_next_job)
}
}
d.ldebug('Sleeping for ${delay}...')
// TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished.
// We sleep for at least one second. This is to prevent the program
// from looping aggressively when a cronjob can be scheduled, but
// there are no spots free for it to be started.
time.sleep(math.max(delay, 1 * time.second))
delay = math.max(delay, 1 * time.second)
d.ldebug('Sleeping for ${delay}...')
time.sleep(delay)
}
}
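To make the sleep computation concrete: with no build running, the repo refresh due in 30 seconds, and the next queued job due in 10, the daemon sleeps min(30, 10) = 10 seconds. With a build still active, delay keeps its 1-second default so finished containers are reaped promptly, and the final max(delay, 1 second) stops the loop from spinning when a job is due but every build slot is taken.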
@@ -120,7 +148,7 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
}
fn (mut d Daemon) renew_repos() ? {
d.ldebug('Renewing repos...')
d.linfo('Renewing repos...')
mut new_repos := git.get_repos(d.address, d.api_key) ?
d.repos_map = new_repos.move()
@@ -131,7 +159,7 @@ fn (mut d Daemon) renew_repos() ? {
// renew_queue replaces the old queue with a new one that reflects the newest
// values in repos_map.
fn (mut d Daemon) renew_queue() ? {
d.ldebug('Renewing queue...')
d.linfo('Renewing queue...')
mut new_queue := MinHeap<ScheduledBuild>{}
// Move any jobs that should have already started from the old queue onto
@@ -160,3 +188,25 @@ fn (mut d Daemon) renew_queue() ? {
d.schedule_build(id, repo) ?
}
}
fn (mut d Daemon) rebuild_base_image() ? {
d.linfo('Rebuilding builder image...')
d.builder_images << build.create_build_image(d.base_image) ?
d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
}
fn (mut d Daemon) clean_old_base_images() {
mut i := 0
for i < d.builder_images.len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
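// Note that the loop stops before the last element, so the newest
// image is always kept.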
if !docker.remove_image(d.builder_images[i]) or { false } {
i += 1
} else {
d.builder_images.delete(i)
}
}
}
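The queue itself is a datatypes.MinHeap ordered by ScheduledBuild's custom < operator, so pop() always yields the build whose timestamp is soonest. A stripped-down sketch of the same pattern with a hypothetical Job type:

import datatypes { MinHeap }
import time

struct Job {
	timestamp time.Time
	url       string
}

// MinHeap orders its elements through this operator, just like ScheduledBuild
fn (j1 Job) < (j2 Job) bool {
	return j1.timestamp < j2.timestamp
}

fn main() {
	mut queue := MinHeap<Job>{}
	queue.insert(Job{
		timestamp: time.now().add_seconds(60)
		url: 'https://example.com/b.git'
	})
	queue.insert(Job{
		timestamp: time.now()
		url: 'https://example.com/a.git'
	})
	// a.git pops first because its timestamp is the earliest
	job := queue.pop() or { return }
	println(job.url)
}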

View File

@@ -114,6 +114,8 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
})
}
// next_from_now returns the result of ce.next(ref) where ref is the result of
// time.now().
pub fn (ce &CronExpression) next_from_now() ?time.Time {
return ce.next(time.now())
}
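A hedged usage sketch, inside any function that can propagate the error ('0 3' is the default global_schedule from the config):

ce := parse_expression('0 3') ?
// Identical to ce.next(time.now()), without the boilerplate at call sites
next_run := ce.next_from_now() ?
println(next_run)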

View File

@@ -10,4 +10,7 @@ default_arch = "x86_64"
address = "http://localhost:8000"
global_schedule = '* *'
api_update_frequency = 2
image_rebuild_frequency = 1
max_concurrent_builds = 3