Compare commits

1 commit

Author: Jef Roosens
SHA1: 7ba1727a9f
Message: fix(web): don't log new metric for every query param
Date: 2023-02-19 16:35:07 +01:00

15 changed files with 50 additions and 96 deletions

View File

@@ -7,26 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
## [0.6.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.6.0)
### Added
* Metrics endpoint for Prometheus integration
* Search in list of targets using API & CLI
* Allow filtering targets by arch value
* Configurable global timeout for builds
### Changed
* Rewrote cron expression logic in C
* Updated codebase to V commit after 0.3.3
* Agents now use worker threads and no longer spawn a new thread for every
build
### Fixed
* Package upload now fails if TCP connection is closed before all bytes have
been received
### Removed

View File

@@ -3,7 +3,7 @@
pkgbase='vieter'
pkgname='vieter'
pkgver='0.6.0'
pkgver='0.5.0'
pkgrel=1
pkgdesc="Lightweight Arch repository server & package build system"
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
@@ -11,23 +11,13 @@ makedepends=('git' 'vieter-vlang')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
source=(
"$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}"
"libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
)
md5sums=('SKIP' 'SKIP')
source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}")
md5sums=('SKIP')
prepare() {
cd "${pkgname}"
export VMODULES="$srcdir/.vmodules"
# Add the libvieter submodule
git submodule init
git config submodules.src/libvieter.url "${srcdir}/libvieter"
git -c protocol.file.allow=always submodule update
export VMODULES="${srcdir}/.vmodules"
cd src && v install
cd "$pkgname/src" && v install
}
build() {

View File

@@ -20,13 +20,11 @@ struct AgentDaemon {
client client.Client
mut:
images ImageManager
// Which builds are currently running; length is conf.max_concurrent_builds
builds []BuildConfig
// Atomic variables used to detect when a build has finished; length is
// conf.max_concurrent_builds. This approach is used as the difference
// between a recently finished build and an empty build slot is important
// for knowing whether the agent is currently "active".
// conf.max_concurrent_builds
atomics []u64
// Channel used to send builds to worker threads
build_channel chan BuildConfig
}
// agent_init initializes a new agent
@@ -36,8 +34,8 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
client: client.new(conf.address, conf.api_key)
conf: conf
images: new_image_manager(conf.image_rebuild_frequency * 60)
builds: []BuildConfig{len: conf.max_concurrent_builds}
atomics: []u64{len: conf.max_concurrent_builds}
build_channel: chan BuildConfig{cap: conf.max_concurrent_builds}
}
return d
@@ -45,11 +43,6 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon {
// run starts the actual agent daemon. This function will run forever.
pub fn (mut d AgentDaemon) run() {
// Spawn worker threads
for builder_index in 0 .. d.conf.max_concurrent_builds {
spawn d.builder_thread(d.build_channel, builder_index)
}
// This is just so that the very first time the loop is ran, the jobs are
// always polled
mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
@@ -114,10 +107,10 @@ pub fn (mut d AgentDaemon) run() {
// It's technically still possible that the build image is
// removed in the very short period between building the
// builder image and starting a build container with it. If
// this happens, fate really just didn't want you to do this
// this happens, faith really just didn't want you to do this
// build.
d.build_channel <- config
d.start_build(config)
running++
}
}
@@ -154,6 +147,22 @@ fn (mut d AgentDaemon) update_atomics() (int, int) {
return finished, empty
}
// start_build starts a build for the given BuildConfig.
fn (mut d AgentDaemon) start_build(config BuildConfig) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
stdatomic.store_u64(&d.atomics[i], agent.build_running)
d.builds[i] = config
spawn d.run_build(i, config)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
d.linfo('started build: ${config}')
@@ -186,12 +195,3 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
}
// builder_thread is a thread that constantly listens for builds to process
fn (mut d AgentDaemon) builder_thread(ch chan BuildConfig, builder_index int) {
for {
build_config := <-ch or { break }
d.run_build(builder_index, build_config)
}
}
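
For context on the `build_channel`/`builder_thread` side of this hunk: a fixed number of worker threads block on a shared channel and pick up builds as they are sent, which is what makes the `conf.max_concurrent_builds` cap natural. Below is a minimal, self-contained V sketch of that pattern only; `Job`, `worker`, and the counts are illustrative stand-ins, not vieter code.

```v
import time

struct Job {
	id int
}

// worker blocks on the channel and processes jobs until the channel is closed.
fn worker(ch chan Job, worker_index int) {
	for {
		job := <-ch or { break } // receiving from a closed channel ends the loop
		println('worker ${worker_index} running job ${job.id}')
		time.sleep(100 * time.millisecond)
	}
}

fn main() {
	max_concurrent_builds := 3
	ch := chan Job{cap: max_concurrent_builds}
	mut threads := []thread{}
	// Spawn one long-lived worker thread per build slot.
	for i in 0 .. max_concurrent_builds {
		threads << spawn worker(ch, i)
	}
	// Producer side: sending blocks once the channel buffer is full.
	for id in 0 .. 10 {
		ch <- Job{
			id: id
		}
	}
	ch.close()
	threads.wait()
}
```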

View File

@@ -94,8 +94,8 @@ pub:
}
// build_target builds the given target. Internally it calls `build_config`.
pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool, timeout int) !BuildResult {
config := target.as_build_config(base_image_id, force, timeout)
pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult {
config := target.as_build_config(base_image_id, force)
return build_config(address, api_key, config)
}
@@ -136,17 +136,9 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
dd.container_start(id)!
mut data := dd.container_inspect(id)!
start_time := time.now()
// This loop waits until the container has stopped, so we can remove it after
for data.state.running {
if time.now() - start_time > config.timeout * time.second {
dd.container_kill(id)!
dd.container_remove(id)!
return error('Build killed due to timeout (${config.timeout}s)')
}
time.sleep(1 * time.second)
data = dd.container_inspect(id)!
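
The loop in this hunk polls the container state once per second; on the timeout side of the compare it kills and removes the container once `config.timeout` seconds have elapsed. A stripped-down sketch of that wait-with-timeout pattern, assuming a hypothetical `is_running()` probe in place of the real `dd.container_inspect` call:

```v
import time

// wait_or_kill polls until the build stops or the timeout elapses.
// is_running() is a placeholder for dd.container_inspect(id)!.state.running.
fn wait_or_kill(timeout_seconds int) !string {
	start_time := time.now()
	for is_running() {
		if time.now() - start_time > timeout_seconds * time.second {
			// In the real code this is where the container is killed and removed.
			return error('Build killed due to timeout (${timeout_seconds}s)')
		}
		time.sleep(1 * time.second)
	}
	return 'build finished within ${timeout_seconds}s'
}

fn is_running() bool {
	// Placeholder: always reports the container as stopped.
	return false
}

fn main() {
	msg := wait_or_kill(10) or {
		eprintln(err.msg())
		return
	}
	println(msg)
}
```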

View File

@@ -33,8 +33,6 @@ pub struct BuildJobQueue {
default_schedule &cron.Expression
// Base image to use for targets without defined base image
default_base_image string
// After how many minutes a build should be forcefully cancelled
default_build_timeout int
mut:
mutex shared util.Dummy
// For each architecture, a priority queue is tracked
@@ -46,11 +44,10 @@ mut:
}
// new_job_queue initializes a new job queue
pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string, default_build_timeout int) BuildJobQueue {
pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string) BuildJobQueue {
return BuildJobQueue{
default_schedule: unsafe { default_schedule }
default_base_image: default_base_image
default_build_timeout: default_build_timeout
invalidated: map[int]time.Time{}
}
}
@@ -83,7 +80,7 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
mut job := BuildJob{
created: time.now()
single: input.single
config: input.target.as_build_config(q.default_base_image, input.force, q.default_build_timeout)
config: input.target.as_build_config(q.default_base_image, input.force)
}
if !input.now {

View File

@@ -6,7 +6,7 @@ import os
import build
// build locally builds the target with the given id.
fn build_target(conf Config, target_id int, force bool, timeout int) ! {
fn build_target(conf Config, target_id int, force bool) ! {
c := client.new(conf.address, conf.api_key)
target := c.get_target(target_id)!
@@ -16,7 +16,7 @@ fn build_target(conf Config, target_id int, force bool, timeout int) ! {
image_id := build.create_build_image(conf.base_image)!
println('Running build...')
res := build.build_target(conf.address, conf.api_key, image_id, target, force, timeout)!
res := build.build_target(conf.address, conf.api_key, image_id, target, force)!
println('Removing build image...')

View File

@@ -232,12 +232,6 @@ pub fn cmd() cli.Command {
description: 'Architecture to schedule build for. Required when using -remote.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'timeout'
description: 'After how many minutes to cancel the build. Only applies to local builds.'
flag: cli.FlagType.int
default_value: ['3600']
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
@@ -245,7 +239,6 @@ pub fn cmd() cli.Command {
remote := cmd.flags.get_bool('remote')!
force := cmd.flags.get_bool('force')!
timeout := cmd.flags.get_int('timeout')!
target_id := cmd.args[0].int()
if remote {
@@ -258,7 +251,7 @@ pub fn cmd() cli.Command {
c := client.new(conf_.address, conf_.api_key)
c.queue_job(target_id, arch, force)!
} else {
build_target(conf_, target_id, force, timeout)!
build_target(conf_, target_id, force)!
}
}
},

View File

@@ -20,7 +20,7 @@ fn main() {
mut app := cli.Command{
name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.'
version: '0.6.0'
version: '0.5.0'
posix_mode: true
flags: [
cli.Flag{

View File

@@ -10,10 +10,9 @@ pub:
repo string
base_image string
force bool
timeout int
}
// str return a single-line string representation of a build log
pub fn (c BuildConfig) str() string {
return '{ target: ${c.target_id}, kind: ${c.kind}, url: ${c.url}, branch: ${c.branch}, path: ${c.path}, repo: ${c.repo}, base_image: ${c.base_image}, force: ${c.force}, timeout: ${c.timeout} }'
return '{ target: ${c.target_id}, kind: ${c.kind}, url: ${c.url}, branch: ${c.branch}, path: ${c.path}, repo: ${c.repo}, base_image: ${c.base_image}, force: ${c.force} }'
}

View File

@@ -54,7 +54,7 @@ pub fn (t &Target) str() string {
// as_build_config converts a Target into a BuildConfig, given some extra
// needed information.
pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) BuildConfig {
pub fn (t &Target) as_build_config(base_image string, force bool) BuildConfig {
return BuildConfig{
target_id: t.id
kind: t.kind
@@ -64,7 +64,6 @@ pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) B
repo: t.repo
base_image: base_image
force: force
timeout: timeout
}
}

View File

@@ -16,7 +16,6 @@ pub:
max_log_age int [empty_default]
log_removal_schedule string = '0 0'
collect_metrics bool [empty_default]
default_build_timeout int = 3600
}
// cmd returns the cli submodule that handles starting the server

View File

@@ -68,7 +68,7 @@ fn (mut app App) put_package(repo_ string) web.Result {
mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
app.lwarn("Failed to upload '${pkg_path}': ${err.msg()}")
app.lwarn("Failed to upload '${pkg_path}'")
return app.status(.internal_server_error)
}

View File

@@ -108,7 +108,7 @@ pub fn server(conf Config) ! {
repo: repo_
db: db
collector: collector
job_queue: build.new_job_queue(global_ce, conf.base_image, conf.default_build_timeout)
job_queue: build.new_job_queue(global_ce, conf.base_image)
}
app.init_job_queue() or {
util.exit_with_message(1, 'Failed to inialize job queue: ${err.msg()}')

View File

@@ -46,10 +46,6 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! {
to_write = to_write - bytes_written
}
}
if bytes_left > 0 {
return error('Not all bytes were received.')
}
}
// match_array_in_array[T] returns how many elements of a2 overlap with a1. For
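
The `bytes_left > 0` check being diffed above is what backs the changelog entry "Package upload now fails if TCP connection is closed before all bytes have been received": if the reader dries up before the declared length has been consumed, the upload is rejected instead of silently writing a truncated package. A minimal sketch of that accounting, with the I/O replaced by an in-memory list of chunks for illustration (names are made up, not vieter's):

```v
// consume mimics reader_to_file's length bookkeeping without real I/O:
// bytes_left starts at the declared Content-Length and shrinks per chunk.
fn consume(chunks [][]u8, declared_length int) ! {
	mut bytes_left := declared_length
	for chunk in chunks {
		bytes_left -= chunk.len
	}
	// A positive remainder means the connection closed before the full
	// payload arrived, so the caller should treat the upload as failed.
	if bytes_left > 0 {
		return error('Not all bytes were received.')
	}
}

fn main() {
	consume([[u8(1), 2, 3]], 5) or { println(err.msg()) } // truncated: error is printed
	consume([[u8(1), 2, 3], [u8(4), 5]], 5) or { println(err.msg()) }
	println('second upload accepted')
}
```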

View File

@@ -331,7 +331,6 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
app.logger.flush()
}
// Record how long request took to process
path := urllib.parse(app.req.url) or { urllib.URL{} }.path
labels := [
['method', app.req.method.str()]!,
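
This last hunk appears to be the change the commit message describes: labeling the request-duration metric with the parsed path rather than the raw URL, so that `?arch=x86_64` versus `?arch=aarch64` does not mint a separate metric series per query-string value. A small sketch of the difference; the helper name and example routes are illustrative, but `net.urllib.parse` and the `or { urllib.URL{} }.path` expression are the same ones used in the hunk above.

```v
import net.urllib

// metric_path reduces a request URL to its path so metrics are labeled
// per route rather than per unique query string.
fn metric_path(raw_url string) string {
	return urllib.parse(raw_url) or { urllib.URL{} }.path
}

fn main() {
	// Both requests collapse onto the same label value: '/api/v1/targets'.
	println(metric_path('/api/v1/targets?arch=x86_64'))
	println(metric_path('/api/v1/targets?arch=aarch64'))
	// Labeling with raw_url instead would create one series per query value.
}
```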