From 132a7a8ba5a80938993db7fa712048ea885283c4 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Wed, 13 Apr 2022 14:51:01 +0200 Subject: [PATCH 01/21] Added int support to env; fixed apparently broken defaults --- src/env/env.v | 40 +++++++++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/src/env/env.v b/src/env/env.v index cbde67e..0124850 100644 --- a/src/env/env.v +++ b/src/env/env.v @@ -55,27 +55,41 @@ pub fn load(path string) ?T { $for field in T.fields { s := doc.value(field.name) - // We currently only support strings - if s.type_name() == 'string' { - res.$(field.name) = s.string() + if s !is toml.Null { + $if field.typ is string { + res.$(field.name) = s.string() + }$else $if field.typ is int { + res.$(field.name) = s.int() + } } } } $for field in T.fields { - $if field.typ is string { - env_value := get_env_var(field.name) ? + env_value := get_env_var(field.name) ? - // The value of the env var will always be chosen over the config - // file - if env_value != '' { + // The value of an env var will always take precedence over the toml + // file. + if env_value != '' { + $if field.typ is string { res.$(field.name) = env_value + } $else $if field.typ is int { + res.$(field.name) = env_value.int() } - // If there's no value from the toml file either, we try to find a - // default value - else if res.$(field.name) == '' { - return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.") - } + } + + // Now, we check whether a value is present. If there isn't, that means + // it isn't in the config file, nor is there a default or an env var. + mut has_value := false + + $if field.typ is string { + has_value = res.$(field.name) != '' + } $else $if field.typ is int { + has_value = res.$(field.name) != 0 + } + + if !has_value { + return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.") } } return res From ff57d7399838e5f6edb2c26d46812ddb35d9da01 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Wed, 13 Apr 2022 15:24:55 +0200 Subject: [PATCH 02/21] Start of daemon (not working) [CI SKIP] --- src/cron/cli.v | 3 +++ src/cron/cron.v | 26 +++++++++++-------- src/cron/daemon/daemon.v | 54 ++++++++++++++++++++++++++++++++++++++++ src/cron/daemon/log.v | 35 ++++++++++++++++++++++++++ src/cron/expression.v | 2 +- src/git/git.v | 2 ++ 6 files changed, 111 insertions(+), 11 deletions(-) create mode 100644 src/cron/daemon/daemon.v create mode 100644 src/cron/daemon/log.v diff --git a/src/cron/cli.v b/src/cron/cli.v index 8e6b0f1..4d2b133 100644 --- a/src/cron/cli.v +++ b/src/cron/cli.v @@ -10,6 +10,9 @@ pub: api_key string address string base_image string = 'archlinux:base-devel' + max_concurrent_builds int = 1 + api_update_frequency int = 60 + global_schedule string } // cmd returns the cli module that handles the cron daemon. diff --git a/src/cron/cron.v b/src/cron/cron.v index 3ba9d0f..3d3ea9a 100644 --- a/src/cron/cron.v +++ b/src/cron/cron.v @@ -2,17 +2,23 @@ module cron import git import time - -struct ScheduledBuild { - repo git.GitRepo - timestamp time.Time -} - -fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool { - return r1.timestamp < r2.timestamp -} +import log +import util +import cron.daemon // cron starts a cron daemon & starts periodically scheduling builds. pub fn cron(conf Config) ? 
{ - println('WIP') + // Configure logger + log_level := log.level_from_tag(conf.log_level) or { + util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') + } + + mut logger := log.Log{ + level: log_level + } + + logger.set_full_logpath(conf.log_file) + logger.log_to_console_too() + + d := daemon.init(conf) } diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v new file mode 100644 index 0000000..a887717 --- /dev/null +++ b/src/cron/daemon/daemon.v @@ -0,0 +1,54 @@ +module daemon + +import git +import time +import log +import datatypes + +struct ScheduledBuild { + repo git.GitRepo + timestamp time.Time +} + +fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool { + return r1.timestamp < r2.timestamp +} + +pub struct Daemon { +mut: + conf Config + // Repos currently loaded from API. + repos_map map[string]git.GitRepo + // At what point to update the list of repositories. + api_update_timestamp time.Time + queue datatypes.MinHeap + // Which builds are currently running + builds []git.GitRepo + // Atomic variables used to detect when a build has finished; length is the + // same as builds + atomics []u64 + logger shared log.Log +} + +// init +pub fn init(conf Config) Daemon { + return Daemon{ + conf: conf + atomics: [conf.max_concurrent_builds]u64{} + } +} + +fn (mut d Daemon) run() ? { + d.renew_repos() ? + d.renew_queue() ? +} + +fn (mut d Daemon) renew_repos() ? { + mut new_repos := git.get_repos(d.conf.address, d.conf.api_key) ? + + d.repos_map = new_repos.move() +} + +fn (mut d Daemon) renew_queue() ? { + +} diff --git a/src/cron/daemon/log.v b/src/cron/daemon/log.v new file mode 100644 index 0000000..003898b --- /dev/null +++ b/src/cron/daemon/log.v @@ -0,0 +1,35 @@ +module daemon + +import log + +// log reate a log message with the given level +pub fn (mut d Daemon) log(msg &string, level log.Level) { + lock d.logger { + d.logger.send_output(msg, level) + } +} + +// lfatal create a log message with the fatal level +pub fn (mut d Daemon) lfatal(msg &string) { + d.log(msg, log.Level.fatal) +} + +// lerror create a log message with the error level +pub fn (mut d Daemon) lerror(msg &string) { + d.log(msg, log.Level.error) +} + +// lwarn create a log message with the warn level +pub fn (mut d Daemon) lwarn(msg &string) { + d.log(msg, log.Level.warn) +} + +// linfo create a log message with the info level +pub fn (mut d Daemon) linfo(msg &string) { + d.log(msg, log.Level.info) +} + +// ldebug create a log message with the debug level +pub fn (mut d Daemon) ldebug(msg &string) { + d.log(msg, log.Level.debug) +} diff --git a/src/cron/expression.v b/src/cron/expression.v index 0a35541..b35c568 100644 --- a/src/cron/expression.v +++ b/src/cron/expression.v @@ -241,7 +241,7 @@ fn parse_expression(exp string) ?CronExpression { // This for loop allows us to more clearly propagate the error to the user. for i, min in mins { part_results << parse_part(parts[i], min, maxs[i]) or { - return error('An error occurred with part $i: $err.msg') + return error('An error occurred with part $i: $err.msg()') } } diff --git a/src/git/git.v b/src/git/git.v index eaec895..45aed60 100644 --- a/src/git/git.v +++ b/src/git/git.v @@ -14,6 +14,8 @@ pub mut: arch []string // Which repo the builder should publish packages to repo string + // Cron schedule describing how frequently to build the repo. 
+ schedule string } // patch_from_params patches a GitRepo from a map[string]string, usually From f7e1aba30bc8d95c28632ace658998804f60763f Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Wed, 13 Apr 2022 16:12:22 +0200 Subject: [PATCH 03/21] Attempt at writing renew_queue function; seems to just stop in the middle --- src/cron/cli.v | 16 ++--- src/cron/cron.v | 12 +++- src/cron/daemon/daemon.v | 72 +++++++++++++++---- src/cron/{ => expression}/expression.v | 6 +- .../{ => expression}/expression_parse_test.v | 2 +- src/cron/{ => expression}/expression_test.v | 2 +- src/env/env.v | 2 +- src/git/git.v | 4 +- src/v.mod | 0 vieter.toml | 3 + 10 files changed, 89 insertions(+), 30 deletions(-) rename src/cron/{ => expression}/expression.v (98%) rename src/cron/{ => expression}/expression_parse_test.v (99%) rename src/cron/{ => expression}/expression_test.v (97%) create mode 100644 src/v.mod diff --git a/src/cron/cli.v b/src/cron/cli.v index 4d2b133..f4b20ec 100644 --- a/src/cron/cli.v +++ b/src/cron/cli.v @@ -5,14 +5,14 @@ import env struct Config { pub: - log_level string = 'WARN' - log_file string = 'vieter.log' - api_key string - address string - base_image string = 'archlinux:base-devel' - max_concurrent_builds int = 1 - api_update_frequency int = 60 - global_schedule string + log_level string = 'WARN' + log_file string = 'vieter.log' + api_key string + address string + base_image string = 'archlinux:base-devel' + max_concurrent_builds int = 1 + api_update_frequency int = 60 + global_schedule string } // cmd returns the cli module that handles the cron daemon. diff --git a/src/cron/cron.v b/src/cron/cron.v index 3d3ea9a..d8b4d95 100644 --- a/src/cron/cron.v +++ b/src/cron/cron.v @@ -5,12 +5,13 @@ import time import log import util import cron.daemon +import cron.expression // cron starts a cron daemon & starts periodically scheduling builds. pub fn cron(conf Config) ? { // Configure logger log_level := log.level_from_tag(conf.log_level) or { - util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') + return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') } mut logger := log.Log{ @@ -20,5 +21,12 @@ pub fn cron(conf Config) ? { logger.set_full_logpath(conf.log_file) logger.log_to_console_too() - d := daemon.init(conf) + ce := expression.parse_expression(conf.global_schedule) or { + return error('Error while parsing global cron expression: $err.msg()') + } + + mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce, + conf.max_concurrent_builds, conf.api_update_frequency) ? + + d.run() ? } diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index a887717..ede9320 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -3,9 +3,12 @@ module daemon import git import time import log -import datatypes +import datatypes { MinHeap } +import cron.expression { CronExpression, parse_expression } struct ScheduledBuild { +pub: + repo_id string repo git.GitRepo timestamp time.Time } @@ -16,39 +19,84 @@ fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool { pub struct Daemon { mut: - conf Config + address string + api_key string + base_image string + global_schedule CronExpression + api_update_frequency int // Repos currently loaded from API. repos_map map[string]git.GitRepo // At what point to update the list of repositories. 
api_update_timestamp time.Time - queue datatypes.MinHeap + queue MinHeap // Which builds are currently running builds []git.GitRepo // Atomic variables used to detect when a build has finished; length is the // same as builds atomics []u64 - logger shared log.Log + logger shared log.Log } -// init -pub fn init(conf Config) Daemon { - return Daemon{ - conf: conf - atomics: [conf.max_concurrent_builds]u64{} +pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int) ?Daemon { + mut d := Daemon{ + address: address + api_key: api_key + base_image: base_image + global_schedule: global_schedule + api_update_frequency: api_update_frequency + atomics: []u64{len: max_concurrent_builds} + builds: []git.GitRepo{len: max_concurrent_builds} + logger: logger } -} -fn (mut d Daemon) run() ? { + // Initialize the repos & queue d.renew_repos() ? d.renew_queue() ? + + return d +} + +pub fn (mut d Daemon) run() ? { + println(d.queue) } fn (mut d Daemon) renew_repos() ? { - mut new_repos := git.get_repos(d.conf.address, d.conf.api_key) ? + mut new_repos := git.get_repos(d.address, d.api_key) ? d.repos_map = new_repos.move() + + d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency) } +// renew_queue replaces the old queue with a new one that reflects the newest +// values in repos_map. fn (mut d Daemon) renew_queue() ? { + mut new_queue := MinHeap{} + // Move any jobs that should have already started from the old queue onto + // the new one + now := time.now() + + for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now { + new_queue.insert(d.queue.pop() ?) + } + + println('hey') + println(d.repos_map) + // For each repository in repos_map, parse their cron expression (or use + // the default one if not present) & add them to the queue + for id, repo in d.repos_map { + println('hey') + ce := parse_expression(repo.schedule) or { d.global_schedule } + // A repo that can't be scheduled will just be skipped for now + timestamp := ce.next(now) or { continue } + + new_queue.insert(ScheduledBuild{ + repo_id: id + repo: repo + timestamp: timestamp + }) + } + + d.queue = new_queue } diff --git a/src/cron/expression.v b/src/cron/expression/expression.v similarity index 98% rename from src/cron/expression.v rename to src/cron/expression/expression.v index b35c568..c122585 100644 --- a/src/cron/expression.v +++ b/src/cron/expression/expression.v @@ -1,8 +1,8 @@ -module cron +module expression import time -struct CronExpression { +pub struct CronExpression { minutes []int hours []int days []int @@ -219,7 +219,7 @@ fn parse_part(s string, min int, max int) ?[]int { // parse_expression parses an entire cron expression string into a // CronExpression object, if possible. -fn parse_expression(exp string) ?CronExpression { +pub fn parse_expression(exp string) ?CronExpression { // The filter allows for multiple spaces between parts mut parts := exp.split(' ').filter(it != '') diff --git a/src/cron/expression_parse_test.v b/src/cron/expression/expression_parse_test.v similarity index 99% rename from src/cron/expression_parse_test.v rename to src/cron/expression/expression_parse_test.v index 8f3ac38..18531c0 100644 --- a/src/cron/expression_parse_test.v +++ b/src/cron/expression/expression_parse_test.v @@ -1,4 +1,4 @@ -module cron +module expression // parse_range_error returns the returned error message. If the result is '', // that means the function didn't error. 
diff --git a/src/cron/expression_test.v b/src/cron/expression/expression_test.v similarity index 97% rename from src/cron/expression_test.v rename to src/cron/expression/expression_test.v index 0be9a64..ef0283a 100644 --- a/src/cron/expression_test.v +++ b/src/cron/expression/expression_test.v @@ -1,4 +1,4 @@ -module cron +module expression import time { parse } diff --git a/src/env/env.v b/src/env/env.v index 0124850..88f1650 100644 --- a/src/env/env.v +++ b/src/env/env.v @@ -58,7 +58,7 @@ pub fn load(path string) ?T { if s !is toml.Null { $if field.typ is string { res.$(field.name) = s.string() - }$else $if field.typ is int { + } $else $if field.typ is int { res.$(field.name) = s.int() } } diff --git a/src/git/git.v b/src/git/git.v index 45aed60..2023f34 100644 --- a/src/git/git.v +++ b/src/git/git.v @@ -15,7 +15,7 @@ pub mut: // Which repo the builder should publish packages to repo string // Cron schedule describing how frequently to build the repo. - schedule string + schedule string [optional] } // patch_from_params patches a GitRepo from a map[string]string, usually @@ -74,7 +74,7 @@ pub fn repo_from_params(params map[string]string) ?GitRepo { // If we're creating a new GitRepo, we want all fields to be present before // "patching". $for field in GitRepo.fields { - if field.name !in params { + if field.name !in params && !field.attrs.contains('optional') { return error('Missing parameter: ${field.name}.') } } diff --git a/src/v.mod b/src/v.mod new file mode 100644 index 0000000..e69de29 diff --git a/vieter.toml b/vieter.toml index 8e0447b..e646739 100644 --- a/vieter.toml +++ b/vieter.toml @@ -8,3 +8,6 @@ repos_file = "data/repos.json" default_arch = "x86_64" address = "http://localhost:8000" + +global_schedule = '0 3' + From 78b477fb9254df38656a8e69f4e9989ce8f5b131 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Wed, 13 Apr 2022 22:20:05 +0200 Subject: [PATCH 04/21] Removed deprecated err.msg & err.code --- src/cron/cron.v | 3 --- src/cron/daemon/daemon.v | 7 ++++--- src/env/env.v | 2 +- src/repo/repo.v | 6 +++--- src/server/git.v | 4 ++-- src/server/routes.v | 6 +++--- src/server/server.v | 2 +- src/web/web.v | 8 ++++---- 8 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/cron/cron.v b/src/cron/cron.v index d8b4d95..cb5bcd7 100644 --- a/src/cron/cron.v +++ b/src/cron/cron.v @@ -1,9 +1,6 @@ module cron -import git -import time import log -import util import cron.daemon import cron.expression diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index ede9320..9931d4f 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -58,6 +58,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st pub fn (mut d Daemon) run() ? { println(d.queue) + println('i am running') } fn (mut d Daemon) renew_repos() ? { @@ -81,12 +82,12 @@ fn (mut d Daemon) renew_queue() ? { new_queue.insert(d.queue.pop() ?) 
} - println('hey') - println(d.repos_map) + eprintln('hey') + eprintln(d.repos_map) // For each repository in repos_map, parse their cron expression (or use // the default one if not present) & add them to the queue for id, repo in d.repos_map { - println('hey') + eprintln('hey') ce := parse_expression(repo.schedule) or { d.global_schedule } // A repo that can't be scheduled will just be skipped for now timestamp := ce.next(now) or { continue } diff --git a/src/env/env.v b/src/env/env.v index 88f1650..b2b5f44 100644 --- a/src/env/env.v +++ b/src/env/env.v @@ -36,7 +36,7 @@ fn get_env_var(field_name string) ?string { // Otherwise, we process the file return os.read_file(env_file) or { - error('Failed to read file defined in $env_file_name: ${err.msg}.') + error('Failed to read file defined in $env_file_name: ${err.msg()}.') } } diff --git a/src/repo/repo.v b/src/repo/repo.v index f439f58..e27e232 100644 --- a/src/repo/repo.v +++ b/src/repo/repo.v @@ -30,11 +30,11 @@ pub: // new creates a new RepoGroupManager & creates the directories as needed pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager { if !os.is_dir(repos_dir) { - os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg') } + os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') } } if !os.is_dir(pkg_dir) { - os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg') } + os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') } } return RepoGroupManager{ @@ -50,7 +50,7 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana // the right subdirectories in r.pkg_dir if it was successfully added. pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult { pkg := package.read_pkg_archive(pkg_path) or { - return error('Failed to read package file: $err.msg') + return error('Failed to read package file: $err.msg()') } added := r.add_pkg_in_repo(repo, pkg) ? 
diff --git a/src/server/git.v b/src/server/git.v index 2a682d8..a9d6f50 100644 --- a/src/server/git.v +++ b/src/server/git.v @@ -16,7 +16,7 @@ fn (mut app App) get_repos() web.Result { repos := rlock app.git_mutex { git.read_repos(app.conf.repos_file) or { - app.lerror('Failed to read repos file: $err.msg') + app.lerror('Failed to read repos file: $err.msg()') return app.status(http.Status.internal_server_error) } @@ -55,7 +55,7 @@ fn (mut app App) post_repo() web.Result { } new_repo := git.repo_from_params(app.query) or { - return app.json(http.Status.bad_request, new_response(err.msg)) + return app.json(http.Status.bad_request, new_response(err.msg())) } id := rand.uuid_v4() diff --git a/src/server/routes.v b/src/server/routes.v index 138f253..4f6c4f0 100644 --- a/src/server/routes.v +++ b/src/server/routes.v @@ -87,15 +87,15 @@ fn (mut app App) put_package(repo string) web.Result { } res := app.repo.add_pkg_from_path(repo, pkg_path) or { - app.lerror('Error while adding package: $err.msg') + app.lerror('Error while adding package: $err.msg()') - os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") } + os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") } return app.json(http.Status.internal_server_error, new_response('Failed to add package.')) } if !res.added { - os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") } + os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") } app.lwarn("Duplicate package '$res.pkg.full_name()' in repo '$repo'.") diff --git a/src/server/server.v b/src/server/server.v index 5bf9a87..c4317c5 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -45,7 +45,7 @@ pub fn server(conf Config) ? 
{ // This also creates the directories if needed repo := repo.new(conf.repos_dir, conf.pkg_dir, conf.default_arch) or { - logger.error(err.msg) + logger.error(err.msg()) exit(1) } diff --git a/src/web/web.v b/src/web/web.v index 000c6a6..688f854 100644 --- a/src/web/web.v +++ b/src/web/web.v @@ -249,7 +249,7 @@ pub fn (mut ctx Context) file(f_path string) Result { // ext := os.file_ext(f_path) // data := os.read_file(f_path) or { - // eprint(err.msg) + // eprint(err.msg()) // ctx.server_error(500) // return Result{} // } @@ -267,7 +267,7 @@ pub fn (mut ctx Context) file(f_path string) Result { file_size := os.file_size(f_path) file := os.open(f_path) or { - eprintln(err.msg) + eprintln(err.msg()) ctx.server_error(500) return Result{} } @@ -361,7 +361,7 @@ interface DbInterface { // run runs the app [manualfree] pub fn run(global_app &T, port int) { - mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code $err') } + mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code() $err') } // Parsing methods attributes mut routes := map[string]Route{} @@ -393,7 +393,7 @@ pub fn run(global_app &T, port int) { request_app.Context = global_app.Context // copy the context ref that contains static files map etc mut conn := l.accept() or { // failures should not panic - eprintln('accept() failed with error: $err.msg') + eprintln('accept() failed with error: $err.msg()') continue } go handle_conn(mut conn, mut request_app, routes) From c8af362a4aba256fd8cdb13f853ceb15684f2296 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 14 Apr 2022 20:38:14 +0200 Subject: [PATCH 05/21] Workaround for weird bug --- src/cron/daemon/daemon.v | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index 9931d4f..eadd04c 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -37,6 +37,8 @@ mut: logger shared log.Log } +// init_daemon initializes a new Daemon object. It renews the repositories & +// populates the build queue for the first time. pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int) ?Daemon { mut d := Daemon{ address: address @@ -56,6 +58,8 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st return d } +// run starts the actual daemon process. It runs builds when possible & +// periodically refreshes the list of repositories to ensure we stay in sync. pub fn (mut d Daemon) run() ? { println(d.queue) println('i am running') @@ -78,16 +82,23 @@ fn (mut d Daemon) renew_queue() ? { // the new one now := time.now() - for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now { - new_queue.insert(d.queue.pop() ?) + // For some reason, using + // ```v + // for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now { + //``` + // here causes the function to prematurely just exit, without any errors or anything, very weird + // https://github.com/vlang/v/issues/14042 + for d.queue.len() > 0 { + if d.queue.peek() ?.timestamp < now { + new_queue.insert(d.queue.pop() ?) 
+ } else { + break + } } - eprintln('hey') - eprintln(d.repos_map) // For each repository in repos_map, parse their cron expression (or use // the default one if not present) & add them to the queue for id, repo in d.repos_map { - eprintln('hey') ce := parse_expression(repo.schedule) or { d.global_schedule } // A repo that can't be scheduled will just be skipped for now timestamp := ce.next(now) or { continue } From c8fc683384d2fe6b313508d1ae700bd9b36815fa Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 14 Apr 2022 21:20:10 +0200 Subject: [PATCH 06/21] Eh don't feel like writing scheduler rn --- src/cron/daemon/build.v | 45 ++++++++++++++++++++++++++++++++++++++++ src/cron/daemon/daemon.v | 9 ++++++-- vieter.toml | 2 +- 3 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 src/cron/daemon/build.v diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v new file mode 100644 index 0000000..e7e5ac3 --- /dev/null +++ b/src/cron/daemon/build.v @@ -0,0 +1,45 @@ +module daemon + +import git +import time +import sync.stdatomic + +// update_builds starts as many builds as possible. +fn (mut d Daemon) update_builds() ? { + now := time.now() + + for d.queue.len() > 0 { + if d.queue.peek() ?.timestamp < now { + sb := d.queue.pop() ? + + // If this build couldn't be scheduled, no more will be possible. + if !d.start_build(sb.repo_id)? { + break + } + } else { + break + } + } +} + +// start_build starts a build for the given repo_id. +fn (mut d Daemon) start_build(repo_id string) ?bool { + for i in 0..d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == 0 { + stdatomic.store_u64(&d.atomics[i], 1) + + go d.run_build(i, d.repos_map[repo_id]) + + return true + } + } + + return false +} + +fn (mut d Daemon) run_build(build_index int, repo git.GitRepo) ? { + time.sleep(10 * time.second) + + stdatomic.store_u64(&d.atomics[build_index], 2) +} + diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index eadd04c..fc917e4 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -61,8 +61,13 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st // run starts the actual daemon process. It runs builds when possible & // periodically refreshes the list of repositories to ensure we stay in sync. pub fn (mut d Daemon) run() ? { - println(d.queue) - println('i am running') + for { + d.update_builds() ? + println(d.queue) + println(d.atomics) + + time.sleep(60 * time.second) + } } fn (mut d Daemon) renew_repos() ? { diff --git a/vieter.toml b/vieter.toml index e646739..452500f 100644 --- a/vieter.toml +++ b/vieter.toml @@ -9,5 +9,5 @@ default_arch = "x86_64" address = "http://localhost:8000" -global_schedule = '0 3' +global_schedule = '* *' From cd8fd786168433c1fe79940e1235cfafcd3e5d15 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 14 Apr 2022 23:15:19 +0200 Subject: [PATCH 07/21] Added experimental builds to CI --- .woodpecker/.build_experimental.yml | 25 +++++++++++++++++++++++++ Makefile | 21 ++++++++++++++++++--- 2 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 .woodpecker/.build_experimental.yml diff --git a/.woodpecker/.build_experimental.yml b/.woodpecker/.build_experimental.yml new file mode 100644 index 0000000..0d07962 --- /dev/null +++ b/.woodpecker/.build_experimental.yml @@ -0,0 +1,25 @@ +# These builds are not important for the project, but might be valuable for +# fixing bugs in the V compiler. 
+ +platform: linux/amd64 +branches: + exclude: [master, dev] + +pipeline: + autofree: + image: 'chewingbever/vlang:latest' + pull: true + group: 'build' + commands: + - make autofree + when: + event: push + + skip-unused: + image: 'chewingbever/vlang:latest' + pull: true + group: 'build' + commands: + - make skip-unused + when: + event: push diff --git a/Makefile b/Makefile index 9421fb6..6f2921a 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,7 @@ V := $(V_PATH) -showcc -gc boehm all: vieter + # =====COMPILATION===== # Regular binary vieter: $(SOURCES) @@ -33,19 +34,21 @@ pvieter: $(SOURCES) # Only generate C code .PHONY: c -c: +c: $(SOURCES) $(V) -o vieter.c $(SRC_DIR) + # =====EXECUTION===== # Run the server in the default 'data' directory .PHONY: run run: vieter - ./vieter -f vieter.toml server + ./vieter -f vieter.toml server .PHONY: run-prod run-prod: prod ./pvieter -f vieter.toml server + # =====OTHER===== .PHONY: lint lint: @@ -72,4 +75,16 @@ v/v: make -C v clean: - rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' + rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' 'afvieter' 'suvieter' + + +# =====EXPERIMENTAL===== +.PHONY: autofree +autofree: afvieter +afvieter: $(SOURCES) + $(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR) + +.PHONY: skip-unused +skip-unused: suvieter +suvieter: $(SOURCES) + $(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR) From cf77037188124f5643048724f5400b165c007603 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 14 Apr 2022 23:17:52 +0200 Subject: [PATCH 08/21] Some more experimental builds --- .woodpecker/.build_experimental.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.woodpecker/.build_experimental.yml b/.woodpecker/.build_experimental.yml index 0d07962..032a42b 100644 --- a/.woodpecker/.build_experimental.yml +++ b/.woodpecker/.build_experimental.yml @@ -12,6 +12,8 @@ pipeline: group: 'build' commands: - make autofree + - readelf -d afvieter + - du -h afvieter when: event: push @@ -21,5 +23,20 @@ pipeline: group: 'build' commands: - make skip-unused + - readelf -d suvieter + - du -h suvieter + when: + event: push + + skip-unused-static: + image: 'chewingbever/vlang:latest' + pull: true + environment: + - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static + group: 'build' + commands: + - make skip-unused + - readelf -d suvieter + - du -h suvieter when: event: push From 20707f6af14ebce35476f7ddffa07b60c415ea86 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Fri, 15 Apr 2022 11:38:06 +0200 Subject: [PATCH 09/21] chore(ci): change debug build used chore(ci): removed skip-unused-static experimental build chore: updated Makefile --- .gitignore | 4 ++-- .woodpecker/.build.yml | 5 +++-- .woodpecker/.build_experimental.yml | 13 ------------- Makefile | 4 ++-- 4 files changed, 7 insertions(+), 19 deletions(-) diff --git a/.gitignore b/.gitignore index 7847b3f..a3f6afc 100644 --- a/.gitignore +++ b/.gitignore @@ -5,8 +5,8 @@ data/ vieter dvieter pvieter -dvieterctl -vieterctl +suvieter +afvieter vieter.c # Ignore testing files diff --git a/.woodpecker/.build.yml b/.woodpecker/.build.yml index e68c4c9..c612737 100644 --- a/.woodpecker/.build.yml +++ b/.woodpecker/.build.yml @@ -9,15 +9,16 @@ matrix: platform: ${PLATFORM} pipeline: - # The default build isn't needed, as alpine switches to gcc for the compiler anyways debug: image: 'chewingbever/vlang:latest' pull: true group: 'build' commands: - - make debug + - make when: event: push + branch: + 
exclude: [main, dev] prod: image: 'chewingbever/vlang:latest' diff --git a/.woodpecker/.build_experimental.yml b/.woodpecker/.build_experimental.yml index 032a42b..0129d2b 100644 --- a/.woodpecker/.build_experimental.yml +++ b/.woodpecker/.build_experimental.yml @@ -27,16 +27,3 @@ pipeline: - du -h suvieter when: event: push - - skip-unused-static: - image: 'chewingbever/vlang:latest' - pull: true - environment: - - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static - group: 'build' - commands: - - make skip-unused - - readelf -d suvieter - - du -h suvieter - when: - event: push diff --git a/Makefile b/Makefile index 2f39983..fb97ec2 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ dvieter: $(SOURCES) # Run the debug build inside gdb .PHONY: gdb gdb: dvieter - gdb --args './dvieter -f vieter.toml server' + gdb --args ./dvieter -f vieter.toml server # Optimised production build .PHONY: prod @@ -75,7 +75,7 @@ v/v: make -C v clean: - rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst + rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' # =====EXPERIMENTAL===== From 7722d5a7e41cdaed0569acdfd51e21e9acb45734 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 11:43:06 +0200 Subject: [PATCH 10/21] fix: replace byte with u8 BREAKING: the V compiler removed the byte type alias in favor of u8. --- src/docker/docker.v | 6 +++--- src/repo/sync.v | 2 +- src/util/util.v | 4 ++-- src/web/web.v | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/docker/docker.v b/src/docker/docker.v index a6f7640..07ceb8e 100644 --- a/src/docker/docker.v +++ b/src/docker/docker.v @@ -28,8 +28,8 @@ fn send(req &string) ?http.Response { s.wait_for_write() ? mut c := 0 - mut buf := []byte{len: docker.buf_len} - mut res := []byte{} + mut buf := []u8{len: docker.buf_len} + mut res := []u8{} for { c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') } @@ -52,7 +52,7 @@ fn send(req &string) ?http.Response { // We loop until we've encountered the end of the chunked response // A chunked HTTP response always ends with '0\r\n\r\n'. - for res.len < 5 || res#[-5..] != [byte(`0`), `\r`, `\n`, `\r`, `\n`] { + for res.len < 5 || res#[-5..] != [u8(`0`), `\r`, `\n`, `\r`, `\n`] { // Wait for the server to respond s.wait_for_write() ? diff --git a/src/repo/sync.v b/src/repo/sync.v index e2b7aac..12756b7 100644 --- a/src/repo/sync.v +++ b/src/repo/sync.v @@ -19,7 +19,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri } // Write the file to the archive - buf := [8192]byte{} + buf := [8192]u8{} mut len := C.read(fd, &buf, sizeof(buf)) for len > 0 { diff --git a/src/util/util.v b/src/util/util.v index 228f584..c1af30e 100644 --- a/src/util/util.v +++ b/src/util/util.v @@ -30,7 +30,7 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? 
{ file.close() } - mut buf := []byte{len: util.reader_buf_size} + mut buf := []u8{len: util.reader_buf_size} mut bytes_left := length // Repeat as long as the stream still has data @@ -60,7 +60,7 @@ pub fn hash_file(path &string) ?(string, string) { mut sha256sum := sha256.new() buf_size := int(1_000_000) - mut buf := []byte{len: buf_size} + mut buf := []u8{len: buf_size} mut bytes_left := os.file_size(path) for bytes_left > 0 { diff --git a/src/web/web.v b/src/web/web.v index 688f854..3e7b047 100644 --- a/src/web/web.v +++ b/src/web/web.v @@ -285,7 +285,7 @@ pub fn (mut ctx Context) file(f_path string) Result { resp.set_status(ctx.status) send_string(mut ctx.conn, resp.bytestr()) or { return Result{} } - mut buf := []byte{len: 1_000_000} + mut buf := []u8{len: 1_000_000} mut bytes_left := file_size // Repeat as long as the stream still has data From 4d26797453c432297073e512942c2216c35c5edc Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 21 Apr 2022 09:07:16 +0200 Subject: [PATCH 11/21] chore(ci): Updated PKGBUILD to use vieter-v package --- .woodpecker/.arch.yml | 2 ++ PKGBUILD | 9 +++------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.woodpecker/.arch.yml b/.woodpecker/.arch.yml index ab3c6ea..e37dc1a 100644 --- a/.woodpecker/.arch.yml +++ b/.woodpecker/.arch.yml @@ -10,6 +10,8 @@ pipeline: build: image: 'menci/archlinuxarm:base-devel' commands: + # Add the vieter repository so we can use the compiler + - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf # Update packages - pacman -Syu --noconfirm # Create non-root user to perform build & switch to their home diff --git a/PKGBUILD b/PKGBUILD index 0c558b4..3f8c480 100644 --- a/PKGBUILD +++ b/PKGBUILD @@ -2,10 +2,10 @@ pkgbase='vieter' pkgname='vieter' -pkgver=0.2.0.r24.g9a56bd0 +pkgver=0.2.0.r25.g20112b8 pkgrel=1 depends=('glibc' 'openssl' 'libarchive' 'gc') -makedepends=('git' 'gcc') +makedepends=('git' 'gcc' 'vieter-v') arch=('x86_64' 'aarch64' 'armv7') url='https://git.rustybever.be/Chewing_Bever/vieter' license=('AGPL3') @@ -20,10 +20,7 @@ pkgver() { build() { cd "$pkgname" - # Build the compiler - CFLAGS= make v - - V_PATH=v/v make prod + make prod } package() { From 6f9e1b5f3cf02a5f60c419da6c71523a790d19bf Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 11:31:14 +0200 Subject: [PATCH 12/21] feat(cron): start of working loop --- src/cron/daemon/build.v | 36 +++++++++++++++++++------- src/cron/daemon/daemon.v | 43 +++++++++++++++++++++----------- src/cron/expression/expression.v | 2 +- 3 files changed, 57 insertions(+), 24 deletions(-) diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v index e7e5ac3..73ba183 100644 --- a/src/cron/daemon/build.v +++ b/src/cron/daemon/build.v @@ -1,9 +1,25 @@ module daemon -import git import time import sync.stdatomic +const build_empty = 0 +const build_running = 1 +const build_done = 2 + +// reschedule_builds looks for any builds with status code 2 & re-adds them to +// the queue. +fn (mut d Daemon) reschedule_builds() ? { + for i in 0..d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == build_done { + stdatomic.store_u64(&d.atomics[i], build_empty) + sb := d.builds[i] + + d.schedule_build(sb.repo_id, sb.repo) ? + } + } +} + // update_builds starts as many builds as possible. fn (mut d Daemon) update_builds() ? { now := time.now() @@ -13,7 +29,7 @@ fn (mut d Daemon) update_builds() ? { sb := d.queue.pop() ? // If this build couldn't be scheduled, no more will be possible. 
- if !d.start_build(sb.repo_id)? { + if !d.start_build(sb)? { break } } else { @@ -22,13 +38,14 @@ fn (mut d Daemon) update_builds() ? { } } -// start_build starts a build for the given repo_id. -fn (mut d Daemon) start_build(repo_id string) ?bool { +// start_build starts a build for the given ScheduledBuild object. +fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool { for i in 0..d.atomics.len { - if stdatomic.load_u64(&d.atomics[i]) == 0 { - stdatomic.store_u64(&d.atomics[i], 1) + if stdatomic.load_u64(&d.atomics[i]) == build_empty { + stdatomic.store_u64(&d.atomics[i], build_running) + d.builds[i] = sb - go d.run_build(i, d.repos_map[repo_id]) + go d.run_build(i, sb) return true } @@ -37,9 +54,10 @@ fn (mut d Daemon) start_build(repo_id string) ?bool { return false } -fn (mut d Daemon) run_build(build_index int, repo git.GitRepo) ? { +// run_build actually starts the build process for a given repo. +fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? { time.sleep(10 * time.second) - stdatomic.store_u64(&d.atomics[build_index], 2) + stdatomic.store_u64(&d.atomics[build_index], build_done) } diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index fc917e4..816bc15 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -30,7 +30,7 @@ mut: api_update_timestamp time.Time queue MinHeap // Which builds are currently running - builds []git.GitRepo + builds []ScheduledBuild // Atomic variables used to detect when a build has finished; length is the // same as builds atomics []u64 @@ -47,7 +47,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st global_schedule: global_schedule api_update_frequency: api_update_frequency atomics: []u64{len: max_concurrent_builds} - builds: []git.GitRepo{len: max_concurrent_builds} + builds: []ScheduledBuild{len: max_concurrent_builds} logger: logger } @@ -62,14 +62,37 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st // periodically refreshes the list of repositories to ensure we stay in sync. pub fn (mut d Daemon) run() ? { for { + println('1') + // Cleans up finished builds, opening up spots for new builds + d.reschedule_builds() ? + println('2') + // Schedules new builds when possible d.update_builds() ? + println(d.queue) println(d.atomics) - time.sleep(60 * time.second) + time.sleep(10 * time.second) } } +// schedule_build adds the next occurence of the given repo build to the queue. +fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? { + ce := parse_expression(repo.schedule) or { + d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()") + + d.global_schedule + } + // A repo that can't be scheduled will just be skipped for now + timestamp := ce.next_from_now() ? + + d.queue.insert(ScheduledBuild{ + repo_id: repo_id + repo: repo + timestamp: timestamp + }) +} + fn (mut d Daemon) renew_repos() ? { mut new_repos := git.get_repos(d.address, d.api_key) ? @@ -101,19 +124,11 @@ fn (mut d Daemon) renew_queue() ? { } } + d.queue = new_queue + // For each repository in repos_map, parse their cron expression (or use // the default one if not present) & add them to the queue for id, repo in d.repos_map { - ce := parse_expression(repo.schedule) or { d.global_schedule } - // A repo that can't be scheduled will just be skipped for now - timestamp := ce.next(now) or { continue } - - new_queue.insert(ScheduledBuild{ - repo_id: id - repo: repo - timestamp: timestamp - }) + d.schedule_build(id, repo) ? 
} - - d.queue = new_queue } diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v index c122585..6e11da2 100644 --- a/src/cron/expression/expression.v +++ b/src/cron/expression/expression.v @@ -114,7 +114,7 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time { }) } -fn (ce &CronExpression) next_from_now() ?time.Time { +pub fn (ce &CronExpression) next_from_now() ?time.Time { return ce.next(time.now()) } From 5287067ea7470259417440a128d64135b41d135e Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 21 Apr 2022 16:49:39 +0200 Subject: [PATCH 13/21] chore(ci): run builds sequentially --- .woodpecker/.build.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.woodpecker/.build.yml b/.woodpecker/.build.yml index c612737..e7341fd 100644 --- a/.woodpecker/.build.yml +++ b/.woodpecker/.build.yml @@ -12,7 +12,6 @@ pipeline: debug: image: 'chewingbever/vlang:latest' pull: true - group: 'build' commands: - make when: @@ -25,7 +24,6 @@ pipeline: pull: true environment: - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static - group: 'build' commands: - make prod # Make sure the binary is actually statically built From 11ac3c0470d4bb3052a8a308a0a03facbd03d74d Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Fri, 29 Apr 2022 10:34:12 +0200 Subject: [PATCH 14/21] docs: added docs command & notice in README --- .gitignore | 3 +++ Makefile | 8 +++++++- README.md | 4 +++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index a3f6afc..6a06eb2 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,6 @@ v/ # gdb log file gdb.txt + +# Generated docs +_docs/ diff --git a/Makefile b/Makefile index fb97ec2..1793640 100644 --- a/Makefile +++ b/Makefile @@ -74,8 +74,14 @@ v/v: git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v make -C v +.PHONY: clean clean: - rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' + rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' + +.PHONY: docs +docs: + rm -rf '$(SRC_DIR)/_docs' + cd '$(SRC_DIR)' && v doc -all -f html -m -readme . # =====EXPERIMENTAL===== diff --git a/README.md b/README.md index 96b104d..08f1e75 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,9 @@ ## Documentation -I host documentation for Vieter over at https://rustybever.be/docs/vieter/. +I host documentation for Vieter over at https://rustybever.be/docs/vieter/. API +documentation for the current codebase can be found at +https://rustybever.be/api-docs/vieter/. 
## Overview From a1c308f29ddb2bd92cc9f0e4b8195452be3d7043 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 10:40:29 +0200 Subject: [PATCH 15/21] feature(daemon): added api renewal & calculated sleep time --- src/cron/cli.v | 5 +++-- src/cron/daemon/build.v | 23 +++++++++++++---------- src/cron/daemon/daemon.v | 38 +++++++++++++++++++++++++++++++++----- 3 files changed, 49 insertions(+), 17 deletions(-) diff --git a/src/cron/cli.v b/src/cron/cli.v index f4b20ec..3b836dd 100644 --- a/src/cron/cli.v +++ b/src/cron/cli.v @@ -11,8 +11,9 @@ pub: address string base_image string = 'archlinux:base-devel' max_concurrent_builds int = 1 - api_update_frequency int = 60 - global_schedule string + api_update_frequency int = 15 + // Replicates the behavior of the original cron system + global_schedule string = '0 3' } // cmd returns the cli module that handles the cron daemon. diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v index 73ba183..c5ef428 100644 --- a/src/cron/daemon/build.v +++ b/src/cron/daemon/build.v @@ -4,15 +4,17 @@ import time import sync.stdatomic const build_empty = 0 + const build_running = 1 + const build_done = 2 // reschedule_builds looks for any builds with status code 2 & re-adds them to // the queue. fn (mut d Daemon) reschedule_builds() ? { - for i in 0..d.atomics.len { - if stdatomic.load_u64(&d.atomics[i]) == build_done { - stdatomic.store_u64(&d.atomics[i], build_empty) + for i in 0 .. d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done { + stdatomic.store_u64(&d.atomics[i], daemon.build_empty) sb := d.builds[i] d.schedule_build(sb.repo_id, sb.repo) ? @@ -29,7 +31,8 @@ fn (mut d Daemon) update_builds() ? { sb := d.queue.pop() ? // If this build couldn't be scheduled, no more will be possible. - if !d.start_build(sb)? { + // TODO a build that couldn't be scheduled should be re-added to the queue. + if !d.start_build(sb) { break } } else { @@ -39,10 +42,10 @@ fn (mut d Daemon) update_builds() ? { } // start_build starts a build for the given ScheduledBuild object. -fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool { - for i in 0..d.atomics.len { - if stdatomic.load_u64(&d.atomics[i]) == build_empty { - stdatomic.store_u64(&d.atomics[i], build_running) +fn (mut d Daemon) start_build(sb ScheduledBuild) bool { + for i in 0 .. d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty { + stdatomic.store_u64(&d.atomics[i], daemon.build_running) d.builds[i] = sb go d.run_build(i, sb) @@ -56,8 +59,8 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) ?bool { // run_build actually starts the build process for a given repo. fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? { + d.linfo('build $sb.repo.url') time.sleep(10 * time.second) - stdatomic.store_u64(&d.atomics[build_index], build_done) + stdatomic.store_u64(&d.atomics[build_index], daemon.build_done) } - diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index 816bc15..7253e94 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -5,6 +5,8 @@ import time import log import datatypes { MinHeap } import cron.expression { CronExpression, parse_expression } +import math +import arrays struct ScheduledBuild { pub: @@ -62,23 +64,47 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st // periodically refreshes the list of repositories to ensure we stay in sync. pub fn (mut d Daemon) run() ? 
{ for { - println('1') + // Update the API's contents if needed & renew the queue + if time.now() >= d.api_update_timestamp { + d.renew_repos() ? + d.renew_queue() ? + } + // Cleans up finished builds, opening up spots for new builds d.reschedule_builds() ? - println('2') + + // TODO rebuild builder image when needed + // Schedules new builds when possible d.update_builds() ? - println(d.queue) - println(d.atomics) + // Sleep either until we have to refresh the repos or when the next + // build has to start, with a minimum of 1 second. + now := time.now() - time.sleep(10 * time.second) + mut delay := d.api_update_timestamp - now + + if d.queue.len() > 0 { + time_until_next_job := d.queue.peek() ?.timestamp - now + + delay = math.min(delay, time_until_next_job) + } + + d.ldebug('Sleeping for ${delay}...') + + // TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished. + + // We sleep for at least one second. This is to prevent the program + // from looping agressively when a cronjob can be scheduled, but + // there's no spots free for it to be started. + time.sleep(math.max(delay, 1 * time.second)) } } // schedule_build adds the next occurence of the given repo build to the queue. fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? { ce := parse_expression(repo.schedule) or { + // TODO This shouldn't return an error if the expression is empty. d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()") d.global_schedule @@ -94,6 +120,7 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? { } fn (mut d Daemon) renew_repos() ? { + d.ldebug('Renewing repos...') mut new_repos := git.get_repos(d.address, d.api_key) ? d.repos_map = new_repos.move() @@ -104,6 +131,7 @@ fn (mut d Daemon) renew_repos() ? { // renew_queue replaces the old queue with a new one that reflects the newest // values in repos_map. fn (mut d Daemon) renew_queue() ? { + d.ldebug('Renewing queue...') mut new_queue := MinHeap{} // Move any jobs that should have already started from the old queue onto From caee56efd4632f86287c47343e5357e8d1a6fdcb Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 16:08:35 +0200 Subject: [PATCH 16/21] feat(cron): improve sleep calculation; prevent invalid rescheduling of finished builds --- src/cron/daemon/build.v | 34 +++++++++++++++++++------- src/cron/daemon/daemon.v | 42 ++++++++++++++++++++------------ src/cron/expression/expression.v | 2 ++ vieter.toml | 3 ++- 4 files changed, 55 insertions(+), 26 deletions(-) diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v index c5ef428..ea3e6ca 100644 --- a/src/cron/daemon/build.v +++ b/src/cron/daemon/build.v @@ -2,6 +2,7 @@ module daemon import time import sync.stdatomic +import rand const build_empty = 0 @@ -9,21 +10,23 @@ const build_running = 1 const build_done = 2 -// reschedule_builds looks for any builds with status code 2 & re-adds them to -// the queue. -fn (mut d Daemon) reschedule_builds() ? { +// clean_finished_builds removes finished builds from the build slots & returns +// them. +fn (mut d Daemon) clean_finished_builds() ?[]ScheduledBuild { + mut out := []ScheduledBuild{} + for i in 0 .. d.atomics.len { if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done { stdatomic.store_u64(&d.atomics[i], daemon.build_empty) - sb := d.builds[i] - - d.schedule_build(sb.repo_id, sb.repo) ? + out << d.builds[i] } } + + return out } // update_builds starts as many builds as possible. 
-fn (mut d Daemon) update_builds() ? { +fn (mut d Daemon) start_new_builds() ? { now := time.now() for d.queue.len() > 0 { @@ -31,8 +34,8 @@ fn (mut d Daemon) update_builds() ? { sb := d.queue.pop() ? // If this build couldn't be scheduled, no more will be possible. - // TODO a build that couldn't be scheduled should be re-added to the queue. if !d.start_build(sb) { + d.queue.insert(sb) break } } else { @@ -60,7 +63,20 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool { // run_build actually starts the build process for a given repo. fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? { d.linfo('build $sb.repo.url') - time.sleep(10 * time.second) + time.sleep(rand.int_in_range(1, 6) ? * time.second) stdatomic.store_u64(&d.atomics[build_index], daemon.build_done) } + +// current_build_count returns how many builds are currently running. +fn (mut d Daemon) current_build_count() int { + mut res := 0 + + for i in 0 .. d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running { + res += 1 + } + } + + return res +} diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index 7253e94..25d3887 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -6,7 +6,6 @@ import log import datatypes { MinHeap } import cron.expression { CronExpression, parse_expression } import math -import arrays struct ScheduledBuild { pub: @@ -64,40 +63,51 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st // periodically refreshes the list of repositories to ensure we stay in sync. pub fn (mut d Daemon) run() ? { for { + finished_builds := d.clean_finished_builds() ? + // Update the API's contents if needed & renew the queue if time.now() >= d.api_update_timestamp { d.renew_repos() ? d.renew_queue() ? } - - // Cleans up finished builds, opening up spots for new builds - d.reschedule_builds() ? + // The finished builds should only be rescheduled if the API contents + // haven't been renewed. + else { + for sb in finished_builds { + d.schedule_build(sb.repo_id, sb.repo) ? + } + } // TODO rebuild builder image when needed // Schedules new builds when possible - d.update_builds() ? + d.start_new_builds() ? + + // If there are builds currently running, the daemon should refresh + // every second to clean up any finished builds & start new ones. + mut delay := time.Duration(1 * time.second) // Sleep either until we have to refresh the repos or when the next // build has to start, with a minimum of 1 second. - now := time.now() + if d.current_build_count() == 0 { + now := time.now() + delay = d.api_update_timestamp - now - mut delay := d.api_update_timestamp - now + if d.queue.len() > 0 { + time_until_next_job := d.queue.peek() ?.timestamp - now - if d.queue.len() > 0 { - time_until_next_job := d.queue.peek() ?.timestamp - now - - delay = math.min(delay, time_until_next_job) + delay = math.min(delay, time_until_next_job) + } } - d.ldebug('Sleeping for ${delay}...') - - // TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished. - // We sleep for at least one second. This is to prevent the program // from looping agressively when a cronjob can be scheduled, but // there's no spots free for it to be started. 
- time.sleep(math.max(delay, 1 * time.second)) + delay = math.max(delay, 1 * time.second) + + d.ldebug('Sleeping for ${delay}...') + + time.sleep(delay) } } diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v index 6e11da2..652870d 100644 --- a/src/cron/expression/expression.v +++ b/src/cron/expression/expression.v @@ -114,6 +114,8 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time { }) } +// next_from_now returns the result of ce.next(ref) where ref is the result of +// time.now(). pub fn (ce &CronExpression) next_from_now() ?time.Time { return ce.next(time.now()) } diff --git a/vieter.toml b/vieter.toml index 452500f..c5ddf9f 100644 --- a/vieter.toml +++ b/vieter.toml @@ -10,4 +10,5 @@ default_arch = "x86_64" address = "http://localhost:8000" global_schedule = '* *' - +api_update_frequency = 2 +max_concurrent_builds = 3 From 98c0e52b088bf2a2b88478f68f3120a23c52d451 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 16:41:12 +0200 Subject: [PATCH 17/21] chore(ci): added missdoc -p check; merged lint commands --- .woodpecker/.lint.yml | 2 -- Makefile | 7 +++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.woodpecker/.lint.yml b/.woodpecker/.lint.yml index ce000cd..b1c16fd 100644 --- a/.woodpecker/.lint.yml +++ b/.woodpecker/.lint.yml @@ -7,7 +7,5 @@ pipeline: lint: image: 'chewingbever/vlang:latest' pull: true - group: lint commands: - make lint - - make vet diff --git a/Makefile b/Makefile index 1793640..c4d496a 100644 --- a/Makefile +++ b/Makefile @@ -53,16 +53,15 @@ run-prod: prod .PHONY: lint lint: $(V) fmt -verify $(SRC_DIR) + $(V) vet -W $(SRC_DIR) + $(V_PATH) missdoc -p $(SRC_DIR) + @ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ] # Format the V codebase .PHONY: fmt fmt: $(V) fmt -w $(SRC_DIR) -.PHONY: vet -vet: - $(V) vet -W $(SRC_DIR) - .PHONY: test test: $(V) test $(SRC_DIR) From 369b4458c5751015e0140538379281f22db6c3c6 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Sat, 30 Apr 2022 17:56:35 +0200 Subject: [PATCH 18/21] feat(cron): added automatic rebuilding of image; implemented builds --- src/build/build.v | 95 +++++++++++++++++++++------------------- src/cron/cli.v | 15 ++++--- src/cron/cron.v | 2 +- src/cron/daemon/build.v | 6 ++- src/cron/daemon/daemon.v | 39 +++++++++++++---- 5 files changed, 94 insertions(+), 63 deletions(-) diff --git a/src/build/build.v b/src/build/build.v index 942ce8a..5f54564 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -10,7 +10,7 @@ const container_build_dir = '/build' const build_image_repo = 'vieter-build' -fn create_build_image(base_image string) ?string { +pub fn create_build_image(base_image string) ?string { commands := [ // Update repos & install required packages 'pacman -Syu --needed --noconfirm base-devel git' @@ -53,12 +53,13 @@ fn create_build_image(base_image string) ?string { break } - // Wait for 5 seconds - time.sleep(5000000000) + time.sleep(1 * time.second) } // Finally, we create the image from the container // As the tag, we use the epoch value + // TODO also add the base image's name into the image name to prevent + // conflicts. tag := time.sys_mono_now().str() image := docker.create_image_from_container(id, 'vieter-build', tag) ? docker.remove_container(id) ? @@ -66,6 +67,52 @@ fn create_build_image(base_image string) ?string { return image.id } +pub fn build_repo(address string, api_key string, base_image_id string, repo &git.GitRepo) ? 
+	build_arch := os.uname().machine
+
+	// TODO what to do with PKGBUILDs that build multiple packages?
+	commands := [
+		'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
+		'cd repo',
+		'makepkg --nobuild --nodeps',
+		'source PKGBUILD',
+		// The build container checks whether the package is already
+		// present on the server
+		'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
+	]
+
+	// We convert the list of commands into a base64 string, which then gets
+	// passed to the container as an env var
+	cmds_str := base64.encode_str(commands.join('\n'))
+
+	c := docker.NewContainer{
+		image: '$base_image_id'
+		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$api_key']
+		entrypoint: ['/bin/sh', '-c']
+		cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
+		work_dir: '/build'
+		user: 'builder:builder'
+	}
+
+	id := docker.create_container(c) ?
+	docker.start_container(id) ?
+
+	// This loop waits until the container has stopped, so we can remove it after
+	for {
+		data := docker.inspect_container(id) ?
+
+		if !data.state.running {
+			break
+		}
+
+		// Wait for 5 seconds
+		time.sleep(1 * time.second)
+	}
+
+	docker.remove_container(id) ?
+}
+
 fn build(conf Config) ? {
 	build_arch := os.uname().machine
 
@@ -85,47 +132,7 @@ fn build(conf Config) ? {
 	image_id := create_build_image(conf.base_image) ?
 
 	for repo in filtered_repos {
-		// TODO what to do with PKGBUILDs that build multiple packages?
-		commands := [
-			'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
-			'cd repo',
-			'makepkg --nobuild --nodeps',
-			'source PKGBUILD',
-			// The build container checks whether the package is already
-			// present on the server
-			'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
-			'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
-		]
-
-		// We convert the list of commands into a base64 string, which then gets
-		// passed to the container as an env var
-		cmds_str := base64.encode_str(commands.join('\n'))
-
-		c := docker.NewContainer{
-			image: '$image_id'
-			env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
-			entrypoint: ['/bin/sh', '-c']
-			cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
-			work_dir: '/build'
-			user: 'builder:builder'
-		}
-
-		id := docker.create_container(c) ?
-		docker.start_container(id) ?
-
-		// This loop waits until the container has stopped, so we can remove it after
-		for {
-			data := docker.inspect_container(id) ?
-
-			if !data.state.running {
-				break
-			}
-
-			// Wait for 5 seconds
-			time.sleep(5000000000)
-		}
-
-		docker.remove_container(id) ?
+		build_repo(conf.address, conf.api_key, image_id, repo) ?
 	}
 
 	// Finally, we remove the builder image
diff --git a/src/cron/cli.v b/src/cron/cli.v
index 3b836dd..24cbe2c 100644
--- a/src/cron/cli.v
+++ b/src/cron/cli.v
@@ -5,13 +5,14 @@ import env
 
 struct Config {
 pub:
-	log_level             string = 'WARN'
-	log_file              string = 'vieter.log'
-	api_key               string
-	address               string
-	base_image            string = 'archlinux:base-devel'
-	max_concurrent_builds int    = 1
-	api_update_frequency  int    = 15
+	log_level               string = 'WARN'
+	log_file                string = 'vieter.log'
+	api_key                 string
+	address                 string
+	base_image              string = 'archlinux:base-devel'
+	max_concurrent_builds   int    = 1
+	api_update_frequency    int    = 15
+	image_rebuild_frequency int    = 1440
 	// Replicates the behavior of the original cron system
 	global_schedule string = '0 3'
 }
diff --git a/src/cron/cron.v b/src/cron/cron.v
index cb5bcd7..49a379e 100644
--- a/src/cron/cron.v
+++ b/src/cron/cron.v
@@ -23,7 +23,7 @@ pub fn cron(conf Config) ? {
 	}
 
 	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
-		conf.max_concurrent_builds, conf.api_update_frequency) ?
+		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?
 
 	d.run() ?
 }
diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v
index ea3e6ca..afe5044 100644
--- a/src/cron/daemon/build.v
+++ b/src/cron/daemon/build.v
@@ -3,6 +3,7 @@ module daemon
 import time
 import sync.stdatomic
 import rand
+import build
 
 const build_empty = 0
 
@@ -62,8 +63,9 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
 
 // run_build actually starts the build process for a given repo.
 fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
-	d.linfo('build $sb.repo.url')
-	time.sleep(rand.int_in_range(1, 6) ? * time.second)
+	d.linfo('started build: ${sb.repo.url} ${sb.repo.branch}')
+
+	build.build_repo(d.address, d.api_key, d.builder_image, &sb.repo) ?
 
 	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
 }
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
index 25d3887..4eccfa0 100644
--- a/src/cron/daemon/daemon.v
+++ b/src/cron/daemon/daemon.v
@@ -6,6 +6,7 @@ import log
 import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
 import math
+import build
 
 struct ScheduledBuild {
 pub:
@@ -20,16 +21,19 @@ fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
 
 pub struct Daemon {
 mut:
-	address               string
-	api_key               string
-	base_image            string
-	global_schedule       CronExpression
-	api_update_frequency  int
+	address                 string
+	api_key                 string
+	base_image              string
+	builder_image           string
+	global_schedule         CronExpression
+	api_update_frequency    int
+	image_rebuild_frequency int
 	// Repos currently loaded from API.
 	repos_map map[string]git.GitRepo
 	// At what point to update the list of repositories.
-	api_update_timestamp time.Time
-	queue                MinHeap
+	api_update_timestamp  time.Time
+	image_build_timestamp time.Time
+	queue                 MinHeap
 	// Which builds are currently running
 	builds []ScheduledBuild
 	// Atomic variables used to detect when a build has finished; length is the
@@ -40,13 +44,14 @@ mut:
 
 // init_daemon initializes a new Daemon object. It renews the repositories &
 // populates the build queue for the first time.
-pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int) ?Daemon {
+pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
 	mut d := Daemon{
 		address: address
 		api_key: api_key
 		base_image: base_image
 		global_schedule: global_schedule
 		api_update_frequency: api_update_frequency
+		image_rebuild_frequency: image_rebuild_frequency
 		atomics: []u64{len: max_concurrent_builds}
 		builds: []ScheduledBuild{len: max_concurrent_builds}
 		logger: logger
@@ -55,6 +60,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
 	// Initialize the repos & queue
 	d.renew_repos() ?
 	d.renew_queue() ?
+	d.rebuild_base_image() ?
 
 	return d
 }
@@ -78,7 +84,15 @@ pub fn (mut d Daemon) run() ? {
 			}
 		}
 
-		// TODO rebuild builder image when needed
+		// TODO remove old builder images.
+		// This issue is less trivial than it sounds, because a build could
+		// still be running when the image has to be rebuilt. That would
+		// prevent the image from being removed. Therefore, we will need to
+		// keep track of a list or something & remove an image once we have
+		// made sure it isn't being used anymore.
+		if time.now() >= d.image_build_timestamp {
+			d.rebuild_base_image() ?
+		}
 
 		// Schedules new builds when possible
 		d.start_new_builds() ?
@@ -170,3 +184,10 @@ fn (mut d Daemon) renew_queue() ? {
 		d.schedule_build(id, repo) ?
 	}
 }
+
+fn (mut d Daemon) rebuild_base_image() ? {
+	d.linfo("Rebuilding builder image....")
+
+	d.builder_image = build.create_build_image(d.base_image) ?
+	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
+}

From fb65efdfbe04fd521a7a7d480f5e14b8b101051f Mon Sep 17 00:00:00 2001
From: Jef Roosens
Date: Sat, 30 Apr 2022 18:38:24 +0200
Subject: [PATCH 19/21] feat(cron): added removal of old builder images

---
 src/cron/daemon/build.v  |  5 ++---
 src/cron/daemon/daemon.v | 29 ++++++++++++++++++++++-----
 vieter.toml              |  2 ++
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v
index afe5044..ec8be4d 100644
--- a/src/cron/daemon/build.v
+++ b/src/cron/daemon/build.v
@@ -2,7 +2,6 @@ module daemon
 
 import time
 import sync.stdatomic
-import rand
 import build
 
 const build_empty = 0
@@ -62,9 +62,9 @@ fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
 
 // run_build actually starts the build process for a given repo.
 fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
-	d.linfo('started build: ${sb.repo.url} ${sb.repo.branch}')
+	d.linfo('started build: $sb.repo.url $sb.repo.branch')
 
-	build.build_repo(d.address, d.api_key, d.builder_image, &sb.repo) ?
+	build.build_repo(d.address, d.api_key, d.builder_images.last(), &sb.repo) ?
 
 	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
 }
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
index 4eccfa0..09ccc3e 100644
--- a/src/cron/daemon/daemon.v
+++ b/src/cron/daemon/daemon.v
@@ -7,6 +7,7 @@ import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
 import math
 import build
+import docker
 
 struct ScheduledBuild {
 pub:
@@ -24,7 +25,7 @@ mut:
 	address                 string
 	api_key                 string
 	base_image              string
-	builder_image           string
+	builder_images          []string
 	global_schedule         CronExpression
 	api_update_frequency    int
 	image_rebuild_frequency int
@@ -92,6 +93,9 @@ pub fn (mut d Daemon) run() ? {
 		// made sure it isn't being used anymore.
 		if time.now() >= d.image_build_timestamp {
 			d.rebuild_base_image() ?
+			// In theory, executing this function here allows an old builder
+			// image to exist for at most image_rebuild_frequency minutes.
+			d.clean_old_base_images()
 		}
 
 		// Schedules new builds when possible
@@ -144,7 +148,7 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
 }
 
 fn (mut d Daemon) renew_repos() ? {
-	d.ldebug('Renewing repos...')
+	d.linfo('Renewing repos...')
 	mut new_repos := git.get_repos(d.address, d.api_key) ?
 
 	d.repos_map = new_repos.move()
@@ -155,7 +159,7 @@ fn (mut d Daemon) renew_repos() ? {
 // renew_queue replaces the old queue with a new one that reflects the newest
 // values in repos_map.
 fn (mut d Daemon) renew_queue() ? {
-	d.ldebug('Renewing queue...')
+	d.linfo('Renewing queue...')
 	mut new_queue := MinHeap{}
 
 	// Move any jobs that should have already started from the old queue onto
@@ -186,8 +190,23 @@ fn (mut d Daemon) renew_queue() ? {
 	}
 }
 
 fn (mut d Daemon) rebuild_base_image() ? {
-	d.linfo("Rebuilding builder image....")
+	d.linfo('Rebuilding builder image....')
 
-	d.builder_image = build.create_build_image(d.base_image) ?
+	d.builder_images << build.create_build_image(d.base_image) ?
 	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
 }
+
+fn (mut d Daemon) clean_old_base_images() {
+	mut i := 0
+
+	for i < d.builder_images.len - 1 {
+		// For each builder image, we try to remove it by calling the Docker
+		// API. If the function returns an error or false, that means the image
+		// wasn't deleted. Therefore, we move the index over. If the function
+		// returns true, the array's length has decreased by one so we don't
+		// move the index.
+		if !docker.remove_image(d.builder_images[i]) or { false } {
+			i += 1
+		}
+	}
+}
diff --git a/vieter.toml b/vieter.toml
index c5ddf9f..fc86d77 100644
--- a/vieter.toml
+++ b/vieter.toml
@@ -11,4 +11,6 @@ address = "http://localhost:8000"
 global_schedule = '* *'
 
 api_update_frequency = 2
+image_rebuild_frequency = 1
 max_concurrent_builds = 3
+

From f9f440500efd10aad30f13bd78f3aac2cc15f276 Mon Sep 17 00:00:00 2001
From: Jef Roosens
Date: Sat, 30 Apr 2022 20:22:03 +0200
Subject: [PATCH 20/21] docs: added comment string to each function

---
 src/build/build.v                |  9 +++++++++
 src/cron/daemon/daemon.v         |  6 ++++++
 src/cron/expression/expression.v |  5 +++++
 src/docker/docker.v              |  4 ++++
 src/git/cli.v                    |  6 ++++++
 src/git/client.v                 |  3 +++
 src/package/package.v            |  1 +
 src/repo/sync.v                  |  4 +++-
 src/server/auth.v                |  1 +
 src/server/git.v                 |  5 +++++
 src/server/routes.v              |  4 ++++
 src/web/parse.v                  |  2 ++
 12 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/src/build/build.v b/src/build/build.v
index 5f54564..2365fef 100644
--- a/src/build/build.v
+++ b/src/build/build.v
@@ -10,6 +10,11 @@ const container_build_dir = '/build'
 
 const build_image_repo = 'vieter-build'
 
+// create_build_image creates a builder image given some base image which can
+// then be used to build & package Arch packages. It mostly just updates the
+// system, installs some necessary packages & creates a non-root user to run
+// makepkg with. The base image should be some Linux distribution that uses
+// Pacman as its package manager.
 pub fn create_build_image(base_image string) ?string {
 	commands := [
 		// Update repos & install required packages
@@ -67,6 +72,9 @@ pub fn create_build_image(base_image string) ?string {
 	return image.id
 }
 
+// build_repo builds, packages & publishes a given Arch package based on the
+// provided GitRepo. The base image ID should be of an image previously created
+// by create_build_image.
 pub fn build_repo(address string, api_key string, base_image_id string, repo &git.GitRepo) ? {
 	build_arch := os.uname().machine
 
@@ -113,6 +121,7 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &gi
 	docker.remove_container(id) ?
 }
 
+// build builds every Git repo in the server's list.
 fn build(conf Config) ? {
 	build_arch := os.uname().machine
 
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
index 09ccc3e..4b22522 100644
--- a/src/cron/daemon/daemon.v
+++ b/src/cron/daemon/daemon.v
@@ -16,6 +16,7 @@ pub:
 	timestamp time.Time
 }
 
+// Overloaded operator for comparing ScheduledBuild objects
 fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
 	return r1.timestamp < r2.timestamp
 }
@@ -147,6 +148,8 @@ fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
 	})
 }
 
+// renew_repos requests the newest list of Git repos from the server & replaces
+// the old one.
 fn (mut d Daemon) renew_repos() ? {
 	d.linfo('Renewing repos...')
 	mut new_repos := git.get_repos(d.address, d.api_key) ?
@@ -189,6 +192,7 @@ fn (mut d Daemon) renew_queue() ? {
 	}
 }
 
+// rebuild_base_image recreates the builder image.
 fn (mut d Daemon) rebuild_base_image() ? {
 	d.linfo('Rebuilding builder image....')
 
@@ -196,6 +200,8 @@ fn (mut d Daemon) rebuild_base_image() ? {
 	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
 }
 
+// clean_old_base_images tries to remove any old but still present builder
+// images.
 fn (mut d Daemon) clean_old_base_images() {
 	mut i := 0
 
diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v
index 652870d..124337f 100644
--- a/src/cron/expression/expression.v
+++ b/src/cron/expression/expression.v
@@ -65,6 +65,7 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
 	if minute_index == ce.minutes.len && hour_index < ce.hours.len {
 		hour_index += 1
 	}
+
 	if hour_index == ce.hours.len && day_index < ce.days.len {
 		day_index += 1
 	}
@@ -197,6 +198,8 @@ fn parse_range(s string, min int, max int, mut bitv []bool) ? {
 	}
 }
 
+// bitv_to_ints converts a bit vector into an array containing the
+// corresponding values.
 fn bitv_to_ints(bitv []bool, min int) []int {
 	mut out := []int{}
 
@@ -209,6 +212,8 @@ fn bitv_to_ints(bitv []bool, min int) []int {
 	return out
 }
 
+// parse_part parses a given part of a cron expression & returns the
+// corresponding array of ints.
 fn parse_part(s string, min int, max int) ?[]int {
 	mut bitv := []bool{len: max - min + 1, init: false}
 
diff --git a/src/docker/docker.v b/src/docker/docker.v
index 07ceb8e..5deef83 100644
--- a/src/docker/docker.v
+++ b/src/docker/docker.v
@@ -9,6 +9,8 @@ const socket = '/var/run/docker.sock'
 
 const buf_len = 1024
 
+// send writes a request to the Docker socket, waits for a response & returns
+// it.
 fn send(req &string) ?http.Response {
 	// Open a connection to the socket
 	mut s := unix.connect_stream(docker.socket) or {
@@ -72,12 +74,14 @@ fn send(req &string) ?http.Response {
 	return http.parse_response(res.bytestr())
 }
 
+// request_with_body sends a request to the Docker socket with the given body.
 fn request_with_body(method string, url urllib.URL, content_type string, body string) ?http.Response {
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
 
 	return send(req)
 }
 
+// request sends a request to the Docker socket with an empty body.
 fn request(method string, url urllib.URL) ?http.Response {
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
 
diff --git a/src/git/cli.v b/src/git/cli.v
index 463f1ba..53527d5 100644
--- a/src/git/cli.v
+++ b/src/git/cli.v
@@ -96,6 +96,8 @@ pub fn cmd() cli.Command {
 	}
 }
 
+// get_repo_id_by_prefix tries to find the repo with the given prefix in its
+// ID. If multiple or none are found, an error is raised.
 fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
 	repos := get_repos(conf.address, conf.api_key) ?
 
@@ -118,6 +120,7 @@ fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
 	return res[0]
 }
 
+// list prints out a list of all repositories.
 fn list(conf Config) ? {
 	repos := get_repos(conf.address, conf.api_key) ?
 
@@ -126,12 +129,14 @@ fn list(conf Config) ? {
 	}
 }
 
+// add adds a new repository to the server's list.
 fn add(conf Config, url string, branch string, repo string, arch []string) ? {
 	res := add_repo(conf.address, conf.api_key, url, branch, repo, arch) ?
 
 	println(res.message)
 }
 
+// remove removes a repository from the server's list.
 fn remove(conf Config, id_prefix string) ? {
 	id := get_repo_id_by_prefix(conf, id_prefix) ?
 	res := remove_repo(conf.address, conf.api_key, id) ?
@@ -139,6 +144,7 @@ fn remove(conf Config, id_prefix string) ? {
 	println(res.message)
 }
 
+// patch patches a given repository with the provided params.
 fn patch(conf Config, id_prefix string, params map[string]string) ? {
 	id := get_repo_id_by_prefix(conf, id_prefix) ?
 	res := patch_repo(conf.address, conf.api_key, id, params) ?
diff --git a/src/git/client.v b/src/git/client.v
index e4a39ac..a43c9ca 100644
--- a/src/git/client.v
+++ b/src/git/client.v
@@ -4,6 +4,9 @@ import json
 import response { Response }
 import net.http
 
+// send_request is a convenience method for sending requests to the repos
+// API. It mostly does string manipulation to create a query string containing
+// the provided params.
 fn send_request(method http.Method, address string, url string, api_key string, params map[string]string) ?Response {
 	mut full_url := '$address$url'
 
diff --git a/src/package/package.v b/src/package/package.v
index a6be636..a1042b5 100644
--- a/src/package/package.v
+++ b/src/package/package.v
@@ -175,6 +175,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
 	}
 }
 
+// format_entry returns a string properly formatted to be added to a desc file.
 fn format_entry(key string, value string) string {
 	return '\n%$key%\n$value\n'
 }
diff --git a/src/repo/sync.v b/src/repo/sync.v
index 12756b7..9c5e7ed 100644
--- a/src/repo/sync.v
+++ b/src/repo/sync.v
@@ -2,6 +2,8 @@ module repo
 
 import os
 
+// archive_add_entry writes a file to an archive, given its path & inner path
+// inside the archive.
 fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &string, inner_path &string) {
 	st := C.stat{}
 
@@ -29,7 +31,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 	}
 }
 
-// Re-generate the repo archive files
+// sync regenerates the repository archive files.
 fn (r &RepoGroupManager) sync(repo string, arch string) ? {
 	subrepo_path := os.join_path(r.repos_dir, repo, arch)
 
diff --git a/src/server/auth.v b/src/server/auth.v
index 8bc9d55..7c8a676 100644
--- a/src/server/auth.v
+++ b/src/server/auth.v
@@ -2,6 +2,7 @@ module server
 
 import net.http
 
+// is_authorized checks whether the provided API key is correct.
 fn (mut app App) is_authorized() bool {
 	x_header := app.req.header.get_custom('X-Api-Key', http.HeaderQueryConfig{ exact: true }) or {
 		return false
diff --git a/src/server/git.v b/src/server/git.v
index a9d6f50..0cba17c 100644
--- a/src/server/git.v
+++ b/src/server/git.v
@@ -8,6 +8,7 @@ import response { new_data_response, new_response }
 
 const repos_file = 'repos.json'
 
+// get_repos returns the current list of repos.
 ['/api/repos'; get]
 fn (mut app App) get_repos() web.Result {
 	if !app.is_authorized() {
@@ -25,6 +26,7 @@ fn (mut app App) get_repos() web.Result {
 	return app.json(http.Status.ok, new_data_response(repos))
 }
 
+// get_single_repo returns the information for a single repo.
 ['/api/repos/:id'; get]
 fn (mut app App) get_single_repo(id string) web.Result {
 	if !app.is_authorized() {
@@ -48,6 +50,7 @@ fn (mut app App) get_single_repo(id string) web.Result {
 	return app.json(http.Status.ok, new_data_response(repo))
 }
 
+// post_repo creates a new repo from the provided query string.
 ['/api/repos'; post]
 fn (mut app App) post_repo() web.Result {
 	if !app.is_authorized() {
@@ -86,6 +89,7 @@ fn (mut app App) post_repo() web.Result {
 	return app.json(http.Status.ok, new_response('Repo added successfully.'))
 }
 
+// delete_repo removes a given repo from the server's list.
 ['/api/repos/:id'; delete]
 fn (mut app App) delete_repo(id string) web.Result {
 	if !app.is_authorized() {
@@ -113,6 +117,7 @@ fn (mut app App) delete_repo(id string) web.Result {
 	return app.json(http.Status.ok, new_response('Repo removed successfully.'))
 }
 
+// patch_repo updates a repo's data with the given query params.
 ['/api/repos/:id'; patch]
 fn (mut app App) patch_repo(id string) web.Result {
 	if !app.is_authorized() {
diff --git a/src/server/routes.v b/src/server/routes.v
index 4f6c4f0..f27afb4 100644
--- a/src/server/routes.v
+++ b/src/server/routes.v
@@ -16,6 +16,9 @@ pub fn (mut app App) healthcheck() web.Result {
 	return app.json(http.Status.ok, new_response('Healthy.'))
 }
 
+// get_repo_file handles all Pacman-related routes. It returns both the
+// repository's archives and package archives, as well as the contents of a
+// package's desc file.
 ['/:repo/:arch/:filename'; get; head]
 fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
 	mut full_path := ''
@@ -54,6 +57,7 @@ fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Re
 	return app.file(full_path)
 }
 
+// put_package handles publishing a package to a repository.
 ['/:repo/publish'; post]
 fn (mut app App) put_package(repo string) web.Result {
 	if !app.is_authorized() {
diff --git a/src/web/parse.v b/src/web/parse.v
index 2eeef5e..a095f0c 100644
--- a/src/web/parse.v
+++ b/src/web/parse.v
@@ -47,6 +47,7 @@ fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
 	return methods, path.to_lower()
 }
 
+// Extracts query parameters from a URL.
 fn parse_query_from_url(url urllib.URL) map[string]string {
 	mut query := map[string]string{}
 	for v in url.query().data {
@@ -55,6 +56,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
 	return query
 }
 
+// Extracts form data from an HTTP request.
 fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) {
 	mut form := map[string]string{}
 	mut files := map[string][]http.FileData{}

From cfacf9ed0f5bd14e90163b6fe78bca16d25e4f7d Mon Sep 17 00:00:00 2001
From: Jef Roosens
Date: Sat, 30 Apr 2022 20:48:49 +0200
Subject: [PATCH 21/21] fix(cron): don't show error for empty cron schedule

---
 Makefile                 |  4 ++--
 src/build/build.v        |  1 -
 src/cron/daemon/daemon.v | 11 ++++++++---
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/Makefile b/Makefile
index c4d496a..041bafc 100644
--- a/Makefile
+++ b/Makefile
@@ -77,8 +77,8 @@ v/v:
 clean:
	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs'
 
-.PHONY: docs
-docs:
+.PHONY: api-docs
+api-docs:
 	rm -rf '$(SRC_DIR)/_docs'
 	cd '$(SRC_DIR)' && v doc -all -f html -m -readme .
 
diff --git a/src/build/build.v b/src/build/build.v
index 2365fef..bc604fa 100644
--- a/src/build/build.v
+++ b/src/build/build.v
@@ -114,7 +114,6 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &gi
 			break
 		}
 
-		// Wait for 5 seconds
 		time.sleep(1 * time.second)
 	}
 
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
index 4b22522..729e94b 100644
--- a/src/cron/daemon/daemon.v
+++ b/src/cron/daemon/daemon.v
@@ -132,12 +132,17 @@ pub fn (mut d Daemon) run() ? {
 
 // schedule_build adds the next occurrence of the given repo build to the queue.
 fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
-	ce := parse_expression(repo.schedule) or {
-		// TODO This shouldn't return an error if the expression is empty.
-		d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
+	ce := if repo.schedule != '' {
+		parse_expression(repo.schedule) or {
+			// TODO This shouldn't return an error if the expression is empty.
+			d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
+			d.global_schedule
+		}
+	} else {
 		d.global_schedule
 	}
 
+	// A repo that can't be scheduled will just be skipped for now
 	timestamp := ce.next_from_now() ?