From ed29102717d016cfc337001bb054df6ef531ed07 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 20:54:06 +0100 Subject: [PATCH 01/51] chore(ci): update to V 0.3.2 image --- .woodpecker/arch-rel.yml | 2 +- .woodpecker/arch.yml | 2 +- .woodpecker/build.yml | 2 +- .woodpecker/docs.yml | 2 +- .woodpecker/gitea.yml | 2 +- .woodpecker/lint.yml | 2 +- .woodpecker/man.yml | 2 +- .woodpecker/test.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.woodpecker/arch-rel.yml b/.woodpecker/arch-rel.yml index f5f228e..f727486 100644 --- a/.woodpecker/arch-rel.yml +++ b/.woodpecker/arch-rel.yml @@ -9,7 +9,7 @@ skip_clone: true pipeline: build: - image: 'menci/archlinuxarm:base-devel' + image: 'git.rustybever.be/vieter-v/vieter-builder' commands: # Add the vieter repository so we can use the compiler - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf diff --git a/.woodpecker/arch.yml b/.woodpecker/arch.yml index 8f1a6ff..f5f8432 100644 --- a/.woodpecker/arch.yml +++ b/.woodpecker/arch.yml @@ -9,7 +9,7 @@ skip_clone: true pipeline: build: - image: 'menci/archlinuxarm:base-devel' + image: 'git.rustybever.be/vieter-v/vieter-builder' commands: # Add the vieter repository so we can use the compiler - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf diff --git a/.woodpecker/build.yml b/.woodpecker/build.yml index 9ee8085..f10e2a5 100644 --- a/.woodpecker/build.yml +++ b/.woodpecker/build.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' matrix: PLATFORM: diff --git a/.woodpecker/docs.yml b/.woodpecker/docs.yml index 048b1ad..cf4874e 100644 --- a/.woodpecker/docs.yml +++ b/.woodpecker/docs.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' platform: 'linux/amd64' branches: diff --git a/.woodpecker/gitea.yml b/.woodpecker/gitea.yml index 8e3b9d4..9034f33 100644 --- a/.woodpecker/gitea.yml +++ b/.woodpecker/gitea.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' platform: 'linux/amd64' branches: [ 'main' ] diff --git a/.woodpecker/lint.yml b/.woodpecker/lint.yml index c80ce33..f87c06f 100644 --- a/.woodpecker/lint.yml +++ b/.woodpecker/lint.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' # These checks already get performed on the feature branches branches: diff --git a/.woodpecker/man.yml b/.woodpecker/man.yml index 86a1bd8..8c6ca06 100644 --- a/.woodpecker/man.yml +++ b/.woodpecker/man.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' platform: 'linux/amd64' branches: diff --git a/.woodpecker/test.yml b/.woodpecker/test.yml index 08b7534..39cb9f9 100644 --- a/.woodpecker/test.yml +++ b/.woodpecker/test.yml @@ -1,5 +1,5 @@ variables: - - &vlang_image 'chewingbever/vlang:0.3' + - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' matrix: PLATFORM: From 22fd6e395b3bfa28fe04da80d9325e2b8387e0c4 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 21:10:45 +0100 Subject: [PATCH 02/51] refactor: compile on V 0.3.2 --- Makefile | 2 +- src/build/build.v | 36 +++++++------- src/client/client.v | 22 +++++---- src/client/logs.v | 20 
++++---- src/client/targets.v | 24 ++++----- src/console/aur/aur.v | 14 +++--- src/console/console.v | 10 ++-- src/console/logs/logs.v | 72 +++++++++++++-------------- src/console/man/man.v | 6 +-- src/console/schedule/schedule.v | 8 +-- src/console/targets/build.v | 14 +++--- src/console/targets/targets.v | 84 ++++++++++++++++---------------- src/cron/cli.v | 8 +-- src/cron/cron.v | 4 +- src/cron/daemon/daemon.v | 4 +- src/cron/daemon/log.v | 12 ++--- src/cron/expression/expression.v | 18 +++---- src/package/package.v | 12 ++--- src/server/cli.v | 8 +-- src/server/server.v | 2 +- src/util/stream.v | 12 ++--- src/util/util.v | 4 +- src/web/logging.v | 12 ++--- 23 files changed, 205 insertions(+), 203 deletions(-) diff --git a/Makefile b/Makefile index 69bd795..895d3fd 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SRC_DIR := src SOURCES != find '$(SRC_DIR)' -iname '*.v' V_PATH ?= v -V := $(V_PATH) -showcc -gc boehm +V := $(V_PATH) -showcc -gc boehm -W all: vieter diff --git a/src/build/build.v b/src/build/build.v index 2ad70a6..734427d 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -21,8 +21,8 @@ const ( // system, install some necessary packages & creates a non-root user to run // makepkg with. The base image should be some Linux distribution that uses // Pacman as its package manager. -pub fn create_build_image(base_image string) ?string { - mut dd := docker.new_conn()? +pub fn create_build_image(base_image string) !string { + mut dd := docker.new_conn()! defer { dd.close() or {} @@ -57,15 +57,15 @@ pub fn create_build_image(base_image string) ?string { image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' } // We pull the provided image - dd.pull_image(image_name, image_tag)? + dd.pull_image(image_name, image_tag)! - id := dd.container_create(c)?.id - // id := docker.create_container(c)? - dd.container_start(id)? + id := dd.container_create(c)!.id + // id := docker.create_container(c)! + dd.container_start(id)! // This loop waits until the container has stopped, so we can remove it after for { - data := dd.container_inspect(id)? + data := dd.container_inspect(id)! if !data.state.running { break @@ -79,8 +79,8 @@ pub fn create_build_image(base_image string) ?string { // TODO also add the base image's name into the image name to prevent // conflicts. tag := time.sys_mono_now().str() - image := dd.create_image_from_container(id, 'vieter-build', tag)? - dd.container_remove(id)? + image := dd.create_image_from_container(id, 'vieter-build', tag)! + dd.container_remove(id)! return image.id } @@ -96,8 +96,8 @@ pub: // build_target builds, packages & publishes a given Arch package based on the // provided target. The base image ID should be of an image previously created // by create_build_image. It returns the logs of the container. -pub fn build_target(address string, api_key string, base_image_id string, target &Target) ?BuildResult { - mut dd := docker.new_conn()? +pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { + mut dd := docker.new_conn()! defer { dd.close() or {} @@ -125,25 +125,25 @@ pub fn build_target(address string, api_key string, base_image_id string, target user: '0:0' } - id := dd.container_create(c)?.id - dd.container_start(id)? + id := dd.container_create(c)!.id + dd.container_start(id)! - mut data := dd.container_inspect(id)? + mut data := dd.container_inspect(id)! 
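// NOTE: the `!` suffixes introduced throughout this series mark V Result
// types: `foo()!` propagates the error to the caller, taking over the role
// the old `?` syntax played before Options and Results were split.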
// This loop waits until the container has stopped, so we can remove it after for data.state.running { time.sleep(1 * time.second) - data = dd.container_inspect(id)? + data = dd.container_inspect(id)! } - mut logs_stream := dd.container_get_logs(id)? + mut logs_stream := dd.container_get_logs(id)! // Read in the entire stream mut logs_builder := strings.new_builder(10 * 1024) - util.reader_to_writer(mut logs_stream, mut logs_builder)? + util.reader_to_writer(mut logs_stream, mut logs_builder)! - dd.container_remove(id)? + dd.container_remove(id)! return BuildResult{ start_time: data.state.start_time diff --git a/src/client/client.v b/src/client/client.v index d68ff18..aa6094a 100644 --- a/src/client/client.v +++ b/src/client/client.v @@ -21,7 +21,7 @@ pub fn new(address string, api_key string) Client { // send_request_raw sends an HTTP request, returning the http.Response object. // It encodes the params so that they're safe to pass as HTTP query parameters. -fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) ?http.Response { +fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) !http.Response { mut full_url := '$c.address$url' if params.len > 0 { @@ -38,31 +38,33 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str full_url = '$full_url?$params_str' } - mut req := http.new_request(method, full_url, body)? - req.add_custom_header('X-Api-Key', c.api_key)? + // Looking at the source code, this function doesn't actually fail, so I'm + // not sure why it returns an optional + mut req := http.new_request(method, full_url, body) or { return error('') } + req.add_custom_header('X-Api-Key', c.api_key)! - res := req.do()? + res := req.do()! return res } // send_request just calls send_request_with_body with an empty body. -fn (c &Client) send_request(method Method, url string, params map[string]string) ?Response { +fn (c &Client) send_request(method Method, url string, params map[string]string) !Response { return c.send_request_with_body(method, url, params, '') } // send_request_with_body calls send_request_raw_response & parses its // output as a Response object. -fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) ?Response { - res_text := c.send_request_raw_response(method, url, params, body)? - data := json.decode(Response, res_text)? +fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) !Response { + res_text := c.send_request_raw_response(method, url, params, body)! + data := json.decode(Response, res_text)! return data } // send_request_raw_response returns the raw text response for an HTTP request. -fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) ?string { - res := c.send_request_raw(method, url, params, body)? +fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) !string { + res := c.send_request_raw(method, url, params, body)! return res.body } diff --git a/src/client/logs.v b/src/client/logs.v index b414245..eaddc8c 100644 --- a/src/client/logs.v +++ b/src/client/logs.v @@ -6,40 +6,40 @@ import web.response { Response } import time // get_build_logs returns all build logs. 
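// The filter's fields (limit, offset, target id and date bounds) are
// encoded as HTTP query parameters by models.params_from.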
-pub fn (c &Client) get_build_logs(filter BuildLogFilter) ?Response<[]BuildLog> { +pub fn (c &Client) get_build_logs(filter BuildLogFilter) !Response<[]BuildLog> { params := models.params_from(filter) - data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)? + data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)! return data } // get_build_logs_for_target returns all build logs for a given target. -pub fn (c &Client) get_build_logs_for_target(target_id int) ?Response<[]BuildLog> { +pub fn (c &Client) get_build_logs_for_target(target_id int) !Response<[]BuildLog> { params := { 'repo': target_id.str() } - data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)? + data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)! return data } // get_build_log returns a specific build log. -pub fn (c &Client) get_build_log(id int) ?Response { - data := c.send_request(Method.get, '/api/v1/logs/$id', {})? +pub fn (c &Client) get_build_log(id int) !Response { + data := c.send_request(Method.get, '/api/v1/logs/$id', {})! return data } // get_build_log_content returns the contents of the build log file. -pub fn (c &Client) get_build_log_content(id int) ?string { - data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')? +pub fn (c &Client) get_build_log_content(id int) !string { + data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')! return data } // add_build_log adds a new build log to the server. -pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) ?Response { +pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response { params := { 'target': target_id.str() 'startTime': start_time.unix_time().str() 'endTime': end_time.unix_time().str() 'arch': arch 'exitCode': exit_code.str() } - data := c.send_request_with_body(Method.post, '/api/v1/logs', params, content)? + data := c.send_request_with_body(Method.post, '/api/v1/logs', params, content)! return data } diff --git a/src/client/targets.v b/src/client/targets.v index c5e44fe..fd4254c 100644 --- a/src/client/targets.v +++ b/src/client/targets.v @@ -5,21 +5,21 @@ import net.http { Method } import web.response { Response } // get_targets returns a list of targets, given a filter object. -pub fn (c &Client) get_targets(filter TargetFilter) ?[]Target { +pub fn (c &Client) get_targets(filter TargetFilter) ![]Target { params := models.params_from(filter) - data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)? + data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)! return data.data } // get_all_targets retrieves *all* targets from the API using the default // limit. -pub fn (c &Client) get_all_targets() ?[]Target { +pub fn (c &Client) get_all_targets() ![]Target { mut targets := []Target{} mut offset := u64(0) for { - sub_targets := c.get_targets(offset: offset)? + sub_targets := c.get_targets(offset: offset)! if sub_targets.len == 0 { break @@ -34,8 +34,8 @@ pub fn (c &Client) get_all_targets() ?[]Target { } // get_target returns the target for a specific id. -pub fn (c &Client) get_target(id int) ?Target { - data := c.send_request(Method.get, '/api/v1/targets/$id', {})? +pub fn (c &Client) get_target(id int) !Target { + data := c.send_request(Method.get, '/api/v1/targets/$id', {})!
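// The API wraps its JSON payload in a Response object; the decoded Target
// itself sits in the .data field.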
return data.data } @@ -49,24 +49,24 @@ pub struct NewTarget { } // add_target adds a new target to the server. -pub fn (c &Client) add_target(t NewTarget) ?Response { +pub fn (c &Client) add_target(t NewTarget) !Response { params := models.params_from(t) - data := c.send_request(Method.post, '/api/v1/targets', params)? + data := c.send_request(Method.post, '/api/v1/targets', params)! return data } // remove_target removes the target with the given id from the server. -pub fn (c &Client) remove_target(id int) ?Response { - data := c.send_request(Method.delete, '/api/v1/targets/$id', {})? +pub fn (c &Client) remove_target(id int) !Response { + data := c.send_request(Method.delete, '/api/v1/targets/$id', {})! return data } // patch_target sends a PATCH request to the given target with the params as // payload. -pub fn (c &Client) patch_target(id int, params map[string]string) ?Response { - data := c.send_request(Method.patch, '/api/v1/targets/$id', params)? +pub fn (c &Client) patch_target(id int, params map[string]string) !Response { + data := c.send_request(Method.patch, '/api/v1/targets/$id', params)! return data } diff --git a/src/console/aur/aur.v b/src/console/aur/aur.v index c98f8e6..6a061dd 100644 --- a/src/console/aur/aur.v +++ b/src/console/aur/aur.v @@ -21,12 +21,12 @@ pub fn cmd() cli.Command { name: 'search' description: 'Search for packages.' required_args: 1 - execute: fn (cmd cli.Command) ? { + execute: fn (cmd cli.Command) ! { c := aur.new() - pkgs := c.search(cmd.args[0])? + pkgs := c.search(cmd.args[0])! data := pkgs.map([it.name, it.description]) - println(console.pretty_table(['name', 'description'], data)?) + println(console.pretty_table(['name', 'description'], data)!) } }, cli.Command{ @@ -34,12 +34,12 @@ pub fn cmd() cli.Command { usage: 'repo pkg-name [pkg-name...]' description: 'Add the given AUR package(s) to Vieter. Non-existent packages will be silently ignored.' required_args: 2 - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! c := aur.new() - pkgs := c.info(cmd.args[1..])? + pkgs := c.info(cmd.args[1..])! vc := client.new(conf.address, conf.api_key) diff --git a/src/console/console.v b/src/console/console.v index caf4cca..5c40de8 100644 --- a/src/console/console.v +++ b/src/console/console.v @@ -13,7 +13,7 @@ pub fn tabbed_table(data [][]string) string { // pretty_table converts a list of string data into a pretty table. Many thanks // to @hungrybluedev in the Vlang Discord for providing this code! // https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192 -pub fn pretty_table(header []string, data [][]string) ?string { +pub fn pretty_table(header []string, data [][]string) !string { column_count := header.len mut column_widths := []int{len: column_count, init: header[it].len} @@ -26,7 +26,7 @@ pub fn pretty_table(header []string, data [][]string) ?string { } } - single_line_length := arrays.sum(column_widths)? + (column_count + 1) * 3 - 4 + single_line_length := arrays.sum(column_widths)! 
+ (column_count + 1) * 3 - 4 horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+' mut buffer := strings.new_builder(data.len * single_line_length) @@ -64,12 +64,12 @@ pub fn pretty_table(header []string, data [][]string) ?string { // export_man_pages recursively generates all man pages for the given // cli.Command & writes them to the given directory. -pub fn export_man_pages(cmd cli.Command, path string) ? { +pub fn export_man_pages(cmd cli.Command, path string) ! { man := cmd.manpage() os.write_file(os.join_path_single(path, cmd.full_name().replace(' ', '-') + '.1'), - man)? + man)! for sub_cmd in cmd.commands { - export_man_pages(sub_cmd, path)? + export_man_pages(sub_cmd, path)! } } diff --git a/src/console/logs/logs.v b/src/console/logs/logs.v index 41830c2..6d5ffad 100644 --- a/src/console/logs/logs.v +++ b/src/console/logs/logs.v @@ -63,30 +63,30 @@ pub fn cmd() cli.Command { flag: cli.FlagType.string }, ] - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! mut filter := BuildLogFilter{} - limit := cmd.flags.get_int('limit')? + limit := cmd.flags.get_int('limit')! if limit != 0 { filter.limit = u64(limit) } - offset := cmd.flags.get_int('offset')? + offset := cmd.flags.get_int('offset')! if offset != 0 { filter.offset = u64(offset) } - target_id := cmd.flags.get_int('target')? + target_id := cmd.flags.get_int('target')! if target_id != 0 { filter.target = target_id } tz_offset := time.offset() - if cmd.flags.get_bool('today')? { + if cmd.flags.get_bool('today')! { today := time.now() filter.after = time.new_time(time.Time{ @@ -98,12 +98,12 @@ pub fn cmd() cli.Command { } // The -today flag overwrites any of the other date flags. else { - day_str := cmd.flags.get_string('day')? - before_str := cmd.flags.get_string('before')? - after_str := cmd.flags.get_string('after')? + day_str := cmd.flags.get_string('day')! + before_str := cmd.flags.get_string('before')! + after_str := cmd.flags.get_string('after')! if day_str != '' { - day := time.parse_rfc3339(day_str)? + day := time.parse_rfc3339(day_str)! day_utc := time.new_time(time.Time{ year: day.year month: day.month @@ -118,24 +118,24 @@ pub fn cmd() cli.Command { filter.before = day_utc.add_days(1) } else { if before_str != '' { - filter.before = time.parse(before_str)?.add_seconds(-tz_offset) + filter.before = time.parse(before_str)!.add_seconds(-tz_offset) } if after_str != '' { - filter.after = time.parse(after_str)?.add_seconds(-tz_offset) + filter.after = time.parse(after_str)!.add_seconds(-tz_offset) } } } - if cmd.flags.get_bool('failed')? { + if cmd.flags.get_bool('failed')! { filter.exit_codes = [ '!0', ] } - raw := cmd.flags.get_bool('raw')? + raw := cmd.flags.get_bool('raw')! - list(conf, filter, raw)? + list(conf, filter, raw)! } }, cli.Command{ @@ -143,12 +143,12 @@ pub fn cmd() cli.Command { required_args: 1 usage: 'id' description: 'Show all info for a specific build log.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! id := cmd.args[0].int() - info(conf, id)? + info(conf, id)! 
} }, cli.Command{ name: 'content' required_args: 1 usage: 'id' description: 'Output the content of a build log to stdout.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! id := cmd.args[0].int() - content(conf, id)? + content(conf, id)! } }, ] } } // print_log_list prints a list of logs. -fn print_log_list(logs []BuildLog, raw bool) ? { +fn print_log_list(logs []BuildLog, raw bool) ! { data := logs.map([it.id.str(), it.target_id.str(), it.start_time.local().str(), it.exit_code.str()]) if raw { println(console.tabbed_table(data)) } else { - println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)?) + println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)!) } } // list prints a list of all build logs. -fn list(conf Config, filter BuildLogFilter, raw bool) ? { +fn list(conf Config, filter BuildLogFilter, raw bool) ! { c := client.new(conf.address, conf.api_key) - logs := c.get_build_logs(filter)?.data + logs := c.get_build_logs(filter)!.data - print_log_list(logs, raw)? + print_log_list(logs, raw)! } // list_for_target prints a list of all build logs for a given target. -fn list_for_target(conf Config, target_id int, raw bool) ? { +fn list_for_target(conf Config, target_id int, raw bool) ! { c := client.new(conf.address, conf.api_key) - logs := c.get_build_logs_for_target(target_id)?.data + logs := c.get_build_logs_for_target(target_id)!.data - print_log_list(logs, raw)? + print_log_list(logs, raw)! } // info prints the detailed info for a given build log. -fn info(conf Config, id int) ? { +fn info(conf Config, id int) ! { c := client.new(conf.address, conf.api_key) - log := c.get_build_log(id)?.data + log := c.get_build_log(id)!.data print(log) } // content outputs the contents of the log file for a given build log to // stdout. -fn content(conf Config, id int) ? { +fn content(conf Config, id int) ! { c := client.new(conf.address, conf.api_key) - content := c.get_build_log_content(id)? + content := c.get_build_log_content(id)! println(content) } diff --git a/src/console/man/man.v b/src/console/man/man.v index d91a140..22cb5f7 100644 --- a/src/console/man/man.v +++ b/src/console/man/man.v @@ -11,11 +11,11 @@ pub fn cmd() cli.Command { description: 'Generate all man pages & save them in the given directory.' usage: 'dir' required_args: 1 - execute: fn (cmd cli.Command) ? { + execute: fn (cmd cli.Command) ! { root := cmd.root() - os.mkdir_all(cmd.args[0])? + os.mkdir_all(cmd.args[0])! - console.export_man_pages(root, cmd.args[0])? + console.export_man_pages(root, cmd.args[0])! } } } diff --git a/src/console/schedule/schedule.v b/src/console/schedule/schedule.v index 8fceddd..7ce0516 100644 --- a/src/console/schedule/schedule.v +++ b/src/console/schedule/schedule.v @@ -18,11 +18,11 @@ pub fn cmd() cli.Command { default_value: ['5'] }, ] - execute: fn (cmd cli.Command) ? { - ce := parse_expression(cmd.args.join(' '))? - count := cmd.flags.get_int('count')? + execute: fn (cmd cli.Command) ! { + ce := parse_expression(cmd.args.join(' '))! + count := cmd.flags.get_int('count')! - for t in ce.next_n(time.now(), count)? { + for t in ce.next_n(time.now(), count)!
{ println(t) } } diff --git a/src/console/targets/build.v b/src/console/targets/build.v index 6337aa3..83ebde2 100644 --- a/src/console/targets/build.v +++ b/src/console/targets/build.v @@ -6,29 +6,29 @@ import os import build // build locally builds the target with the given id. -fn build(conf Config, target_id int) ? { +fn build(conf Config, target_id int) ! { c := client.new(conf.address, conf.api_key) - target := c.get_target(target_id)? + target := c.get_target(target_id)! build_arch := os.uname().machine println('Creating base image...') - image_id := build.create_build_image(conf.base_image)? + image_id := build.create_build_image(conf.base_image)! println('Running build...') - res := build.build_target(conf.address, conf.api_key, image_id, target)? + res := build.build_target(conf.address, conf.api_key, image_id, target)! println('Removing build image...') - mut dd := docker.new_conn()? + mut dd := docker.new_conn()! defer { dd.close() or {} } - dd.remove_image(image_id)? + dd.remove_image(image_id)! println('Uploading logs to Vieter...') c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code, - res.logs)? + res.logs)! } diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index 5640011..774a129 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -39,30 +39,30 @@ pub fn cmd() cli.Command { flag: cli.FlagType.string }, ] - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! mut filter := TargetFilter{} - limit := cmd.flags.get_int('limit')? + limit := cmd.flags.get_int('limit')! if limit != 0 { filter.limit = u64(limit) } - offset := cmd.flags.get_int('offset')? + offset := cmd.flags.get_int('offset')! if offset != 0 { filter.offset = u64(offset) } - repo := cmd.flags.get_string('repo')? + repo := cmd.flags.get_string('repo')! if repo != '' { filter.repo = repo } - raw := cmd.flags.get_bool('raw')? + raw := cmd.flags.get_bool('raw')! - list(conf, filter, raw)? + list(conf, filter, raw)! } }, cli.Command{ @@ -83,20 +83,20 @@ pub fn cmd() cli.Command { flag: cli.FlagType.string }, ] - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! t := NewTarget{ - kind: cmd.flags.get_string('kind')? + kind: cmd.flags.get_string('kind')! url: cmd.args[0] repo: cmd.args[1] branch: cmd.flags.get_string('branch') or { '' } } - raw := cmd.flags.get_bool('raw')? + raw := cmd.flags.get_bool('raw')! - add(conf, t, raw)? + add(conf, t, raw)! } }, cli.Command{ @@ -104,11 +104,11 @@ pub fn cmd() cli.Command { required_args: 1 usage: 'id' description: 'Remove a target that matches the given id.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - remove(conf, cmd.args[0])? + remove(conf, cmd.args[0])! 
} }, cli.Command{ @@ -116,11 +116,11 @@ pub fn cmd() cli.Command { required_args: 1 usage: 'id' description: 'Show detailed information for the target matching the id.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - info(conf, cmd.args[0])? + info(conf, cmd.args[0])! } }, cli.Command{ @@ -160,9 +160,9 @@ pub fn cmd() cli.Command { flag: cli.FlagType.string }, ] - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! found := cmd.flags.get_all_found() @@ -170,11 +170,11 @@ pub fn cmd() cli.Command { for f in found { if f.name != 'config-file' { - params[f.name] = f.get_string()? + params[f.name] = f.get_string()! } } - patch(conf, cmd.args[0], params)? + patch(conf, cmd.args[0], params)! } }, cli.Command{ @@ -182,11 +182,11 @@ pub fn cmd() cli.Command { required_args: 1 usage: 'id' description: 'Build the target with the given id & publish it.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - build(conf, cmd.args[0].int())? + build(conf, cmd.args[0].int())! } }, ] @@ -197,22 +197,22 @@ pub fn cmd() cli.Command { // ID. If multiple or none are found, an error is raised. // list prints out a list of all repositories. -fn list(conf Config, filter TargetFilter, raw bool) ? { +fn list(conf Config, filter TargetFilter, raw bool) ! { c := client.new(conf.address, conf.api_key) - repos := c.get_targets(filter)? + repos := c.get_targets(filter)! data := repos.map([it.id.str(), it.kind, it.url, it.repo]) if raw { println(console.tabbed_table(data)) } else { - println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)?) + println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)!) } } // add adds a new repository to the server's list. -fn add(conf Config, t &NewTarget, raw bool) ? { +fn add(conf Config, t &NewTarget, raw bool) ! { c := client.new(conf.address, conf.api_key) - res := c.add_target(t)? + res := c.add_target(t)! if raw { println(res.data) @@ -222,18 +222,18 @@ fn add(conf Config, t &NewTarget, raw bool) ? { } // remove removes a repository from the server's list. -fn remove(conf Config, id string) ? { +fn remove(conf Config, id string) ! { id_int := id.int() if id_int != 0 { c := client.new(conf.address, conf.api_key) - res := c.remove_target(id_int)? + res := c.remove_target(id_int)! println(res.message) } } // patch patches a given repository with the provided params. -fn patch(conf Config, id string, params map[string]string) ? { +fn patch(conf Config, id string, params map[string]string) ! { // We check the cron expression first because it's useless to send an // invalid one to the server. if 'schedule' in params && params['schedule'] != '' { @@ -245,14 +245,14 @@ fn patch(conf Config, id string, params map[string]string) ? 
{ id_int := id.int() if id_int != 0 { c := client.new(conf.address, conf.api_key) - res := c.patch_target(id_int, params)? + res := c.patch_target(id_int, params)! println(res.message) } } // info shows detailed information for a given repo. -fn info(conf Config, id string) ? { +fn info(conf Config, id string) ! { id_int := id.int() if id_int == 0 { @@ -260,6 +260,6 @@ fn info(conf Config, id string) ? { } c := client.new(conf.address, conf.api_key) - repo := c.get_target(id_int)? + repo := c.get_target(id_int)! println(repo) } diff --git a/src/cron/cli.v b/src/cron/cli.v index 4d95833..0d7a042 100644 --- a/src/cron/cli.v +++ b/src/cron/cli.v @@ -22,11 +22,11 @@ pub fn cmd() cli.Command { return cli.Command{ name: 'cron' description: 'Start the cron service that periodically runs builds.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - cron(conf)? + cron(conf)! } } } diff --git a/src/cron/cron.v b/src/cron/cron.v index 5f128cf..f1d6b7b 100644 --- a/src/cron/cron.v +++ b/src/cron/cron.v @@ -8,7 +8,7 @@ import os const log_file_name = 'vieter.cron.log' // cron starts a cron daemon & starts periodically scheduling builds. -pub fn cron(conf Config) ? { +pub fn cron(conf Config) ! { // Configure logger log_level := log.level_from_tag(conf.log_level) or { return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') @@ -27,7 +27,7 @@ pub fn cron(conf Config) ? { } mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce, - conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)? + conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)! d.run() } diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index 934d35a..8c6516c 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -53,7 +53,7 @@ mut: // init_daemon initializes a new Daemon object. It renews the targets & // populates the build queue for the first time. 
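// cron() above shows the intended call pattern: build the daemon from the
// Config values, then hand control over to d.run().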
-pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon { +pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) !Daemon { mut d := Daemon{ client: client.new(address, api_key) base_image: base_image @@ -207,7 +207,7 @@ fn (mut d Daemon) renew_queue() { // For some reason, using // ```v - // for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now { //``` // here causes the function to prematurely just exit, without any errors or anything, very weird // https://github.com/vlang/v/issues/14042 diff --git a/src/cron/daemon/log.v b/src/cron/daemon/log.v index 003898b..95a50e7 100644 --- a/src/cron/daemon/log.v +++ b/src/cron/daemon/log.v @@ -3,33 +3,33 @@ module daemon import log // log creates a log message with the given level -pub fn (mut d Daemon) log(msg &string, level log.Level) { +pub fn (mut d Daemon) log(msg string, level log.Level) { lock d.logger { d.logger.send_output(msg, level) } } // lfatal creates a log message with the fatal level -pub fn (mut d Daemon) lfatal(msg &string) { +pub fn (mut d Daemon) lfatal(msg string) { d.log(msg, log.Level.fatal) } // lerror creates a log message with the error level -pub fn (mut d Daemon) lerror(msg &string) { +pub fn (mut d Daemon) lerror(msg string) { d.log(msg, log.Level.error) } // lwarn creates a log message with the warn level -pub fn (mut d Daemon) lwarn(msg &string) { +pub fn (mut d Daemon) lwarn(msg string) { d.log(msg, log.Level.warn) } // linfo creates a log message with the info level -pub fn (mut d Daemon) linfo(msg &string) { +pub fn (mut d Daemon) linfo(msg string) { d.log(msg, log.Level.info) } // ldebug creates a log message with the debug level -pub fn (mut d Daemon) ldebug(msg &string) { +pub fn (mut d Daemon) ldebug(msg string) { d.log(msg, log.Level.debug) } diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v index 17d2dde..438805d 100644 --- a/src/cron/expression/expression.v +++ b/src/cron/expression/expression.v @@ -12,7 +12,7 @@ pub struct CronExpression { // next calculates the earliest time this cron expression is valid. It will // always pick a moment in the future, even if ref matches completely up to the // minute. This function consciously does not take leap years into account. -pub fn (ce &CronExpression) next(ref time.Time) ?time.Time { +pub fn (ce &CronExpression) next(ref time.Time) !time.Time { // If the given ref matches the next cron occurrence up to the minute, it // will return that value. Because we always want to return a value in the // future, we artificially shift the ref 60 seconds to make sure we always @@ -117,19 +117,19 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time { // next_from_now returns the result of ce.next(ref) where ref is the result of // time.now(). -pub fn (ce &CronExpression) next_from_now() ?time.Time { +pub fn (ce &CronExpression) next_from_now() !time.Time { return ce.next(time.now()) } // next_n returns the n next occurrences of the expression, given a starting // time. -pub fn (ce &CronExpression) next_n(ref time.Time, n int) ?[]time.Time { +pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time { mut times := []time.Time{cap: n} - times << ce.next(ref)?
+ times << ce.next(ref)! for i in 1 .. n { - times << ce.next(times[i - 1])? + times << ce.next(times[i - 1])! } return times @@ -137,7 +137,7 @@ pub fn (ce &CronExpression) next_n(ref time.Time, n int) ?[]time.Time { // parse_range parses a given string into a range of sorted integers, if // possible. -fn parse_range(s string, min int, max int, mut bitv []bool) ? { +fn parse_range(s string, min int, max int, mut bitv []bool) ! { mut start := min mut end := max mut interval := 1 @@ -228,11 +228,11 @@ fn bitv_to_ints(bitv []bool, min int) []int { // parse_part parses a given part of a cron expression & returns the // corresponding array of ints. -fn parse_part(s string, min int, max int) ?[]int { +fn parse_part(s string, min int, max int) ![]int { mut bitv := []bool{len: max - min + 1, init: false} for range in s.split(',') { - parse_range(range, min, max, mut bitv)? + parse_range(range, min, max, mut bitv)! } return bitv_to_ints(bitv, min) @@ -240,7 +240,7 @@ fn parse_part(s string, min int, max int) ?[]int { // parse_expression parses an entire cron expression string into a // CronExpression object, if possible. -pub fn parse_expression(exp string) ?CronExpression { +pub fn parse_expression(exp string) !CronExpression { // The filter allows for multiple spaces between parts mut parts := exp.split(' ').filter(it != '') diff --git a/src/package/package.v b/src/package/package.v index 9eaf5a2..aadf6f2 100644 --- a/src/package/package.v +++ b/src/package/package.v @@ -43,12 +43,12 @@ pub mut: } // checksum calculates the sha256 hash of the package -pub fn (p &Pkg) checksum() ?string { +pub fn (p &Pkg) checksum() !string { return util.hash_file(p.path) } // parse_pkg_info_string parses a PkgInfo object from a string -fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo { +fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo { mut pkg_info := PkgInfo{} // Iterate over the entire string @@ -101,7 +101,7 @@ fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo { // read_pkg_archive extracts the file list & .PKGINFO contents from an archive // NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs. -pub fn read_pkg_archive(pkg_path string) ?Pkg { +pub fn read_pkg_archive(pkg_path string) !Pkg { if !os.is_file(pkg_path) { return error("'$pkg_path' doesn't exist or isn't a file.") } @@ -159,7 +159,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg { pkg_text := unsafe { buf.vstring_with_len(size).clone() } - pkg_info = parse_pkg_info_string(pkg_text)? + pkg_info = parse_pkg_info_string(pkg_text)! } else { C.archive_read_data_skip(a) } @@ -201,7 +201,7 @@ pub fn (pkg &Pkg) filename() string { } // to_desc returns a desc file valid string representation -pub fn (pkg &Pkg) to_desc() ?string { +pub fn (pkg &Pkg) to_desc() !string { p := pkg.info // filename @@ -222,7 +222,7 @@ pub fn (pkg &Pkg) to_desc() ?string { desc += format_entry('CSIZE', p.csize.str()) desc += format_entry('ISIZE', p.size.str()) - sha256sum := pkg.checksum()? + sha256sum := pkg.checksum()! desc += format_entry('SHA256SUM', sha256sum) diff --git a/src/server/cli.v b/src/server/cli.v index 6fd09c5..26ee0f1 100644 --- a/src/server/cli.v +++ b/src/server/cli.v @@ -18,11 +18,11 @@ pub fn cmd() cli.Command { return cli.Command{ name: 'server' description: 'Start the Vieter server.' - execute: fn (cmd cli.Command) ? { - config_file := cmd.flags.get_string('config-file')? - conf := vconf.load(prefix: 'VIETER_', default_path: config_file)? + execute: fn (cmd cli.Command) ! 
{ + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - server(conf)? + server(conf)! } } } diff --git a/src/server/server.v b/src/server/server.v index 9903cea..d5f6135 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -24,7 +24,7 @@ pub mut: } // server starts the web server & starts listening for requests -pub fn server(conf Config) ? { +pub fn server(conf Config) ! { // Prevent using 'any' as the default arch if conf.default_arch == 'any' { util.exit_with_message(1, "'any' is not allowed as the value for default_arch.") diff --git a/src/util/stream.v b/src/util/stream.v index 06397aa..15cc618 100644 --- a/src/util/stream.v +++ b/src/util/stream.v @@ -5,7 +5,7 @@ import io import os // reader_to_writer tries to consume the entire reader & write it to the writer. -pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? { +pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ! { mut buf := []u8{len: 10 * 1024} for { @@ -21,8 +21,8 @@ pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? { } // reader_to_file writes the contents of a BufferedReader to a file -pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? { - mut file := os.create(path)? +pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! { + mut file := os.create(path)! defer { file.close() } @@ -69,11 +69,11 @@ pub fn match_array_in_array(a1 []T, a2 []T) int { // read_until_separator consumes an io.Reader until it encounters some // separator array. The data read is stored inside the provided res array. -pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? { +pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ! { mut buf := []u8{len: sep.len} for { - c := reader.read(mut buf)? + c := reader.read(mut buf)! res << buf[..c] match_len := match_array_in_array(buf[..c], sep) @@ -84,7 +84,7 @@ pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? { if match_len > 0 { match_left := sep.len - match_len - c2 := reader.read(mut buf[..match_left])? + c2 := reader.read(mut buf[..match_left])! res << buf[..c2] if buf[..c2] == sep[match_len..] { diff --git a/src/util/util.v b/src/util/util.v index 4cd374e..213104c 100644 --- a/src/util/util.v +++ b/src/util/util.v @@ -23,7 +23,7 @@ pub fn exit_with_message(code int, msg string) { } // hash_file returns the sha256 hash of a given file -pub fn hash_file(path &string) ?string { +pub fn hash_file(path &string) !string { file := os.open(path) or { return error('Failed to open file.') } mut sha256sum := sha256.new() @@ -39,7 +39,7 @@ pub fn hash_file(path &string) ?string { // This function never actually fails, but returns an option to follow // the Writer interface. - sha256sum.write(buf[..bytes_read])? + sha256sum.write(buf[..bytes_read])! 
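// Hashing the file in fixed-size chunks keeps memory usage constant,
// regardless of how large the package archive is.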
} return sha256sum.checksum().hex() diff --git a/src/web/logging.v b/src/web/logging.v index fc697ff..12b07d7 100644 --- a/src/web/logging.v +++ b/src/web/logging.v @@ -3,33 +3,33 @@ module web import log // log creates a log message with the given level -pub fn (mut ctx Context) log(msg &string, level log.Level) { +pub fn (mut ctx Context) log(msg string, level log.Level) { lock ctx.logger { ctx.logger.send_output(msg, level) } } // lfatal creates a log message with the fatal level -pub fn (mut ctx Context) lfatal(msg &string) { +pub fn (mut ctx Context) lfatal(msg string) { ctx.log(msg, log.Level.fatal) } // lerror creates a log message with the error level -pub fn (mut ctx Context) lerror(msg &string) { +pub fn (mut ctx Context) lerror(msg string) { ctx.log(msg, log.Level.error) } // lwarn creates a log message with the warn level -pub fn (mut ctx Context) lwarn(msg &string) { +pub fn (mut ctx Context) lwarn(msg string) { ctx.log(msg, log.Level.warn) } // linfo creates a log message with the info level -pub fn (mut ctx Context) linfo(msg &string) { +pub fn (mut ctx Context) linfo(msg string) { ctx.log(msg, log.Level.info) } // ldebug creates a log message with the debug level -pub fn (mut ctx Context) ldebug(msg &string) { +pub fn (mut ctx Context) ldebug(msg string) { ctx.log(msg, log.Level.debug) } From 23632be7a47f14d7e423b43c86256f8dead6d12e Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 21:43:25 +0100 Subject: [PATCH 03/51] refactor: use relocated module names --- Makefile | 2 +- src/build/build.v | 2 +- src/console/aur/aur.v | 4 ++-- src/console/logs/logs.v | 2 +- src/console/targets/build.v | 2 +- src/console/targets/targets.v | 2 +- src/cron/cli.v | 2 +- src/cron/daemon/daemon.v | 2 +- src/server/cli.v | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 895d3fd..69bd795 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SRC_DIR := src SOURCES != find '$(SRC_DIR)' -iname '*.v' V_PATH ?= v -V := $(V_PATH) -showcc -gc boehm -W +V := $(V_PATH) -showcc -gc boehm all: vieter diff --git a/src/build/build.v b/src/build/build.v index 734427d..247df6e 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -1,6 +1,6 @@ module build -import vieter_v.docker +import docker import encoding.base64 import time import os diff --git a/src/console/aur/aur.v b/src/console/aur/aur.v index 6a061dd..a6a3324 100644 --- a/src/console/aur/aur.v +++ b/src/console/aur/aur.v @@ -3,8 +3,8 @@ module aur import cli import console import client -import vieter_v.aur -import vieter_v.conf as vconf +import aur +import conf as vconf struct Config { address string [required] diff --git a/src/console/logs/logs.v b/src/console/logs/logs.v index 6d5ffad..1330dd0 100644 --- a/src/console/logs/logs.v +++ b/src/console/logs/logs.v @@ -1,7 +1,7 @@ module logs import cli -import vieter_v.conf as vconf +import conf as vconf import client import console import time diff --git a/src/console/targets/build.v b/src/console/targets/build.v index 83ebde2..9368558 100644 --- a/src/console/targets/build.v +++ b/src/console/targets/build.v @@ -1,7 +1,7 @@ module targets import client -import vieter_v.docker +import docker import os import build diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index 774a129..521ca23 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -1,7 +1,7 @@ module targets import cli -import vieter_v.conf as vconf +import conf as vconf import cron.expression { parse_expression } import client {
NewTarget } import console diff --git a/src/cron/cli.v b/src/cron/cli.v index 0d7a042..16a3537 100644 --- a/src/cron/cli.v +++ b/src/cron/cli.v @@ -1,7 +1,7 @@ module cron import cli -import vieter_v.conf as vconf +import conf as vconf struct Config { pub: diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v index 8c6516c..0d30a23 100644 --- a/src/cron/daemon/daemon.v +++ b/src/cron/daemon/daemon.v @@ -6,7 +6,7 @@ import datatypes { MinHeap } import cron.expression { CronExpression, parse_expression } import math import build -import vieter_v.docker +import docker import os import client import models { Target } diff --git a/src/server/cli.v b/src/server/cli.v index 26ee0f1..a9644f3 100644 --- a/src/server/cli.v +++ b/src/server/cli.v @@ -1,7 +1,7 @@ module server import cli -import vieter_v.conf as vconf +import conf as vconf struct Config { pub: From a2fda0d4b71dc75df750c5b02a85e092dafc1a6a Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 21:59:18 +0100 Subject: [PATCH 04/51] refactor: compile without warnings --- Makefile | 2 +- src/db/db.v | 4 ++-- src/repo/add.v | 32 ++++++++++++++++---------------- src/repo/remove.v | 24 ++++++++++++------------ src/repo/sync.v | 4 ++-- src/server/api_logs.v | 6 +++--- src/web/parse.v | 4 ++-- src/web/web.v | 18 +++++++++--------- 8 files changed, 47 insertions(+), 47 deletions(-) diff --git a/Makefile b/Makefile index 69bd795..895d3fd 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SRC_DIR := src SOURCES != find '$(SRC_DIR)' -iname '*.v' V_PATH ?= v -V := $(V_PATH) -showcc -gc boehm +V := $(V_PATH) -showcc -gc boehm -W all: vieter diff --git a/src/db/db.v b/src/db/db.v index 9459c05..b8b861a 100644 --- a/src/db/db.v +++ b/src/db/db.v @@ -26,8 +26,8 @@ const ( ) // init initializes a database & adds the correct tables. -pub fn init(db_path string) ?VieterDb { - conn := sqlite.connect(db_path)? +pub fn init(db_path string) !VieterDb { + conn := sqlite.connect(db_path)! sql conn { create table MigrationVersion diff --git a/src/repo/add.v b/src/repo/add.v index 608ca50..8ab3ae1 100644 --- a/src/repo/add.v +++ b/src/repo/add.v @@ -29,7 +29,7 @@ pub: } // new creates a new RepoGroupManager & creates the directories as needed -pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager { +pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager { if !os.is_dir(repos_dir) { os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') } } @@ -49,27 +49,27 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana // pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive // file, passes the result to add_pkg_in_repo, and hard links the archive to // the right subdirectories in r.pkg_dir if it was successfully added. -pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult { +pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult { pkg := package.read_pkg_archive(pkg_path) or { return error('Failed to read package file: $err.msg()') } - archs := r.add_pkg_in_repo(repo, pkg)? + archs := r.add_pkg_in_repo(repo, pkg)! // If the add was successful, we move the file to the packages directory for arch in archs { repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch)) dest_path := os.join_path_single(repo_pkg_path, pkg.filename()) - os.mkdir_all(repo_pkg_path)? + os.mkdir_all(repo_pkg_path)! 
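// Package archives are laid out as pkg_dir/<repo>/<arch>/<filename>.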
// We create hard links so that "any" arch packages aren't stored // multiple times - os.link(pkg_path, dest_path)? + os.link(pkg_path, dest_path)! } // After linking, we can remove the original file - os.rm(pkg_path)? + os.rm(pkg_path)! return RepoAddResult{ name: pkg.info.name @@ -85,11 +85,11 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?Re // r.default_arch. If this arch-repo doesn't exist yet, it is created. If the // architecture isn't 'any', the package is only added to the specific // architecture. -fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string { +fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]string { // A package not of arch 'any' can be handled easily by adding it to the // respective repo if pkg.info.arch != 'any' { - r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)? + r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)! return [pkg.info.arch] } @@ -104,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin // If this is the first package that's added to the repo, the directory // won't exist yet if os.exists(repo_dir) { - arch_repos = os.ls(repo_dir)? + arch_repos = os.ls(repo_dir)! } // The default_arch should always be updated when a package with arch 'any' @@ -118,7 +118,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin // not know which arch-repositories did succeed in adding the package, if // any. for arch in arch_repos { - r.add_pkg_in_arch_repo(repo, arch, pkg)? + r.add_pkg_in_arch_repo(repo, arch, pkg)! } return arch_repos @@ -128,24 +128,24 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin // arch-repo. It records the package's data in the arch-repo's desc & files // files, and afterwards updates the db & files archives to reflect these // changes. -fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ? { +fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! { pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version') // Remove the previous version of the package, if present - r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)? + r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)! os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') } - os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()?) or { - os.rmdir_all(pkg_dir)? + os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()!) or { + os.rmdir_all(pkg_dir)! return error('Failed to write desc file.') } os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or { - os.rmdir_all(pkg_dir)? + os.rmdir_all(pkg_dir)! return error('Failed to write files file.') } - r.sync(repo, arch)? + r.sync(repo, arch)! } diff --git a/src/repo/remove.v b/src/repo/remove.v index add921c..63866a9 100644 --- a/src/repo/remove.v +++ b/src/repo/remove.v @@ -5,7 +5,7 @@ import os // remove_pkg_from_arch_repo removes a package from an arch-repo's database. It // returns false if the package wasn't present in the database. It also // optionally re-syncs the repo archives. 
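// add_pkg_in_arch_repo above passes sync = false, as it regenerates the
// archives itself once the new package version has been written.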
-pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool { +pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) !bool { repo_dir := os.join_path(r.repos_dir, repo, arch) // If the repository doesn't exist yet, the result is automatically false @@ -15,7 +15,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, // We iterate over every directory in the repo dir // TODO filter so we only check directories - for d in os.ls(repo_dir)? { + for d in os.ls(repo_dir)! { // Because a repository only allows a single version of each package, // we need only compare whether the name of the package is the same, // not the version. @@ -25,22 +25,22 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, // We lock the mutex here to prevent other routines from creating a // new archive while we remove an entry lock r.mutex { - os.rmdir_all(os.join_path_single(repo_dir, d))? + os.rmdir_all(os.join_path_single(repo_dir, d))! } // Also remove the package archive repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch) - archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name) + archives := os.ls(repo_pkg_dir)!.filter(it.split('-')#[..-3].join('-') == name) for archive_name in archives { full_path := os.join_path_single(repo_pkg_dir, archive_name) - os.rm(full_path)? + os.rm(full_path)! } // Sync the db archives if requested if sync { - r.sync(repo, arch)? + r.sync(repo, arch)! } return true @@ -51,7 +51,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, } // remove_arch_repo removes an arch-repo & its packages. -pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool { +pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) !bool { repo_dir := os.join_path(r.repos_dir, repo, arch) // If the repository doesn't exist yet, the result is automatically false @@ -59,16 +59,16 @@ pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool { return false } - os.rmdir_all(repo_dir)? + os.rmdir_all(repo_dir)! pkg_dir := os.join_path(r.pkg_dir, repo, arch) - os.rmdir_all(pkg_dir)? + os.rmdir_all(pkg_dir)! return true } // remove_repo removes a repo & its packages. -pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool { +pub fn (r &RepoGroupManager) remove_repo(repo string) !bool { repo_dir := os.join_path_single(r.repos_dir, repo) // If the repository doesn't exist yet, the result is automatically false @@ -76,10 +76,10 @@ pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool { return false } - os.rmdir_all(repo_dir)? + os.rmdir_all(repo_dir)! pkg_dir := os.join_path_single(r.pkg_dir, repo) - os.rmdir_all(pkg_dir)? + os.rmdir_all(pkg_dir)! return true } diff --git a/src/repo/sync.v b/src/repo/sync.v index 73d21c8..9554748 100644 --- a/src/repo/sync.v +++ b/src/repo/sync.v @@ -32,7 +32,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri } // sync regenerates the repository archive files. -fn (r &RepoGroupManager) sync(repo string, arch string) ? { +fn (r &RepoGroupManager) sync(repo string, arch string) ! { subrepo_path := os.join_path(r.repos_dir, repo, arch) lock r.mutex { @@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ? 
{ C.archive_write_open_filename(a_files, &char(files_path.str)) // Iterate over each directory - for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path, + for d in os.ls(subrepo_path)!.filter(os.is_dir(os.join_path_single(subrepo_path, it))) { // desc mut inner_path := os.join_path_single(d, 'desc') diff --git a/src/server/api_logs.v b/src/server/api_logs.v index 287755a..fcbf024 100644 --- a/src/server/api_logs.v +++ b/src/server/api_logs.v @@ -43,9 +43,9 @@ fn (mut app App) v1_get_log_content(id int) web.Result { // parse_query_time unescapes an HTTP query parameter & tries to parse it as a // time.Time struct. -fn parse_query_time(query string) ?time.Time { - unescaped := urllib.query_unescape(query)? - t := time.parse(unescaped)? +fn parse_query_time(query string) !time.Time { + unescaped := urllib.query_unescape(query)! + t := time.parse(unescaped)! return t } diff --git a/src/web/parse.v b/src/web/parse.v index ee7a72c..7af635f 100644 --- a/src/web/parse.v +++ b/src/web/parse.v @@ -8,7 +8,7 @@ import net.http const attrs_to_ignore = ['auth'] // Parsing function attributes for methods and path. -fn parse_attrs(name string, attrs []string) ?([]http.Method, string) { +fn parse_attrs(name string, attrs []string) !([]http.Method, string) { if attrs.len == 0 { return [http.Method.get], '/$name' } @@ -61,7 +61,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string { } // Extract form data from an HTTP request. -fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) { +fn parse_form_from_request(request http.Request) !(map[string]string, map[string][]http.FileData) { mut form := map[string]string{} mut files := map[string][]http.FileData{} if request.method in methods_with_form { diff --git a/src/web/web.v b/src/web/web.v index 1d1480f..1b40e7a 100644 --- a/src/web/web.v +++ b/src/web/web.v @@ -24,7 +24,7 @@ pub: pub mut: // TCP connection to client. // But beware, do not store it for further use, after request processing web will close connection. - conn &net.TcpConn + conn &net.TcpConn = unsafe { nil } // Gives access to a shared logger object logger shared log.Log // time.ticks() from start of web connection handle. @@ -67,20 +67,20 @@ struct Route { pub fn (ctx Context) before_request() {} // send_string writes the given string to the TCP connection socket. -fn (mut ctx Context) send_string(s string) ? { - ctx.conn.write(s.bytes())? +fn (mut ctx Context) send_string(s string) ! { + ctx.conn.write(s.bytes())! } // send_reader reads at most `size` bytes from the given reader & writes them // to the TCP connection socket. Internally, a 10KB buffer is used, to avoid // having to store all bytes in memory at once. -fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? { +fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ! { mut buf := []u8{len: 10_000} mut bytes_left := size // Repeat as long as the stream still has data for bytes_left > 0 { - bytes_read := reader.read(mut buf)? + bytes_read := reader.read(mut buf)! bytes_left -= u64(bytes_read) mut to_write := bytes_read @@ -96,20 +96,20 @@ fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? { // send_custom_response sends the given http.Response to the client. It can be // used to overwrite the Context object & send a completely custom // http.Response instead. -fn (mut ctx Context) send_custom_response(resp &http.Response) ? { - ctx.send_string(resp.bytestr())? 
+fn (mut ctx Context) send_custom_response(resp &http.Response) ! { + ctx.send_string(resp.bytestr())! } // send_response_header constructs a valid HTTP response with an empty body & // sends it to the client. -pub fn (mut ctx Context) send_response_header() ? { +pub fn (mut ctx Context) send_response_header() ! { mut resp := http.new_response( header: ctx.header.join(headers_close) ) resp.header.add(.content_type, ctx.content_type) resp.set_status(ctx.status) - ctx.send_custom_response(resp)? + ctx.send_custom_response(resp)! } // send is a convenience function for sending the HTTP response with an empty From 161341a1088942d24153cea599f857253a5c61d8 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 22:07:14 +0100 Subject: [PATCH 05/51] fix: still use openssl --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 895d3fd..332f70d 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SRC_DIR := src SOURCES != find '$(SRC_DIR)' -iname '*.v' V_PATH ?= v -V := $(V_PATH) -showcc -gc boehm -W +V := $(V_PATH) -showcc -gc boehm -W -d use_openssl all: vieter From 96a9798d3fa29cdebfa714cfc18754dde663d9f5 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 1 Nov 2022 22:30:48 +0100 Subject: [PATCH 06/51] refactor: updated tests to new syntax --- src/cron/expression/expression_parse_test.v | 50 ++++++++++----------- src/cron/expression/expression_test.v | 24 +++++----- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/src/cron/expression/expression_parse_test.v b/src/cron/expression/expression_parse_test.v index 4eebc49..5c12329 100644 --- a/src/cron/expression/expression_parse_test.v +++ b/src/cron/expression/expression_parse_test.v @@ -11,88 +11,88 @@ fn parse_range_error(s string, min int, max int) string { } // =====parse_range===== -fn test_range_star_range() ? { +fn test_range_star_range() ! { mut bitv := []bool{len: 6, init: false} - parse_range('*', 0, 5, mut bitv)? + parse_range('*', 0, 5, mut bitv)! assert bitv == [true, true, true, true, true, true] } -fn test_range_number() ? { +fn test_range_number() ! { mut bitv := []bool{len: 6, init: false} - parse_range('4', 0, 5, mut bitv)? + parse_range('4', 0, 5, mut bitv)! assert bitv_to_ints(bitv, 0) == [4] } -fn test_range_number_too_large() ? { +fn test_range_number_too_large() ! { assert parse_range_error('10', 0, 6) == 'Out of range.' } -fn test_range_number_too_small() ? { +fn test_range_number_too_small() ! { assert parse_range_error('0', 2, 6) == 'Out of range.' } -fn test_range_number_invalid() ? { +fn test_range_number_invalid() ! { assert parse_range_error('x', 0, 6) == 'Invalid number.' } -fn test_range_step_star_1() ? { +fn test_range_step_star_1() ! { mut bitv := []bool{len: 21, init: false} - parse_range('*/4', 0, 20, mut bitv)? + parse_range('*/4', 0, 20, mut bitv)! assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20] } -fn test_range_step_star_2() ? { +fn test_range_step_star_2() ! { mut bitv := []bool{len: 8, init: false} - parse_range('*/3', 1, 8, mut bitv)? + parse_range('*/3', 1, 8, mut bitv)! assert bitv_to_ints(bitv, 1) == [1, 4, 7] } -fn test_range_step_star_too_large() ? { +fn test_range_step_star_too_large() ! { assert parse_range_error('*/21', 0, 20) == 'Step size too large.' } -fn test_range_step_zero() ? { +fn test_range_step_zero() ! { assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.' } -fn test_range_step_number() ? { +fn test_range_step_number() ! 
{ mut bitv := []bool{len: 21, init: false} - parse_range('5/4', 2, 22, mut bitv)? + parse_range('5/4', 2, 22, mut bitv)! assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21] } -fn test_range_step_number_too_large() ? { +fn test_range_step_number_too_large() ! { assert parse_range_error('10/4', 0, 5) == 'Out of range.' } -fn test_range_step_number_too_small() ? { +fn test_range_step_number_too_small() ! { assert parse_range_error('2/4', 5, 10) == 'Out of range.' } -fn test_range_dash() ? { +fn test_range_dash() ! { mut bitv := []bool{len: 10, init: false} - parse_range('4-8', 0, 9, mut bitv)? + parse_range('4-8', 0, 9, mut bitv)! assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8] } -fn test_range_dash_step() ? { +fn test_range_dash_step() ! { mut bitv := []bool{len: 10, init: false} - parse_range('4-8/2', 0, 9, mut bitv)? + parse_range('4-8/2', 0, 9, mut bitv)! assert bitv_to_ints(bitv, 0) == [4, 6, 8] } // =====parse_part===== -fn test_part_single() ? { - assert parse_part('*', 0, 5)? == [0, 1, 2, 3, 4, 5] +fn test_part_single() ! { + assert parse_part('*', 0, 5)! == [0, 1, 2, 3, 4, 5] } -fn test_part_multiple() ? { - assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8] +fn test_part_multiple() ! { + assert parse_part('*/2,2/3', 1, 8)! == [1, 2, 3, 5, 7, 8] } diff --git a/src/cron/expression/expression_test.v b/src/cron/expression/expression_test.v index 9e25e92..82bf959 100644 --- a/src/cron/expression/expression_test.v +++ b/src/cron/expression/expression_test.v @@ -2,12 +2,12 @@ module expression import time { parse } -fn util_test_time(exp string, t1_str string, t2_str string) ? { - ce := parse_expression(exp)? - t1 := parse(t1_str)? - t2 := parse(t2_str)? +fn util_test_time(exp string, t1_str string, t2_str string) ! { + ce := parse_expression(exp)! + t1 := parse(t1_str)! + t2 := parse(t2_str)! - t3 := ce.next(t1)? + t3 := ce.next(t1)! assert t2.year == t3.year assert t2.month == t3.month @@ -16,19 +16,19 @@ fn util_test_time(exp string, t1_str string, t2_str string) ? { assert t2.minute == t3.minute } -fn test_next_simple() ? { +fn test_next_simple() ! { // Very simple - util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')? + util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')! // Overlap to next day - util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')? - util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')? + util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')! + util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')! - util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')? + util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')! // Overlap to next month - util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')? + util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')! // Overlap to next year - util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')? + util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')! 
} From cc9dcb3058f7e16e74c0cb1938aa634df34810a1 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Wed, 2 Nov 2022 18:25:49 +0100 Subject: [PATCH 07/51] fix(ci): install dependencies when linting --- .gitignore | 1 + .woodpecker/lint.yml | 13 ++++++++++++- Makefile | 4 ++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index a2804fe..aaec9ef 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ gdb.txt # Generated docs _docs/ +docs/resources/_gen/ /man/ # VLS logs diff --git a/.woodpecker/lint.yml b/.woodpecker/lint.yml index f87c06f..ec64d13 100644 --- a/.woodpecker/lint.yml +++ b/.woodpecker/lint.yml @@ -7,10 +7,21 @@ branches: platform: 'linux/amd64' pipeline: + # vfmt seems to get confused if these aren't present + install-modules: + image: *vlang_image + pull: true + commands: + - export VMODULES=$PWD/.vmodules + - 'cd src && v install' + when: + event: [pull_request] + lint: image: *vlang_image pull: true commands: + - export VMODULES=$PWD/.vmodules - make lint when: - event: [ pull_request ] + event: [pull_request] diff --git a/Makefile b/Makefile index 332f70d..e716807 100644 --- a/Makefile +++ b/Makefile @@ -92,9 +92,9 @@ clean: .PHONY: autofree autofree: afvieter afvieter: $(SOURCES) - $(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR) + $(V) -showcc -autofree -o afvieter $(SRC_DIR) .PHONY: skip-unused skip-unused: suvieter suvieter: $(SOURCES) - $(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR) + $(V) -skip-unused -o suvieter $(SRC_DIR) From 3095daed7dacdc56a0f17b4f937455e656143cc9 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Wed, 2 Nov 2022 18:38:02 +0100 Subject: [PATCH 08/51] chore: update dockerfile & changelog --- CHANGELOG.md | 4 ++++ Dockerfile | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cd39c4..18311d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev) +### Changed + +* Migrated codebase to V 0.3.2 + ## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0) ### Added diff --git a/Dockerfile b/Dockerfile index 7aed917..210ae66 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM chewingbever/vlang:0.3 AS builder +FROM git.rustybever.be/chewing_bever/vlang:0.3.2 AS builder ARG TARGETPLATFORM ARG CI_COMMIT_SHA From 9a552f53027f526df9e8ad43ec218708d5e3445c Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 7 Nov 2022 21:11:10 +0100 Subject: [PATCH 09/51] fix(server): remove NOT NULL constraint on branch (fixes #289) --- src/db/db.v | 4 ++- .../migrations/004-nullable-branch/down.sql | 26 +++++++++++++++++++ src/db/migrations/004-nullable-branch/up.sql | 23 ++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 src/db/migrations/004-nullable-branch/down.sql create mode 100644 src/db/migrations/004-nullable-branch/up.sql diff --git a/src/db/db.v b/src/db/db.v index b8b861a..6d9ab43 100644 --- a/src/db/db.v +++ b/src/db/db.v @@ -17,11 +17,13 @@ const ( $embed_file('migrations/001-initial/up.sql'), $embed_file('migrations/002-rename-to-targets/up.sql'), $embed_file('migrations/003-target-url-type/up.sql'), + $embed_file('migrations/004-nullable-branch/up.sql') ] migrations_down = [ $embed_file('migrations/001-initial/down.sql'), $embed_file('migrations/002-rename-to-targets/down.sql'), $embed_file('migrations/003-target-url-type/down.sql'), + 
$embed_file('migrations/004-nullable-branch/down.sql') ] ) @@ -60,7 +62,7 @@ pub fn init(db_path string) !VieterDb { res := conn.exec_none(part) if res != sqlite.sqlite_done { - return error('An error occurred while applying migration $version_num') + return error('An error occurred while applying migration $version_num: SQLite error code $res') } } diff --git a/src/db/migrations/004-nullable-branch/down.sql b/src/db/migrations/004-nullable-branch/down.sql new file mode 100644 index 0000000..2515593 --- /dev/null +++ b/src/db/migrations/004-nullable-branch/down.sql @@ -0,0 +1,26 @@ +-- This down won't really work because it'll throw NOT NULL errors, but I'm +-- just putting it here for future reference (still not sure whether I'm even + -- gonna use these) +PRAGMA foreign_keys=off; + +BEGIN TRANSACTION; + +ALTER TABLE Target RENAME TO _Target_old; + +CREATE TABLE Target ( + id INTEGER PRIMARY KEY, + url TEXT NOT NULL, + branch TEXT NOT NULL, + repo TEXT NOT NULL, + schedule TEXT, + kind TEXT NOT NULL DEFAULT 'git' +); + +INSERT INTO Target (id, url, branch, repo, schedule, kind) + SELECT id, url, branch, repo, schedule, kind FROM _Target_old; + +DROP TABLE _Target_old; + +COMMIT; + +PRAGMA foreign_keys=on; diff --git a/src/db/migrations/004-nullable-branch/up.sql b/src/db/migrations/004-nullable-branch/up.sql new file mode 100644 index 0000000..6333c37 --- /dev/null +++ b/src/db/migrations/004-nullable-branch/up.sql @@ -0,0 +1,23 @@ +PRAGMA foreign_keys=off; + +BEGIN TRANSACTION; + +ALTER TABLE Target RENAME TO _Target_old; + +CREATE TABLE Target ( + id INTEGER PRIMARY KEY, + url TEXT NOT NULL, + branch TEXT, + repo TEXT NOT NULL, + schedule TEXT, + kind TEXT NOT NULL DEFAULT 'git' +); + +INSERT INTO Target (id, url, branch, repo, schedule, kind) + SELECT id, url, branch, repo, schedule, kind FROM _Target_old; + +DROP TABLE _Target_old; + +COMMIT; + +PRAGMA foreign_keys=on; From 5542be041890fb9f438a0b2f03a764579d2f119e Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 7 Nov 2022 21:13:40 +0100 Subject: [PATCH 10/51] fix(api): set arch if not provided or empty (fixes #278) --- src/server/api_targets.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/api_targets.v b/src/server/api_targets.v index 6f284af..c4d32d2 100644 --- a/src/server/api_targets.v +++ b/src/server/api_targets.v @@ -32,7 +32,7 @@ fn (mut app App) v1_post_target() web.Result { // If a repo is created without specifying the arch, we assume it's meant // for the default architecture. - if 'arch' !in params { + if 'arch' !in params || params['arch'] == '' { params['arch'] = app.conf.default_arch } From fc4dc30f741c882368c616298b5b6ba06d5d6ce1 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 7 Nov 2022 21:35:49 +0100 Subject: [PATCH 11/51] fix(api): always return JSON response on success (fixes #276) --- src/console/targets/targets.v | 7 ++----- src/server/api_targets.v | 8 +++++--- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index 521ca23..4179363 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -227,8 +227,7 @@ fn remove(conf Config, id string) ! { if id_int != 0 { c := client.new(conf.address, conf.api_key) - res := c.remove_target(id_int)! - println(res.message) + c.remove_target(id_int)! } } @@ -245,9 +244,7 @@ fn patch(conf Config, id string, params map[string]string) ! 
{ id_int := id.int() if id_int != 0 { c := client.new(conf.address, conf.api_key) - res := c.patch_target(id_int, params)! - - println(res.message) + c.patch_target(id_int, params)! } } diff --git a/src/server/api_targets.v b/src/server/api_targets.v index c4d32d2..16db7e9 100644 --- a/src/server/api_targets.v +++ b/src/server/api_targets.v @@ -47,7 +47,7 @@ fn (mut app App) v1_post_target() web.Result { id := app.db.add_target(new_repo) - return app.json(http.Status.ok, new_data_response(id)) + return app.json(.ok, new_data_response(id)) } // v1_delete_target removes a given target from the server's list. @@ -55,7 +55,7 @@ fn (mut app App) v1_post_target() web.Result { fn (mut app App) v1_delete_target(id int) web.Result { app.db.delete_target(id) - return app.status(.ok) + return app.json(.ok, new_response('')) } // v1_patch_target updates a target's data with the given query params. @@ -69,5 +69,7 @@ fn (mut app App) v1_patch_target(id int) web.Result { app.db.update_target_archs(id, arch_objs) } - return app.status(.ok) + repo := app.db.get_target(id) or { return app.status(.internal_server_error) } + + return app.json(.ok, new_data_response(repo)) } From 17e58c91ed95d227d12364c56469da5144db8757 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 7 Nov 2022 21:40:59 +0100 Subject: [PATCH 12/51] chore: updated changelog; ran formatter --- CHANGELOG.md | 8 ++++++++ src/db/db.v | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18311d7..a550524 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * Migrated codebase to V 0.3.2 +### Fixed + +* Arch value for target is now properly set if not provided +* All API endpoints now return proper JSON on success + * CLI no longer exits with non-zero status code when removing/patching + target +* Allow NULL values for branch in database + ## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0) ### Added diff --git a/src/db/db.v b/src/db/db.v index 6d9ab43..1a0160e 100644 --- a/src/db/db.v +++ b/src/db/db.v @@ -17,13 +17,13 @@ const ( $embed_file('migrations/001-initial/up.sql'), $embed_file('migrations/002-rename-to-targets/up.sql'), $embed_file('migrations/003-target-url-type/up.sql'), - $embed_file('migrations/004-nullable-branch/up.sql') + $embed_file('migrations/004-nullable-branch/up.sql'), ] migrations_down = [ $embed_file('migrations/001-initial/down.sql'), $embed_file('migrations/002-rename-to-targets/down.sql'), $embed_file('migrations/003-target-url-type/down.sql'), - $embed_file('migrations/004-nullable-branch/down.sql') + $embed_file('migrations/004-nullable-branch/down.sql'), ] ) From 949379616004e093e45fc506011b4862f57ec682 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Mon, 12 Sep 2022 13:59:52 +0200 Subject: [PATCH 13/51] refactor(package): split module into two files --- src/package/README.md | 5 ++ src/package/format.v | 103 ++++++++++++++++++++++++++++++++++++++++++ src/package/package.v | 101 ----------------------------------------- 3 files changed, 108 insertions(+), 101 deletions(-) create mode 100644 src/package/README.md create mode 100644 src/package/format.v diff --git a/src/package/README.md b/src/package/README.md new file mode 100644 index 0000000..b2bcbd7 --- /dev/null +++ b/src/package/README.md @@ -0,0 +1,5 @@ +# package + +This module handles both parsing the published Arch tarballs & the contents of +their `.PKGINFO` files, as well as generating the 
contents of the database +archives' `desc` & `files` files. diff --git a/src/package/format.v b/src/package/format.v new file mode 100644 index 0000000..a81d327 --- /dev/null +++ b/src/package/format.v @@ -0,0 +1,103 @@ +module package + +// format_entry returns a string properly formatted to be added to a desc file. +[inline] +fn format_entry(key string, value string) string { + return '\n%$key%\n$value\n' +} + +// full_name returns the properly formatted name for the package, including +// version & architecture +pub fn (pkg &Pkg) full_name() string { + p := pkg.info + return '$p.name-$p.version-$p.arch' +} + +// filename returns the correct filename of the package file +pub fn (pkg &Pkg) filename() string { + ext := match pkg.compression { + 0 { '.tar' } + 1 { '.tar.gz' } + 6 { '.tar.xz' } + 14 { '.tar.zst' } + else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") } + } + + return '${pkg.full_name()}.pkg$ext' +} + +// to_desc returns a desc file valid string representation +pub fn (pkg &Pkg) to_desc() !string { + p := pkg.info + + // filename + mut desc := '%FILENAME%\n$pkg.filename()\n' + + desc += format_entry('NAME', p.name) + desc += format_entry('BASE', p.base) + desc += format_entry('VERSION', p.version) + + if p.description.len > 0 { + desc += format_entry('DESC', p.description) + } + + if p.groups.len > 0 { + desc += format_entry('GROUPS', p.groups.join_lines()) + } + + desc += format_entry('CSIZE', p.csize.str()) + desc += format_entry('ISIZE', p.size.str()) + + sha256sum := pkg.checksum()! + + desc += format_entry('SHA256SUM', sha256sum) + + // TODO add pgpsig stuff + + if p.url.len > 0 { + desc += format_entry('URL', p.url) + } + + if p.licenses.len > 0 { + desc += format_entry('LICENSE', p.licenses.join_lines()) + } + + desc += format_entry('ARCH', p.arch) + desc += format_entry('BUILDDATE', p.build_date.str()) + desc += format_entry('PACKAGER', p.packager) + + if p.replaces.len > 0 { + desc += format_entry('REPLACES', p.replaces.join_lines()) + } + + if p.conflicts.len > 0 { + desc += format_entry('CONFLICTS', p.conflicts.join_lines()) + } + + if p.provides.len > 0 { + desc += format_entry('PROVIDES', p.provides.join_lines()) + } + + if p.depends.len > 0 { + desc += format_entry('DEPENDS', p.depends.join_lines()) + } + + if p.optdepends.len > 0 { + desc += format_entry('OPTDEPENDS', p.optdepends.join_lines()) + } + + if p.makedepends.len > 0 { + desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines()) + } + + if p.checkdepends.len > 0 { + desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines()) + } + + return '$desc\n' +} + +// to_files returns a files file valid string representation +pub fn (pkg &Pkg) to_files() string { + return '%FILES%\n$pkg.files.join_lines()\n' +} diff --git a/src/package/package.v b/src/package/package.v index aadf6f2..4518ffd 100644 --- a/src/package/package.v +++ b/src/package/package.v @@ -174,104 +174,3 @@ pub fn read_pkg_archive(pkg_path string) !Pkg { compression: compression_code } } - -// format_entry returns a string properly formatted to be added to a desc file. 
-fn format_entry(key string, value string) string { - return '\n%$key%\n$value\n' -} - -// full_name returns the properly formatted name for the package, including -// version & architecture -pub fn (pkg &Pkg) full_name() string { - p := pkg.info - return '$p.name-$p.version-$p.arch' -} - -// filename returns the correct filename of the package file -pub fn (pkg &Pkg) filename() string { - ext := match pkg.compression { - 0 { '.tar' } - 1 { '.tar.gz' } - 6 { '.tar.xz' } - 14 { '.tar.zst' } - else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") } - } - - return '${pkg.full_name()}.pkg$ext' -} - -// to_desc returns a desc file valid string representation -pub fn (pkg &Pkg) to_desc() !string { - p := pkg.info - - // filename - mut desc := '%FILENAME%\n$pkg.filename()\n' - - desc += format_entry('NAME', p.name) - desc += format_entry('BASE', p.base) - desc += format_entry('VERSION', p.version) - - if p.description.len > 0 { - desc += format_entry('DESC', p.description) - } - - if p.groups.len > 0 { - desc += format_entry('GROUPS', p.groups.join_lines()) - } - - desc += format_entry('CSIZE', p.csize.str()) - desc += format_entry('ISIZE', p.size.str()) - - sha256sum := pkg.checksum()! - - desc += format_entry('SHA256SUM', sha256sum) - - // TODO add pgpsig stuff - - if p.url.len > 0 { - desc += format_entry('URL', p.url) - } - - if p.licenses.len > 0 { - desc += format_entry('LICENSE', p.licenses.join_lines()) - } - - desc += format_entry('ARCH', p.arch) - desc += format_entry('BUILDDATE', p.build_date.str()) - desc += format_entry('PACKAGER', p.packager) - - if p.replaces.len > 0 { - desc += format_entry('REPLACES', p.replaces.join_lines()) - } - - if p.conflicts.len > 0 { - desc += format_entry('CONFLICTS', p.conflicts.join_lines()) - } - - if p.provides.len > 0 { - desc += format_entry('PROVIDES', p.provides.join_lines()) - } - - if p.depends.len > 0 { - desc += format_entry('DEPENDS', p.depends.join_lines()) - } - - if p.optdepends.len > 0 { - desc += format_entry('OPTDEPENDS', p.optdepends.join_lines()) - } - - if p.makedepends.len > 0 { - desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines()) - } - - if p.checkdepends.len > 0 { - desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines()) - } - - return '$desc\n' -} - -// to_files returns a files file valid string representation -pub fn (pkg &Pkg) to_files() string { - return '%FILES%\n$pkg.files.join_lines()\n' -} From 54f40b76385ad02e970114380f4d9a316420e7ba Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Mon, 12 Sep 2022 14:30:17 +0200 Subject: [PATCH 14/51] chore(repo): added readme --- src/repo/README.md | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 src/repo/README.md diff --git a/src/repo/README.md b/src/repo/README.md new file mode 100644 index 0000000..f06b1d3 --- /dev/null +++ b/src/repo/README.md @@ -0,0 +1,43 @@ +# repo + +This module manages the contents of the various repositories stored within a +Vieter instance. + +## Terminology + +* Arch-repository (arch-repo): specific architecture of a given repository. This is what + Pacman actually uses as a repository, and contains its own `.db` & `.files` + files. +* Repository (repo): a collection of arch-repositories. A single repository can + contain packages of different architectures, with each package being stored + in that specific architecture' arch-repository. +* Repository group (repo-group): a collection of repositories. 
Each Vieter
+  instance consists of a single repository group, which manages all underlying
+  repositories & arch-repositories.
+
+## Arch-repository layout
+
+An arch-repository (aka a regular Pacman repository) consists of a directory
+with the following files (`{repo}` should be replaced with the name of the
+repository):
+
+* One or more package directories. These directories follow the naming scheme
+  `${pkgname}-${pkgver}-${pkgrel}`. Each of these directories contains two
+  files, `desc` & `files`. The `desc` file is a list of the package's metadata,
+  while `files` contains a list of all files that the package contains. The
+  latter is used when using `pacman -F`.
+* `{repo}.db` & `{repo}.db.tar.gz`: the database file of the repository. This
+  is just a compressed tarball of all package directories, but only their
+  `desc` files. Both these files should have the same content (`repo-add`
+  creates a symlink, but Vieter just serves the same file for both routes)
+* `{repo}.files` & `{repo}.files.tar.gz`: the same as the `.db` file, but this
+  also contains the `files` files, instead of just the `desc` files.
+
+## Filesystem layout
+
+The repository part of Vieter consists of two directories. One is the `repos`
+directory inside the configured `data_dir`, while the other is the configured
+`pkg_dir`. `repos` contains only the repository group, while `pkg_dir` contains
+the actual package archives. `pkg_dir` is the directory that can take up a
+significant amount of disk space, while `repos` solely consists of small text
+files.

From 71c77e90bcfa2390ed5f92d19ee3d70d065f4dad Mon Sep 17 00:00:00 2001
From: Chewing_Bever
Date: Tue, 4 Oct 2022 16:45:28 +0200
Subject: [PATCH 15/51] refactor(cron): expression parser now uses bitfields
 (closes #148)

---
 CHANGELOG.md                                |   1 +
 src/cron/expression/expression.v            | 139 ------------------
 src/cron/expression/expression_parse.v      | 146 ++++++++++++++++++++
 src/cron/expression/expression_parse_test.v |  39 ++----
 4 files changed, 162 insertions(+), 163 deletions(-)
 create mode 100644 src/cron/expression/expression_parse.v

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a550524..d2dd760 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
 
 * Migrated codebase to V 0.3.2
+* Cron expression parser now uses bitfields instead of bool arrays
 
 ### Fixed
 
diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v
index 438805d..c3ff8c5 100644
--- a/src/cron/expression/expression.v
+++ b/src/cron/expression/expression.v
@@ -134,142 +134,3 @@ pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time {
 
 	return times
 }
-
-// parse_range parses a given string into a range of sorted integers, if
-// possible.
-fn parse_range(s string, min int, max int, mut bitv []bool) ! {
-	mut start := min
-	mut end := max
-	mut interval := 1
-
-	exps := s.split('/')
-
-	if exps.len > 2 {
-		return error('Invalid expression.')
-	}
-
-	if exps[0] != '*' {
-		dash_parts := exps[0].split('-')
-
-		if dash_parts.len > 2 {
-			return error('Invalid expression.')
-		}
-
-		start = dash_parts[0].int()
-
-		// The builtin parsing functions return zero if the string can't be
-		// parsed into a number, so we have to explicitely check whether they
-		// actually entered zero or if it's an invalid number.
- if start == 0 && dash_parts[0] != '0' { - return error('Invalid number.') - } - - // Check whether the start value is out of range - if start < min || start > max { - return error('Out of range.') - } - - if dash_parts.len == 2 { - end = dash_parts[1].int() - - if end == 0 && dash_parts[1] != '0' { - return error('Invalid number.') - } - - if end < start || end > max { - return error('Out of range.') - } - } - } - - if exps.len > 1 { - interval = exps[1].int() - - // interval being zero is always invalid, but we want to check why - // it's invalid for better error messages. - if interval == 0 { - if exps[1] != '0' { - return error('Invalid number.') - } else { - return error('Step size zero not allowed.') - } - } - - if interval > max - min { - return error('Step size too large.') - } - } - // Here, s solely consists of a number, so that's the only value we - // should return. - else if exps[0] != '*' && !exps[0].contains('-') { - bitv[start - min] = true - return - } - - for start <= end { - bitv[start - min] = true - start += interval - } -} - -// bitv_to_ints converts a bit vector into an array containing the -// corresponding values. -fn bitv_to_ints(bitv []bool, min int) []int { - mut out := []int{} - - for i in 0 .. bitv.len { - if bitv[i] { - out << min + i - } - } - - return out -} - -// parse_part parses a given part of a cron expression & returns the -// corresponding array of ints. -fn parse_part(s string, min int, max int) ![]int { - mut bitv := []bool{len: max - min + 1, init: false} - - for range in s.split(',') { - parse_range(range, min, max, mut bitv)! - } - - return bitv_to_ints(bitv, min) -} - -// parse_expression parses an entire cron expression string into a -// CronExpression object, if possible. -pub fn parse_expression(exp string) !CronExpression { - // The filter allows for multiple spaces between parts - mut parts := exp.split(' ').filter(it != '') - - if parts.len < 2 || parts.len > 4 { - return error('Expression must contain between 2 and 4 space-separated parts.') - } - - // For ease of use, we allow the user to only specify as many parts as they - // need. - for parts.len < 4 { - parts << '*' - } - - mut part_results := [][]int{} - - mins := [0, 0, 1, 1] - maxs := [59, 23, 31, 12] - - // This for loop allows us to more clearly propagate the error to the user. - for i, min in mins { - part_results << parse_part(parts[i], min, maxs[i]) or { - return error('An error occurred with part $i: $err.msg()') - } - } - - return CronExpression{ - minutes: part_results[0] - hours: part_results[1] - days: part_results[2] - months: part_results[3] - } -} diff --git a/src/cron/expression/expression_parse.v b/src/cron/expression/expression_parse.v new file mode 100644 index 0000000..4aaec5b --- /dev/null +++ b/src/cron/expression/expression_parse.v @@ -0,0 +1,146 @@ +module expression + +import bitfield + +// parse_range parses a given string into a range of sorted integers. Its +// result is a BitField with set bits for all numbers in the result. 
+fn parse_range(s string, min int, max int) !bitfield.BitField {
+	mut start := min
+	mut end := max
+	mut interval := 1
+	mut bf := bitfield.new(max - min + 1)
+
+	exps := s.split('/')
+
+	if exps.len > 2 {
+		return error('Invalid expression.')
+	}
+
+	if exps[0] != '*' {
+		dash_parts := exps[0].split('-')
+
+		if dash_parts.len > 2 {
+			return error('Invalid expression.')
+		}
+
+		start = dash_parts[0].int()
+
+		// The builtin parsing functions return zero if the string can't be
+		// parsed into a number, so we have to explicitly check whether they
+		// actually entered zero or if it's an invalid number.
+		if start == 0 && dash_parts[0] != '0' {
+			return error('Invalid number.')
+		}
+
+		// Check whether the start value is out of range
+		if start < min || start > max {
+			return error('Out of range.')
+		}
+
+		if dash_parts.len == 2 {
+			end = dash_parts[1].int()
+
+			if end == 0 && dash_parts[1] != '0' {
+				return error('Invalid number.')
+			}
+
+			if end < start || end > max {
+				return error('Out of range.')
+			}
+		}
+	}
+
+	if exps.len > 1 {
+		interval = exps[1].int()
+
+		// interval being zero is always invalid, but we want to check why
+		// it's invalid for better error messages.
+		if interval == 0 {
+			if exps[1] != '0' {
+				return error('Invalid number.')
+			} else {
+				return error('Step size zero not allowed.')
+			}
+		}
+
+		if interval > max - min {
+			return error('Step size too large.')
+		}
+	}
+	// Here, s solely consists of a number, so that's the only value we
+	// should return.
+	else if exps[0] != '*' && !exps[0].contains('-') {
+		bf.set_bit(start - min)
+		return bf
+	}
+
+	for start <= end {
+		bf.set_bit(start - min)
+		start += interval
+	}
+
+	return bf
+}
+
+// bf_to_ints takes a BitField and converts it into the expected list of actual
+// integers.
+fn bf_to_ints(bf bitfield.BitField, min int) []int {
+	mut out := []int{}
+
+	for i in 0 .. bf.get_size() {
+		if bf.get_bit(i) == 1 {
+			out << min + i
+		}
+	}
+
+	return out
+}
+
+// parse_part parses a given part of a cron expression & returns the
+// corresponding array of ints.
+fn parse_part(s string, min int, max int) ![]int {
+	mut bf := bitfield.new(max - min + 1)
+
+	for range in s.split(',') {
+		bf2 := parse_range(range, min, max)!
+		bf = bitfield.bf_or(bf, bf2)
+	}
+
+	return bf_to_ints(bf, min)
+}
+
+// parse_expression parses an entire cron expression string into a
+// CronExpression object, if possible.
+pub fn parse_expression(exp string) !CronExpression {
+	// The filter allows for multiple spaces between parts
+	mut parts := exp.split(' ').filter(it != '')
+
+	if parts.len < 2 || parts.len > 4 {
+		return error('Expression must contain between 2 and 4 space-separated parts.')
+	}
+
+	// For ease of use, we allow the user to only specify as many parts as they
+	// need.
+	for parts.len < 4 {
+		parts << '*'
+	}
+
+	mut part_results := [][]int{}
+
+	mins := [0, 0, 1, 1]
+	maxs := [59, 23, 31, 12]
+
+	// This for loop allows us to more clearly propagate the error to the user.
+ for i, min in mins { + part_results << parse_part(parts[i], min, maxs[i]) or { + return error('An error occurred with part $i: $err.msg()') + } + } + + return CronExpression{ + minutes: part_results[0] + hours: part_results[1] + days: part_results[2] + months: part_results[3] + } +} diff --git a/src/cron/expression/expression_parse_test.v b/src/cron/expression/expression_parse_test.v index 5c12329..92e8291 100644 --- a/src/cron/expression/expression_parse_test.v +++ b/src/cron/expression/expression_parse_test.v @@ -3,26 +3,22 @@ module expression // parse_range_error returns the returned error message. If the result is '', // that means the function didn't error. fn parse_range_error(s string, min int, max int) string { - mut bitv := []bool{len: max - min + 1, init: false} - - parse_range(s, min, max, mut bitv) or { return err.msg } + parse_range(s, min, max) or { return err.msg } return '' } // =====parse_range===== fn test_range_star_range() ! { - mut bitv := []bool{len: 6, init: false} - parse_range('*', 0, 5, mut bitv)! + bf := parse_range('*', 0, 5)! - assert bitv == [true, true, true, true, true, true] + assert bf_to_ints(bf, 0) == [0, 1, 2, 3, 4, 5] } fn test_range_number() ! { - mut bitv := []bool{len: 6, init: false} - parse_range('4', 0, 5, mut bitv)! + bf := parse_range('4', 0, 5)! - assert bitv_to_ints(bitv, 0) == [4] + assert bf_to_ints(bf, 0) == [4] } fn test_range_number_too_large() ! { @@ -38,17 +34,15 @@ fn test_range_number_invalid() ! { } fn test_range_step_star_1() ! { - mut bitv := []bool{len: 21, init: false} - parse_range('*/4', 0, 20, mut bitv)! + bf := parse_range('*/4', 0, 20)! - assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20] + assert bf_to_ints(bf, 0) == [0, 4, 8, 12, 16, 20] } fn test_range_step_star_2() ! { - mut bitv := []bool{len: 8, init: false} - parse_range('*/3', 1, 8, mut bitv)! + bf := parse_range('*/3', 1, 8)! - assert bitv_to_ints(bitv, 1) == [1, 4, 7] + assert bf_to_ints(bf, 1) == [1, 4, 7] } fn test_range_step_star_too_large() ! { @@ -60,10 +54,9 @@ fn test_range_step_zero() ! { } fn test_range_step_number() ! { - mut bitv := []bool{len: 21, init: false} - parse_range('5/4', 2, 22, mut bitv)! + bf := parse_range('5/4', 2, 22)! - assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21] + assert bf_to_ints(bf, 2) == [5, 9, 13, 17, 21] } fn test_range_step_number_too_large() ! { @@ -75,17 +68,15 @@ fn test_range_step_number_too_small() ! { } fn test_range_dash() ! { - mut bitv := []bool{len: 10, init: false} - parse_range('4-8', 0, 9, mut bitv)! + bf := parse_range('4-8', 0, 9)! - assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8] + assert bf_to_ints(bf, 0) == [4, 5, 6, 7, 8] } fn test_range_dash_step() ! { - mut bitv := []bool{len: 10, init: false} - parse_range('4-8/2', 0, 9, mut bitv)! + bf := parse_range('4-8/2', 0, 9)! 
- assert bitv_to_ints(bitv, 0) == [4, 6, 8] + assert bf_to_ints(bf, 0) == [4, 6, 8] } // =====parse_part===== From 6281ef76070f1ccf7b2bed05e06f9ca7cdf5322c Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 6 Dec 2022 13:50:25 +0100 Subject: [PATCH 16/51] feat: start of agent code --- src/agent/agent.v | 25 ++++++++++++++++++ src/agent/cli.v | 31 ++++++++++++++++++++++ src/agent/daemon.v | 65 ++++++++++++++++++++++++++++++++++++++++++++++ src/build/build.v | 10 +++++++ src/main.v | 2 ++ 5 files changed, 133 insertions(+) create mode 100644 src/agent/agent.v create mode 100644 src/agent/cli.v create mode 100644 src/agent/daemon.v diff --git a/src/agent/agent.v b/src/agent/agent.v new file mode 100644 index 0000000..3affd21 --- /dev/null +++ b/src/agent/agent.v @@ -0,0 +1,25 @@ +module agent + +import log +import os + +const log_file_name = 'vieter.agent.log' + +// agent start an agent service +pub fn agent(conf Config) ! { + // Configure logger + log_level := log.level_from_tag(conf.log_level) or { + return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') + } + + mut logger := log.Log{ + level: log_level + } + + log_file := os.join_path_single(conf.data_dir, agent.log_file_name) + logger.set_full_logpath(log_file) + logger.log_to_console_too() + + mut d := agent.agent_init(logger, conf) + d.run() +} diff --git a/src/agent/cli.v b/src/agent/cli.v new file mode 100644 index 0000000..46942ec --- /dev/null +++ b/src/agent/cli.v @@ -0,0 +1,31 @@ +module agent + +import cli +import conf as vconf + +struct Config { +pub: + log_level string = 'WARN' + api_key string + address string + data_dir string + max_concurrent_builds int = 1 + polling_frequency int = 30 + // Architecture of agent + /* arch string */ + /* image_rebuild_frequency int = 1440 */ +} + +// cmd returns the cli module that handles the cron daemon. +pub fn cmd() cli.Command { + return cli.Command{ + name: 'agent' + description: 'Start an agent service & start polling for new builds.' + execute: fn (cmd cli.Command) ! { + config_file := cmd.flags.get_string('config-file')! + conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! + + agent(conf)! + } + } +} diff --git a/src/agent/daemon.v b/src/agent/daemon.v new file mode 100644 index 0000000..389a148 --- /dev/null +++ b/src/agent/daemon.v @@ -0,0 +1,65 @@ +module agent + +import log +import sync.stdatomic +import build { BuildConfig } +import client + +const ( + build_empty = 0 + build_running = 1 + build_done = 2 +) + +struct AgentDaemon { + logger shared log.Log + conf Config + // Which builds are currently running; length is same as + // conf.max_concurrent_builds + builds []BuildConfig + // Atomic variables used to detect when a build has finished; length is the + // same as conf.max_concurrent_builds + client client.Client + atomics []u64 +} + +fn agent_init(logger log.Log, conf Config) AgentDaemon { + mut d := AgentDaemon{ + logger: logger + client: client.new(conf.address, conf.api_key) + conf: conf + builds: []BuildConfig{len: conf.max_concurrent_builds} + atomics: []u64{len: conf.max_concurrent_builds} + } + + return d +} + +pub fn (mut d AgentDaemon) run() { + for { + free_builds := d.update_atomics() + + if free_builds > 0 { + + } + + } +} + +// clean_finished_builds checks for each build whether it's completed, and sets +// it to free again if so. The return value is how many fields are now set to +// free. +fn (mut d AgentDaemon) update_atomics() int { + mut count := 0 + + for i in 0 .. 
d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == agent.build_done { + stdatomic.store_u64(&d.atomics[i], agent.build_empty) + count++ + } else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty { + count++ + } + } + + return count +} diff --git a/src/build/build.v b/src/build/build.v index 247df6e..b7c5cb6 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -16,6 +16,16 @@ const ( '/usr/local/bin', '/usr/bin/site_perl', '/usr/bin/vendor_perl', '/usr/bin/core_perl'] ) +pub struct BuildConfig { +pub: + id int + kind string + url string + branch string + repo string + base_image string +} + // create_build_image creates a builder image given some base image which can // then be used to build & package Arch images. It mostly just updates the // system, install some necessary packages & creates a non-root user to run diff --git a/src/main.v b/src/main.v index fc09f7e..424e328 100644 --- a/src/main.v +++ b/src/main.v @@ -9,6 +9,7 @@ import console.schedule import console.man import console.aur import cron +import agent fn main() { mut app := cli.Command{ @@ -40,6 +41,7 @@ fn main() { schedule.cmd(), man.cmd(), aur.cmd(), + agent.cmd() ] } app.setup() From 9a49d96e202208453169524585eb28882628f10f Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 6 Dec 2022 14:11:17 +0100 Subject: [PATCH 17/51] feat(build): start of server-side job queue --- src/agent/agent.v | 2 +- src/agent/cli.v | 16 +++++----- src/agent/daemon.v | 10 +++--- src/build/build.v | 10 +++--- src/build/queue.v | 70 +++++++++++++++++++++++++++++++++++++++++ src/main.v | 2 +- src/server/api_builds.v | 39 +++++++++++++++++++++++ src/server/cli.v | 14 +++++---- src/server/server.v | 39 ++++++++++++++++++++++- 9 files changed, 174 insertions(+), 28 deletions(-) create mode 100644 src/build/queue.v create mode 100644 src/server/api_builds.v diff --git a/src/agent/agent.v b/src/agent/agent.v index 3affd21..1758c85 100644 --- a/src/agent/agent.v +++ b/src/agent/agent.v @@ -20,6 +20,6 @@ pub fn agent(conf Config) ! { logger.set_full_logpath(log_file) logger.log_to_console_too() - mut d := agent.agent_init(logger, conf) + mut d := agent_init(logger, conf) d.run() } diff --git a/src/agent/cli.v b/src/agent/cli.v index 46942ec..a0a249c 100644 --- a/src/agent/cli.v +++ b/src/agent/cli.v @@ -5,15 +5,15 @@ import conf as vconf struct Config { pub: - log_level string = 'WARN' - api_key string - address string - data_dir string - max_concurrent_builds int = 1 - polling_frequency int = 30 + log_level string = 'WARN' + api_key string + address string + data_dir string + max_concurrent_builds int = 1 + polling_frequency int = 30 // Architecture of agent - /* arch string */ - /* image_rebuild_frequency int = 1440 */ + // arch string + // image_rebuild_frequency int = 1440 } // cmd returns the cli module that handles the cron daemon. 
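
Note: the BuildConfig struct introduced in the previous commit is the unit of
work that agents will eventually poll from the server. As a minimal sketch of
one such value (every field value below is invented for illustration; only the
field names and the 'archlinux:base-devel' default come from this series):

	// All values here are made up for the example; only the field names are
	// taken from the BuildConfig struct defined in src/build/build.v.
	config := build.BuildConfig{
		id: 1
		kind: 'git'
		url: 'https://example.com/pkgbuilds/some-package.git'
		branch: 'main'
		repo: 'vieter'
		base_image: 'archlinux:base-devel'
	}
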
diff --git a/src/agent/daemon.v b/src/agent/daemon.v index 389a148..fd5fe04 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -13,13 +13,13 @@ const ( struct AgentDaemon { logger shared log.Log - conf Config + conf Config // Which builds are currently running; length is same as // conf.max_concurrent_builds builds []BuildConfig // Atomic variables used to detect when a build has finished; length is the // same as conf.max_concurrent_builds - client client.Client + client client.Client atomics []u64 } @@ -39,10 +39,8 @@ pub fn (mut d AgentDaemon) run() { for { free_builds := d.update_atomics() - if free_builds > 0 { - - } - + if free_builds > 0 { + } } } diff --git a/src/build/build.v b/src/build/build.v index b7c5cb6..13d3e45 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -18,11 +18,11 @@ const ( pub struct BuildConfig { pub: - id int - kind string - url string - branch string - repo string + target_id int + kind string + url string + branch string + repo string base_image string } diff --git a/src/build/queue.v b/src/build/queue.v new file mode 100644 index 0000000..81d3fa9 --- /dev/null +++ b/src/build/queue.v @@ -0,0 +1,70 @@ +module build + +import models { Target } +import cron.expression { CronExpression, parse_expression } +import time +import datatypes { MinHeap } + +struct BuildJob { +pub: + // Earliest point this + timestamp time.Time + config BuildConfig +} + +// Overloaded operator for comparing ScheduledBuild objects +fn (r1 BuildJob) < (r2 BuildJob) bool { + return r1.timestamp < r2.timestamp +} + +pub struct BuildJobQueue { + // Schedule to use for targets without explicitely defined cron expression + default_schedule CronExpression + // Base image to use for targets without defined base image + default_base_image string +mut: + // For each architecture, a priority queue is tracked + queues map[string]MinHeap + // Each queued build job is also stored in a map, with the keys being the + // target IDs. This is used when removing or editing targets. + // jobs map[int]BuildJob +} + +pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue { + return BuildJobQueue{ + default_schedule: default_schedule + default_base_image: default_base_image + } +} + +// insert a new job into the queue for a given target on an architecture. +pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { + if arch !in q.queues { + q.queues[arch] = MinHeap{} + } + + ce := if target.schedule != '' { + parse_expression(target.schedule) or { + return error("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()") + } + } else { + q.default_schedule + } + + timestamp := ce.next_from_now()! 
+ + job := BuildJob{ + timestamp: timestamp + config: BuildConfig{ + target_id: target.id + kind: target.kind + url: target.url + branch: target.branch + repo: target.repo + // TODO make this configurable + base_image: q.default_base_image + } + } + + q.queues[arch].insert(job) +} diff --git a/src/main.v b/src/main.v index 424e328..34387bf 100644 --- a/src/main.v +++ b/src/main.v @@ -41,7 +41,7 @@ fn main() { schedule.cmd(), man.cmd(), aur.cmd(), - agent.cmd() + agent.cmd(), ] } app.setup() diff --git a/src/server/api_builds.v b/src/server/api_builds.v new file mode 100644 index 0000000..888fe9d --- /dev/null +++ b/src/server/api_builds.v @@ -0,0 +1,39 @@ +module server + +/* import web */ +/* import web.response { new_data_response, new_response } */ +/* import time */ +/* import build { BuildConfig } */ +/* // import os */ +/* // import util */ +/* // import models { BuildLog, BuildLogFilter } */ + +/* ['/api/v1/builds/poll'; auth; get] */ +/* fn (mut app App) v1_poll_build_queue() web.Result { */ +/* arch := app.query['arch'] or { */ +/* return app.json(.bad_request, new_response('Missing arch query arg.')) */ +/* } */ + +/* max_str := app.query['max'] or { */ +/* return app.json(.bad_request, new_response('Missing max query arg.')) */ +/* } */ +/* max := max_str.int() */ + +/* mut out := []BuildConfig{} */ + +/* now := time.now() */ + +/* lock app.build_queues { */ +/* mut queue := app.build_queues[arch] or { return app.json(.ok, new_data_response(out)) } */ + +/* for queue.len() > 0 && out.len < max { */ +/* next := queue.peek() or { return app.status(.internal_server_error) } */ + +/* if next.timestamp < now { */ +/* out << queue.pop() or { return app.status(.internal_server_error) }.config */ +/* } */ +/* } */ +/* } */ + +/* return app.json(.ok, new_data_response(out)) */ +/* } */ diff --git a/src/server/cli.v b/src/server/cli.v index a9644f3..2fede6c 100644 --- a/src/server/cli.v +++ b/src/server/cli.v @@ -5,12 +5,14 @@ import conf as vconf struct Config { pub: - log_level string = 'WARN' - pkg_dir string - data_dir string - api_key string - default_arch string - port int = 8000 + log_level string = 'WARN' + pkg_dir string + data_dir string + api_key string + default_arch string + global_schedule string = '0 3' + port int = 8000 + base_image string = 'archlinux:base-devel' } // cmd returns the cli submodule that handles starting the server diff --git a/src/server/server.v b/src/server/server.v index d5f6135..fb45e6d 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -6,6 +6,8 @@ import log import repo import util import db +import build { BuildJobQueue } +import cron.expression const ( log_file_name = 'vieter.log' @@ -20,9 +22,37 @@ pub: conf Config [required; web_global] pub mut: repo repo.RepoGroupManager [required; web_global] - db db.VieterDb + // Keys are the various architectures for packages + job_queue BuildJobQueue [required; web_global] + db db.VieterDb } +// fn (mut app App) init_build_queues() { +// // Initialize build queues +// mut i := 0 +// mut targets := app.db.get_targets(limit: 25) + +// default_ce := expression.parse_expression(conf.global_schedule) or { return } + +// for targets.len > 0 { +// for t in targets { +// ce := parse_expression(t.schedule) or { default_ce } + +// for arch in t.arch { +// if arch !in app.build_queues { +// app.build_queues[arch] = Minheap{} +// } + +// build_config := BuildConfig{} +// app.build_queues[arch].push(ScheduledBuild{ +// timestamp: ce.next() +// config: build_config +// }) +// } +// } +// } +//} + // server 
starts the web server & starts listening for requests pub fn server(conf Config) ! { // Prevent using 'any' as the default arch @@ -30,6 +60,10 @@ pub fn server(conf Config) ! { util.exit_with_message(1, "'any' is not allowed as the value for default_arch.") } + global_ce := expression.parse_expression(conf.global_schedule) or { + util.exit_with_message(1, 'Invalid global cron expression: $err.msg()') + } + // Configure logger log_level := log.level_from_tag(conf.log_level) or { util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') @@ -71,11 +105,14 @@ pub fn server(conf Config) ! { util.exit_with_message(1, 'Failed to initialize database: $err.msg()') } + mut queue := build.new_job_queue(global_ce, conf.base_image) + web.run(&App{ logger: logger api_key: conf.api_key conf: conf repo: repo db: db + job_queue: queue }, conf.port) } From c57de4d8ee4994b6efcc3370690835e86bd893e4 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 20:33:51 +0100 Subject: [PATCH 18/51] feat(server): initialize job queue on start; api endpoint for polling jobs --- src/build/queue.v | 113 +++++++++++++++++++++++++++++++--------- src/server/api_builds.v | 50 ++++++------------ src/server/server.v | 52 ++++++++---------- 3 files changed, 129 insertions(+), 86 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index 81d3fa9..65b279e 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -4,6 +4,7 @@ import models { Target } import cron.expression { CronExpression, parse_expression } import time import datatypes { MinHeap } +import util struct BuildJob { pub: @@ -23,6 +24,7 @@ pub struct BuildJobQueue { // Base image to use for targets without defined base image default_base_image string mut: + mutex shared util.Dummy // For each architecture, a priority queue is tracked queues map[string]MinHeap // Each queued build job is also stored in a map, with the keys being the @@ -39,32 +41,95 @@ pub fn new_job_queue(default_schedule CronExpression, default_base_image string) // insert a new job into the queue for a given target on an architecture. pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { - if arch !in q.queues { - q.queues[arch] = MinHeap{} - } - - ce := if target.schedule != '' { - parse_expression(target.schedule) or { - return error("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()") + lock q.mutex { + if arch !in q.queues { + q.queues[arch] = MinHeap{} } - } else { - q.default_schedule - } - timestamp := ce.next_from_now()! - - job := BuildJob{ - timestamp: timestamp - config: BuildConfig{ - target_id: target.id - kind: target.kind - url: target.url - branch: target.branch - repo: target.repo - // TODO make this configurable - base_image: q.default_base_image + ce := if target.schedule != '' { + parse_expression(target.schedule) or { + return error("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()") + } + } else { + q.default_schedule } - } - q.queues[arch].insert(job) + timestamp := ce.next_from_now()! + + job := BuildJob{ + timestamp: timestamp + config: BuildConfig{ + target_id: target.id + kind: target.kind + url: target.url + branch: target.branch + repo: target.repo + // TODO make this configurable + base_image: q.default_base_image + } + } + + q.queues[arch].insert(job) + } +} + +// peek shows the first job for the given architecture that's ready to be +// executed, if present. 
+pub fn (q &BuildJobQueue) peek(arch string) ?BuildJob { + rlock q.mutex { + if arch !in q.queues { + return none + } + + job := q.queues[arch].peek() or { return none } + + if job.timestamp < time.now() { + return job + } + } + + return none +} + +// pop removes the first job for the given architecture that's ready to be +// executed from the queue and returns it, if present. +pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { + lock q.mutex { + if arch !in q.queues { + return none + } + + job := q.queues[arch].peek() or { return none } + + if job.timestamp < time.now() { + return q.queues[arch].pop() + } + } + + return none +} + +// pop_n tries to pop at most n available jobs for the given architecture. +pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { + lock q.mutex { + if arch !in q.queues { + return [] + } + + mut out := []BuildJob{} + + for out.len < n { + job := q.queues[arch].peek() or { break } + + if job.timestamp < time.now() { + out << q.queues[arch].pop() or { break } + } else { + break + } + } + + return out + } + + return [] } diff --git a/src/server/api_builds.v b/src/server/api_builds.v index 888fe9d..62948cd 100644 --- a/src/server/api_builds.v +++ b/src/server/api_builds.v @@ -1,39 +1,23 @@ module server -/* import web */ -/* import web.response { new_data_response, new_response } */ -/* import time */ -/* import build { BuildConfig } */ -/* // import os */ -/* // import util */ -/* // import models { BuildLog, BuildLogFilter } */ +import web +import web.response { new_data_response, new_response } +// import os +// import util +// import models { BuildLog, BuildLogFilter } -/* ['/api/v1/builds/poll'; auth; get] */ -/* fn (mut app App) v1_poll_build_queue() web.Result { */ -/* arch := app.query['arch'] or { */ -/* return app.json(.bad_request, new_response('Missing arch query arg.')) */ -/* } */ +['/api/v1/builds/poll'; auth; get] +fn (mut app App) v1_poll_build_queue() web.Result { + arch := app.query['arch'] or { + return app.json(.bad_request, new_response('Missing arch query arg.')) + } -/* max_str := app.query['max'] or { */ -/* return app.json(.bad_request, new_response('Missing max query arg.')) */ -/* } */ -/* max := max_str.int() */ + max_str := app.query['max'] or { + return app.json(.bad_request, new_response('Missing max query arg.')) + } + max := max_str.int() -/* mut out := []BuildConfig{} */ + mut out := app.job_queue.pop_n(arch, max) -/* now := time.now() */ - -/* lock app.build_queues { */ -/* mut queue := app.build_queues[arch] or { return app.json(.ok, new_data_response(out)) } */ - -/* for queue.len() > 0 && out.len < max { */ -/* next := queue.peek() or { return app.status(.internal_server_error) } */ - -/* if next.timestamp < now { */ -/* out << queue.pop() or { return app.status(.internal_server_error) }.config */ -/* } */ -/* } */ -/* } */ - -/* return app.json(.ok, new_data_response(out)) */ -/* } */ + return app.json(.ok, new_data_response(out)) +} diff --git a/src/server/server.v b/src/server/server.v index fb45e6d..e2c19c2 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -24,34 +24,25 @@ pub mut: repo repo.RepoGroupManager [required; web_global] // Keys are the various architectures for packages job_queue BuildJobQueue [required; web_global] - db db.VieterDb + db db.VieterDb } -// fn (mut app App) init_build_queues() { -// // Initialize build queues -// mut i := 0 -// mut targets := app.db.get_targets(limit: 25) +fn (mut app App) init_job_queue() ! 
{ + // Initialize build queues + mut targets := app.db.get_targets(limit: 25) + mut i := u64(0) -// default_ce := expression.parse_expression(conf.global_schedule) or { return } + for targets.len > 0 { + for target in targets { + for arch in target.arch { + app.job_queue.insert(target, arch.value)! + } + } -// for targets.len > 0 { -// for t in targets { -// ce := parse_expression(t.schedule) or { default_ce } - -// for arch in t.arch { -// if arch !in app.build_queues { -// app.build_queues[arch] = Minheap{} -// } - -// build_config := BuildConfig{} -// app.build_queues[arch].push(ScheduledBuild{ -// timestamp: ce.next() -// config: build_config -// }) -// } -// } -//} + i += 25 + targets = app.db.get_targets(limit: 25, offset: i) + } +} // server starts the web server & starts listening for requests pub fn server(conf Config) ! { @@ -105,14 +96,17 @@ pub fn server(conf Config) ! { util.exit_with_message(1, 'Failed to initialize database: $err.msg()') } - mut queue := build.new_job_queue(global_ce, conf.base_image) - - web.run(&App{ + mut app := &App{ logger: logger api_key: conf.api_key conf: conf repo: repo db: db - job_queue: queue - }, conf.port) + job_queue: build.new_job_queue(global_ce, conf.base_image) + } + app.init_job_queue() or { + util.exit_with_message(1, 'Failed to initialize job queue: $err.msg()') + } + + web.run(app, conf.port) } From 0a5c4295e008b3687d160957328a934c85489f9b Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 20:59:43 +0100 Subject: [PATCH 19/51] feat(server): properly reschedule jobs after polling --- src/build/queue.v | 50 ++++++++++++++++++++++++++++++++++------- src/server/api_builds.v | 6 ++--- 2 files changed, 45 insertions(+), 11 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index 65b279e..b704926 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -8,12 +8,16 @@ import util struct BuildJob { pub: - // Earliest point this + // Next timestamp from which point this job is allowed to be executed timestamp time.Time - config BuildConfig + // Required for calculating next timestamp after having pop'ed a job + ce CronExpression + // Actual build config sent to the agent + config BuildConfig } -// Overloaded operator for comparing ScheduledBuild objects +// Allows BuildJob structs to be sorted according to their timestamp in +// MinHeaps fn (r1 BuildJob) < (r2 BuildJob) bool { return r1.timestamp < r2.timestamp } @@ -39,7 +43,9 @@ pub fn new_job_queue(default_schedule CronExpression, default_base_image string) } } -// insert a new job into the queue for a given target on an architecture. +// insert a new target's job into the queue for the given architecture. This +// job will then be endlessly rescheduled after being pop'ed, unless removed +// explicitly. pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { lock q.mutex { if arch !in q.queues { @@ -58,6 +64,7 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { timestamp := ce.next_from_now()! job := BuildJob{ timestamp: timestamp + ce: ce config: BuildConfig{ target_id: target.id kind: target.kind @@ -69,10 +76,25 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { } } + dump(job) q.queues[arch].insert(job) } } +// reschedule the given job by calculating the next timestamp and re-adding it +// to its respective queue. This function is called by the pop functions +// *after* having pop'ed the job. +fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! { + new_timestamp := job.ce.next_from_now()!
+ + new_job := BuildJob{ + ...job + timestamp: new_timestamp + } + + q.queues[arch].insert(new_job) +} + // peek shows the first job for the given architecture that's ready to be // executed, if present. pub fn (q &BuildJobQueue) peek(arch string) ?BuildJob { @@ -99,10 +121,17 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { return none } - job := q.queues[arch].peek() or { return none } + mut job := q.queues[arch].peek() or { return none } if job.timestamp < time.now() { - return q.queues[arch].pop() + job = q.queues[arch].pop()? + + // TODO how do we handle this properly? Is it even possible for a + // cron expression to not return a next time if it's already been + // used before? + q.reschedule(job, arch) or {} + + return job } } @@ -119,10 +148,15 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { mut out := []BuildJob{} for out.len < n { - job := q.queues[arch].peek() or { break } + mut job := q.queues[arch].peek() or { break } if job.timestamp < time.now() { - out << q.queues[arch].pop() or { break } + job = q.queues[arch].pop() or { break } + + // TODO idem + q.reschedule(job, arch) or {} + + out << job } else { break } diff --git a/src/server/api_builds.v b/src/server/api_builds.v index 62948cd..ec3c8ec 100644 --- a/src/server/api_builds.v +++ b/src/server/api_builds.v @@ -6,8 +6,8 @@ import web.response { new_data_response, new_response } // import util // import models { BuildLog, BuildLogFilter } -['/api/v1/builds/poll'; auth; get] -fn (mut app App) v1_poll_build_queue() web.Result { +['/api/v1/jobs/poll'; auth; get] +fn (mut app App) v1_poll_job_queue() web.Result { arch := app.query['arch'] or { return app.json(.bad_request, new_response('Missing arch query arg.')) } @@ -17,7 +17,7 @@ fn (mut app App) v1_poll_build_queue() web.Result { } max := max_str.int() - mut out := app.job_queue.pop_n(arch, max) + mut out := app.job_queue.pop_n(arch, max).map(it.config) return app.json(.ok, new_data_response(out)) } From 5bab1f77f0686a3db9eedb2ecab36d7592299655 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 21:21:58 +0100 Subject: [PATCH 20/51] feat(agent): begin reforming for new api --- src/agent/daemon.v | 7 ++++--- src/agent/images.v | 49 ++++++++++++++++++++++++++++++++++++++++++++++ src/agent/log.v | 35 +++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 src/agent/images.v create mode 100644 src/agent/log.v diff --git a/src/agent/daemon.v b/src/agent/daemon.v index fd5fe04..71f4780 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -14,6 +14,8 @@ const ( struct AgentDaemon { logger shared log.Log conf Config + // List of last built builder images + builder_images []string // Which builds are currently running; length is same as // conf.max_concurrent_builds builds []BuildConfig @@ -44,9 +46,8 @@ pub fn (mut d AgentDaemon) run() { } } -// clean_finished_builds checks for each build whether it's completed, and sets -// it to free again if so. The return value is how many fields are now set to -// free. +// update_atomics checks for each build whether it's completed, and sets it to +// free again if so. The return value is how many fields are now set to free. 
fn (mut d AgentDaemon) update_atomics() int { mut count := 0 diff --git a/src/agent/images.v b/src/agent/images.v new file mode 100644 index 0000000..454f85f --- /dev/null +++ b/src/agent/images.v @@ -0,0 +1,49 @@ +module agent + +import time +import docker + +struct ImageManager { + images map[string]string + timestamps map[string]time.Time +} + +// clean_old_base_images tries to remove any old but still present builder +// images. +fn (mut d AgentDaemon) clean_old_base_images() { + mut i := 0 + + mut dd := docker.new_conn() or { + d.lerror('Failed to connect to Docker socket.') + return + } + + defer { + dd.close() or {} + } + + for i < d.builder_images.len - 1 { + // For each builder image, we try to remove it by calling the Docker + // API. If the function returns an error or false, that means the image + // wasn't deleted. Therefore, we move the index over. If the function + // returns true, the array's length has decreased by one so we don't + // move the index. + dd.remove_image(d.builder_images[i]) or { i += 1 } + } +} + +// rebuild_base_image builds a builder image from the given base image. +/* fn (mut d AgentDaemon) build_base_image(base_image string) bool { */ +/* d.linfo('Rebuilding builder image....') */ + +/* d.builder_images << build.create_build_image(d.base_image) or { */ +/* d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timout}s...') */ +/* d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout) */ + +/* return false */ +/* } */ + +/* d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency) */ + +/* return true */ +/* } */ diff --git a/src/agent/log.v b/src/agent/log.v new file mode 100644 index 0000000..d47df0f --- /dev/null +++ b/src/agent/log.v @@ -0,0 +1,35 @@ +module agent + +import log + +// log reate a log message with the given level +pub fn (mut d AgentDaemon) log(msg string, level log.Level) { + lock d.logger { + d.logger.send_output(msg, level) + } +} + +// lfatal create a log message with the fatal level +pub fn (mut d AgentDaemon) lfatal(msg string) { + d.log(msg, log.Level.fatal) +} + +// lerror create a log message with the error level +pub fn (mut d AgentDaemon) lerror(msg string) { + d.log(msg, log.Level.error) +} + +// lwarn create a log message with the warn level +pub fn (mut d AgentDaemon) lwarn(msg string) { + d.log(msg, log.Level.warn) +} + +// linfo create a log message with the info level +pub fn (mut d AgentDaemon) linfo(msg string) { + d.log(msg, log.Level.info) +} + +// ldebug create a log message with the debug level +pub fn (mut d AgentDaemon) ldebug(msg string) { + d.log(msg, log.Level.debug) +} From 7ef8d4b846a6245258a23503862e8f95a2985d81 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 21:50:34 +0100 Subject: [PATCH 21/51] feat(agent): wrote ImageManager --- src/agent/cli.v | 2 +- src/agent/daemon.v | 4 +-- src/agent/images.v | 73 ++++++++++++++++++++++++++-------------------- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/src/agent/cli.v b/src/agent/cli.v index a0a249c..063d960 100644 --- a/src/agent/cli.v +++ b/src/agent/cli.v @@ -13,7 +13,7 @@ pub: polling_frequency int = 30 // Architecture of agent // arch string - // image_rebuild_frequency int = 1440 + image_rebuild_frequency int = 1440 } // cmd returns the cli module that handles the cron daemon. 
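The ImageManager introduced by this patch keeps one list of builder images per base image, plus the timestamp of the newest build, so images can be refreshed lazily. A minimal usage sketch in V, written as a hypothetical helper inside the agent module (the `get` accessor only lands in the next patch; the error handling is illustrative, not part of the diffs):

	// Make sure an up-to-date builder image exists for the given base
	// image, then return the newest one. This mirrors what the daemon
	// later does inline before starting a build.
	fn (mut d AgentDaemon) ensure_builder_image(base_image string) ?string {
		// refresh_image only builds a new image if none is cached yet or
		// the cached one is older than the configured rebuild frequency
		d.images.refresh_image(base_image) or {
			d.lerror('Failed to refresh builder image: $err.msg()')
			return none
		}

		return d.images.get(base_image)
	}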
diff --git a/src/agent/daemon.v b/src/agent/daemon.v index 71f4780..0508790 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -14,8 +14,7 @@ const ( struct AgentDaemon { logger shared log.Log conf Config - // List of last built builder images - builder_images []string + images ImageManager // Which builds are currently running; length is same as // conf.max_concurrent_builds builds []BuildConfig @@ -30,6 +29,7 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon { logger: logger client: client.new(conf.address, conf.api_key) conf: conf + images: new_image_manager(conf.image_rebuild_frequency) builds: []BuildConfig{len: conf.max_concurrent_builds} atomics: []u64{len: conf.max_concurrent_builds} } diff --git a/src/agent/images.v b/src/agent/images.v index 454f85f..aee2be0 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -2,48 +2,59 @@ module agent import time import docker +import build struct ImageManager { - images map[string]string - timestamps map[string]time.Time +mut: + refresh_frequency int + images map[string][]string [required] + timestamps map[string]time.Time [required] } -// clean_old_base_images tries to remove any old but still present builder -// images. -fn (mut d AgentDaemon) clean_old_base_images() { - mut i := 0 +fn new_image_manager(refresh_frequency int) ImageManager { + return ImageManager{ + refresh_frequency: refresh_frequency + images: map[string][]string{} + timestamps: map[string]time.Time{} + } +} - mut dd := docker.new_conn() or { - d.lerror('Failed to connect to Docker socket.') +fn (mut m ImageManager) refresh_image(base_image string) ! { + // No need to refresh the image if the previous one is still new enough + if base_image in m.timestamps + && m.timestamps[base_image].add_seconds(m.refresh_frequency) > time.now() { return } + // TODO use better image tags for built images + new_image := build.create_build_image(base_image) or { + return error('Failed to build builder image from base image $base_image') + } + + m.images[base_image] << new_image + m.timestamps[base_image] = time.now() +} + +// clean_old_images tries to remove any old but still present builder images. +fn (mut m ImageManager) clean_old_images() { + mut dd := docker.new_conn() or { return } + defer { dd.close() or {} } - for i < d.builder_images.len - 1 { - // For each builder image, we try to remove it by calling the Docker - // API. If the function returns an error or false, that means the image - // wasn't deleted. Therefore, we move the index over. If the function - // returns true, the array's length has decreased by one so we don't - // move the index. - dd.remove_image(d.builder_images[i]) or { i += 1 } + mut i := 0 + + for image in m.images.keys() { + i = 0 + + for i < m.images[image].len - 1 { + // For each builder image, we try to remove it by calling the Docker + // API. If the function returns an error or false, that means the image + // wasn't deleted. Therefore, we move the index over. If the function + // returns true, the array's length has decreased by one so we don't + // move the index. + dd.remove_image(m.images[image][i]) or { i += 1 } + } } } - -// rebuild_base_image builds a builder image from the given base image. -/* fn (mut d AgentDaemon) build_base_image(base_image string) bool { */ -/* d.linfo('Rebuilding builder image....') */ - -/* d.builder_images << build.create_build_image(d.base_image) or { */ -/* d.lerror('Failed to rebuild base image. 
Retrying in ${daemon.rebuild_base_image_retry_timout}s...') */ -/* d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout) */ - -/* return false */ -/* } */ - -/* d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency) */ - -/* return true */ -/* } */ From 6f23d690a7a0a78d7d9203850c96204832149df0 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 22:09:57 +0100 Subject: [PATCH 22/51] feat(agent): partially wrote daemon code --- src/agent/cli.v | 2 ++ src/agent/daemon.v | 77 ++++++++++++++++++++++++++++++++++++++++++++-- src/agent/images.v | 4 +++ src/build/build.v | 19 ++++++++++-- src/build/shell.v | 16 +++++----- src/client/jobs.v | 11 +++++++ 6 files changed, 116 insertions(+), 13 deletions(-) create mode 100644 src/client/jobs.v diff --git a/src/agent/cli.v b/src/agent/cli.v index 063d960..1badbab 100644 --- a/src/agent/cli.v +++ b/src/agent/cli.v @@ -6,6 +6,8 @@ import conf as vconf struct Config { pub: log_level string = 'WARN' + // Architecture that the agent represents + arch string api_key string address string data_dir string diff --git a/src/agent/daemon.v b/src/agent/daemon.v index 0508790..aabcb44 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -4,6 +4,8 @@ import log import sync.stdatomic import build { BuildConfig } import client +import time +import os const ( build_empty = 0 @@ -14,6 +16,7 @@ const ( struct AgentDaemon { logger shared log.Log conf Config +mut: images ImageManager // Which builds are currently running; length is same as // conf.max_concurrent_builds @@ -41,13 +44,33 @@ pub fn (mut d AgentDaemon) run() { for { free_builds := d.update_atomics() - if free_builds > 0 { + // All build slots are taken, so there's nothing to be done + if free_builds == 0 { + time.sleep(1 * time.second) + continue + } + + // Builds have finished, so old builder images might have freed up. + d.images.clean_old_images() + + // Poll for new jobs + new_configs := d.client.poll_jobs(free_builds) or { + d.lerror('Failed to poll jobs: $err.msg()') + + time.sleep(1 * time.second) + continue + } + + // Schedule new jobs + for config in new_configs { + d.start_build(config) } } } // update_atomics checks for each build whether it's completed, and sets it to -// free again if so. The return value is how many fields are now set to free. +// free again if so. The return value is how many build slots are currently +// free. fn (mut d AgentDaemon) update_atomics() int { mut count := 0 @@ -62,3 +85,53 @@ fn (mut d AgentDaemon) update_atomics() int { return count } + +// start_build starts a build for the given BuildConfig object. +fn (mut d AgentDaemon) start_build(config BuildConfig) bool { + for i in 0 .. d.atomics.len { + if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty { + stdatomic.store_u64(&d.atomics[i], agent.build_running) + d.builds[i] = config + + go d.run_build(i, config) + + return true + } + } + + return false +} + +// run_build actually starts the build process for a given target. 
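+// By this point, start_build has already marked the chosen build slot as +// running; once the build result has been reported to the server, the slot is +// flipped to done so update_atomics can reclaim it.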
+fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) { + d.linfo('started build: $config.url -> $config.repo') + + // 0 means success, 1 means failure + mut status := 0 + + new_config := BuildConfig{ + ...config + base_image: d.images.get(config.base_image) + } + + res := build.build_config(d.client.address, d.client.api_key, new_config) or { + d.ldebug('build_config error: $err.msg()') + status = 1 + + build.BuildResult{} + } + + if status == 0 { + d.linfo('finished build: $config.url -> $config.repo; uploading logs...') + + build_arch := os.uname().machine + d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch, + res.exit_code, res.logs) or { + d.lerror('Failed to upload logs for build: $config.url -> $config.repo') + } + } else { + d.linfo('an error occurred during build: $config.url -> $config.repo') + } + + stdatomic.store_u64(&d.atomics[build_index], agent.build_done) +} diff --git a/src/agent/images.v b/src/agent/images.v index aee2be0..78bf2d0 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -19,6 +19,10 @@ fn new_image_manager(refresh_frequency int) ImageManager { } } +pub fn (m &ImageManager) get(base_image string) string { + return m.images[base_image].last() +} + fn (mut m ImageManager) refresh_image(base_image string) ! { // No need to refresh the image if the previous one is still new enough if base_image in m.timestamps diff --git a/src/build/build.v b/src/build/build.v index 13d3e45..744ce9c 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -103,10 +103,23 @@ pub: logs string } +pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { +config := BuildConfig{ + target_id: target.id + kind: target.kind + url: target.url + branch: target.branch + repo: target.repo + base_image: base_image_id + } + + return build_config(address, api_key, config) +} + // build_target builds, packages & publishes a given Arch package based on the // provided target. The base image ID should be of an image previously created // by create_build_image. It returns the logs of the container. -pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { +pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult { mut dd := docker.new_conn()! defer { dd.close() or {} } build_arch := os.uname().machine - build_script := create_build_script(address, target, build_arch) + build_script := create_build_script(address, config, build_arch) // We convert the build script into a base64 string, which then gets passed // to the container as an env var base64_script := base64.encode_str(build_script) c := docker.NewContainer{ - image: '$base_image_id' + image: '$config.base_image' env: [ 'BUILD_SCRIPT=$base64_script', 'API_KEY=$api_key', diff --git a/src/build/shell.v b/src/build/shell.v index e573d53..42ec3c0 100644 --- a/src/build/shell.v +++ b/src/build/shell.v @@ -23,13 +23,13 @@ pub fn echo_commands(cmds []string) []string { } // create_build_script generates a shell script that builds a given Target.
-fn create_build_script(address string, target &Target, build_arch string) string { - repo_url := '$address/$target.repo' +fn create_build_script(address string, config BuildConfig, build_arch string) string { + repo_url := '$address/$config.repo' mut commands := [ // This will later be replaced by a proper setting for changing the // mirrorlist - "echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf" + "echo -e '[$config.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf" // We need to update the package list of the repo we just added above. // This should however not pull in a lot of packages as long as the // builder image is rebuilt frequently. @@ -38,22 +38,22 @@ fn create_build_script(address string, target &Target, build_arch string) string 'su builder', ] - commands << match target.kind { + commands << match config.kind { 'git' { - if target.branch == '' { + if config.branch == '' { [ - "git clone --single-branch --depth 1 '$target.url' repo", + "git clone --single-branch --depth 1 '$config.url' repo", ] } else { [ - "git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo", + "git clone --single-branch --depth 1 --branch $config.branch '$config.url' repo", ] } } 'url' { [ 'mkdir repo', - "curl -o repo/PKGBUILD -L '$target.url'", + "curl -o repo/PKGBUILD -L '$config.url'", ] } else { diff --git a/src/client/jobs.v b/src/client/jobs.v new file mode 100644 index 0000000..281d6ce --- /dev/null +++ b/src/client/jobs.v @@ -0,0 +1,11 @@ +module client + +import build { BuildConfig } + +pub fn (c &Client) poll_jobs(max int) ![]BuildConfig { + data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', { + 'max': max.str() + })! + + return data.data +} From 3611123f4549523f2420ac8a1157d146c9064c8d Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Mon, 12 Dec 2022 22:58:43 +0100 Subject: [PATCH 23/51] feat(agent): initial working version --- src/agent/cli.v | 4 ++-- src/agent/daemon.v | 39 ++++++++++++++++++++++++++++++++------- src/build/build.v | 18 +++++++++--------- src/build/queue.v | 1 - src/build/shell.v | 2 -- src/client/jobs.v | 5 +++-- vieter.toml | 2 +- 7 files changed, 47 insertions(+), 24 deletions(-) diff --git a/src/agent/cli.v b/src/agent/cli.v index 1badbab..a375f08 100644 --- a/src/agent/cli.v +++ b/src/agent/cli.v @@ -5,9 +5,9 @@ import conf as vconf struct Config { pub: - log_level string = 'WARN' + log_level string = 'WARN' // Architecture that the agent represents - arch string + arch string api_key string address string data_dir string diff --git a/src/agent/daemon.v b/src/agent/daemon.v index aabcb44..f060863 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -41,6 +41,10 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon { } pub fn (mut d AgentDaemon) run() { + // This is just so that the very first time the loop is run, the jobs are + // always polled + mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency) + for { free_builds := d.update_atomics() @@ -54,16 +58,37 @@ pub fn (mut d AgentDaemon) run() { d.images.clean_old_images() // Poll for new jobs - new_configs := d.client.poll_jobs(free_builds) or { + if time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) { + new_configs := d.client.poll_jobs(d.conf.arch, free_builds) or { d.lerror('Failed to poll jobs: $err.msg()') + time.sleep(5 * time.second) + continue + } + last_poll_time = time.now() + + // Schedule new
jobs + for config in new_configs { + // TODO handle this better than to just skip the config + // Make sure a recent build base image is available for building the config + d.images.refresh_image(config.base_image) or { + d.lerror(err.msg()) + continue + } + d.start_build(config) + } time.sleep(1 * time.second) - continue } - - // Schedule new jobs - for config in new_configs { - d.start_build(config) + // Builds are running, so check again after one second + else if free_builds < d.conf.max_concurrent_builds { + time.sleep(1 * time.second) + } + // The agent is not doing anything, so we just wait until the next poll + // time + else { + time_until_next_poll := time.now() - last_poll_time + time.sleep(time_until_next_poll) } } } diff --git a/src/build/build.v b/src/build/build.v index 744ce9c..2d51156 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -104,16 +104,16 @@ pub: } pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { -config := BuildConfig{ - target_id: target.id - kind: target.kind - url: target.url - branch: target.branch - repo: target.repo - base_image: base_image_id - } + config := BuildConfig{ + target_id: target.id + kind: target.kind + url: target.url + branch: target.branch + repo: target.repo + base_image: base_image_id + } - return build_config(address, api_key, config) + return build_config(address, api_key, config) } // build_target builds, packages & publishes a given Arch package based on the diff --git a/src/build/queue.v b/src/build/queue.v index b704926..29036e4 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -76,7 +76,6 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { } } - dump(job) q.queues[arch].insert(job) } } diff --git a/src/build/shell.v b/src/build/shell.v index 42ec3c0..c2d0c9b 100644 --- a/src/build/shell.v +++ b/src/build/shell.v @@ -1,7 +1,5 @@ module build -import models { Target } - // escape_shell_string escapes any characters that could be interpreted // incorrectly by a shell. The resulting value should be safe to use inside an // echo statement. diff --git a/src/client/jobs.v b/src/client/jobs.v index 281d6ce..30f2531 100644 --- a/src/client/jobs.v +++ b/src/client/jobs.v @@ -2,9 +2,10 @@ module client import build { BuildConfig } -pub fn (c &Client) poll_jobs(max int) ![]BuildConfig { +pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', { - 'max': max.str() + 'arch': arch + 'max': max.str() })! 
return data.data diff --git a/vieter.toml b/vieter.toml index d3922a4..9a68ae3 100644 --- a/vieter.toml +++ b/vieter.toml @@ -4,6 +4,7 @@ data_dir = "data" pkg_dir = "data/pkgs" log_level = "DEBUG" default_arch = "x86_64" +arch = "x86_64" address = "http://localhost:8000" @@ -11,4 +12,3 @@ global_schedule = '* *' api_update_frequency = 2 image_rebuild_frequency = 1 max_concurrent_builds = 3 - From 882a9a60a973427b7d0a181dc5f2c1117cd6188f Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 08:58:27 +0100 Subject: [PATCH 24/51] feat(build): allowed invalidating entries in build queue --- src/build/queue.v | 91 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 24 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index 29036e4..b559552 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -8,6 +8,8 @@ import util struct BuildJob { pub: + // Time at which this build job was created/queued + created time.Time // Next timestamp from which point this job is allowed to be executed timestamp time.Time // Required for calculating next timestamp after having pop'ed a job @@ -22,6 +24,8 @@ fn (r1 BuildJob) < (r2 BuildJob) bool { return r1.timestamp < r2.timestamp } +// The build job queue is responsible for managing the list of scheduled builds +// for each architecture. Agents receive jobs from this queue. pub struct BuildJobQueue { // Schedule to use for targets without explicitly defined cron expression default_schedule CronExpression @@ -31,15 +35,17 @@ mut: mutex shared util.Dummy // For each architecture, a priority queue is tracked queues map[string]MinHeap - // Each queued build job is also stored in a map, with the keys being the - // target IDs. This is used when removing or editing targets. - // jobs map[int]BuildJob + // When a target is removed from the server or edited, its previous build + // configs will be invalid. This map allows for those to be simply skipped + // by ignoring any build configs created before this timestamp. + invalidated map[int]time.Time } pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue { return BuildJobQueue{ default_schedule: default_schedule default_base_image: default_base_image + invalidated: map[int]time.Time{} } } @@ -63,6 +69,7 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { timestamp := ce.next_from_now()! job := BuildJob{ + created: time.now() timestamp: timestamp ce: ce config: BuildConfig{ @@ -88,6 +95,7 @@ fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! { new_job := BuildJob{ ...job + created: time.now() timestamp: new_timestamp } @@ -96,16 +104,26 @@ fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! { // peek shows the first job for the given architecture that's ready to be // executed, if present.
-pub fn (q &BuildJobQueue) peek(arch string) ?BuildJob { +pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob { rlock q.mutex { if arch !in q.queues { return none } - job := q.queues[arch].peek() or { return none } + for { + job := q.queues[arch].peek() or { return none } - if job.timestamp < time.now() { - return job + // Skip any invalidated jobs + if job.config.target_id in q.invalidated + && job.created < q.invalidated[job.config.target_id] { + // This pop *should* never fail according to the source code + q.queues[arch].pop() or { return none } + continue + } + + if job.timestamp < time.now() { + return job + } } } @@ -120,17 +138,27 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { return none } - mut job := q.queues[arch].peek() or { return none } + for { + mut job := q.queues[arch].peek() or { return none } - if job.timestamp < time.now() { - job = q.queues[arch].pop()? + // Skip any invalidated jobs + if job.config.target_id in q.invalidated + && job.created < q.invalidated[job.config.target_id] { + // This pop *should* never fail according to the source code + q.queues[arch].pop() or { return none } + continue + } - // TODO how do we handle this properly? Is it even possible for a - // cron expression to not return a next time if it's already been - // used before? - q.reschedule(job, arch) or {} + if job.timestamp < time.now() { + job = q.queues[arch].pop()? - return job + // TODO how do we handle this properly? Is it even possible for a + // cron expression to not return a next time if it's already been + // used before? + q.reschedule(job, arch) or {} + + return job + } } } @@ -146,18 +174,28 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { mut out := []BuildJob{} - for out.len < n { - mut job := q.queues[arch].peek() or { break } + outer: for out.len < n { + for { + mut job := q.queues[arch].peek() or { break outer } - if job.timestamp < time.now() { - job = q.queues[arch].pop() or { break } + // Skip any invalidated jobs + if job.config.target_id in q.invalidated + && job.created < q.invalidated[job.config.target_id] { + // This pop *should* never fail according to the source code + q.queues[arch].pop() or { break outer } + continue + } - // TODO idem - q.reschedule(job, arch) or {} + if job.timestamp < time.now() { + job = q.queues[arch].pop() or { break outer } - out << job - } else { - break + // TODO idem + q.reschedule(job, arch) or {} + + out << job + } else { + break outer + } } } @@ -166,3 +204,8 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { return [] } + +// invalidate a target's old build jobs. 
+pub fn (mut q BuildJobQueue) invalidate(target_id int) { + q.invalidated[target_id] = time.now() +} From b6168a3060752474bb1ba4bd961ac119eddce16f Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 12:38:39 +0100 Subject: [PATCH 25/51] fix(build): change tests to use BuildConfig instead --- src/build/shell_test.v | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/build/shell_test.v b/src/build/shell_test.v index 341df88..d228faf 100644 --- a/src/build/shell_test.v +++ b/src/build/shell_test.v @@ -1,42 +1,48 @@ module build -import models { Target } +import models fn test_create_build_script_git_branch() { - target := Target{ - id: 1 + config := BuildConfig{ + target_id: 1 kind: 'git' url: 'https://examplerepo.com' branch: 'main' repo: 'vieter' + base_image: 'not-used:latest' } - build_script := create_build_script('https://example.com', target, 'x86_64') + + build_script := create_build_script('https://example.com', config, 'x86_64') expected := $embed_file('build_script_git_branch.sh') assert build_script == expected.to_string().trim_space() } fn test_create_build_script_git() { - target := Target{ - id: 1 + config := BuildConfig{ + target_id: 1 kind: 'git' url: 'https://examplerepo.com' repo: 'vieter' + base_image: 'not-used:latest' } - build_script := create_build_script('https://example.com', target, 'x86_64') + + build_script := create_build_script('https://example.com', config, 'x86_64') expected := $embed_file('build_script_git.sh') assert build_script == expected.to_string().trim_space() } fn test_create_build_script_url() { - target := Target{ - id: 1 + config := BuildConfig{ + target_id: 1 kind: 'url' url: 'https://examplerepo.com' repo: 'vieter' + base_image: 'not-used:latest' } - build_script := create_build_script('https://example.com', target, 'x86_64') + + build_script := create_build_script('https://example.com', config, 'x86_64') expected := $embed_file('build_script_url.sh') assert build_script == expected.to_string().trim_space() From e742d3de6da36298ba4f34c2f12820a85e08fb47 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 13:46:07 +0100 Subject: [PATCH 26/51] fix(db): return correct id when adding targets --- src/db/logs.v | 2 ++ src/db/targets.v | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/db/logs.v b/src/db/logs.v index 923dde2..2745467 100644 --- a/src/db/logs.v +++ b/src/db/logs.v @@ -84,6 +84,8 @@ pub fn (db &VieterDb) add_build_log(log BuildLog) int { insert log into BuildLog } + // Here, this does work because a log doesn't contain any foreign keys, + // meaning the ORM only has to do a single add inserted_id := db.conn.last_id() as int return inserted_id diff --git a/src/db/targets.v b/src/db/targets.v index a705ebb..41e56df 100644 --- a/src/db/targets.v +++ b/src/db/targets.v @@ -38,14 +38,17 @@ pub fn (db &VieterDb) get_target(target_id int) ?Target { } // add_target inserts the given target into the database. -pub fn (db &VieterDb) add_target(repo Target) int { +pub fn (db &VieterDb) add_target(target Target) int { sql db.conn { - insert repo into Target + insert target into Target } - inserted_id := db.conn.last_id() as int + // ID of inserted target is the largest id + inserted_target := sql db.conn { + select from Target order by id desc limit 1 + } - return inserted_id + return inserted_target.id } // delete_target deletes the target with the given id from the database. 
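Taken together, the queue patches above give each target an endlessly rescheduled job per architecture, with stale jobs filtered out through the invalidated map. A condensed sketch of that lifecycle in V (the `target` and `default_ce` values are hypothetical; this combines calls from patches 18, 19 and 24 and is not itself part of any patch):

	mut queue := build.new_job_queue(default_ce, 'archlinux:base-devel')

	// Schedules the first job for this target on the given architecture
	queue.insert(target, 'x86_64')!

	// An agent polls: if the job's timestamp has passed, the job is handed
	// out and immediately rescheduled for its next cron timestamp.
	if job := queue.pop('x86_64') {
		println('building target $job.config.target_id')
	}

	// Editing or removing the target invalidates all jobs created before
	// now; they are silently skipped by later peeks and pops.
	queue.invalidate(target.id)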
From 63427899217aae4390e01b72238758ac1457856d Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 13:58:51 +0100 Subject: [PATCH 27/51] feat(server): update job queue when adding, removing or updating targets --- src/build/queue.v | 7 +++++++ src/server/api_targets.v | 29 +++++++++++++++++----------- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index b559552..a78e56a 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -49,6 +49,13 @@ pub fn new_job_queue(default_schedule CronExpression, default_base_image string) } } +// insert_all executes insert for each architecture of the given Target. +pub fn (mut q BuildJobQueue) insert_all(target Target) ! { + for arch in target.arch { + q.insert(target, arch.value)! + } +} + // insert a new target's job into the queue for the given architecture. This // job will then be endlessly rescheduled after being pop'ed, unless removed // explicitly. pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { lock q.mutex { if arch !in q.queues { diff --git a/src/server/api_targets.v b/src/server/api_targets.v index 16db7e9..dc39d37 100644 --- a/src/server/api_targets.v +++ b/src/server/api_targets.v @@ -12,17 +12,17 @@ fn (mut app App) v1_get_targets() web.Result { filter := models.from_params(app.query) or { return app.json(http.Status.bad_request, new_response('Invalid query parameters.')) } - repos := app.db.get_targets(filter) + targets := app.db.get_targets(filter) - return app.json(.ok, new_data_response(repos)) + return app.json(.ok, new_data_response(targets)) } // v1_get_single_target returns the information for a single target. ['/api/v1/targets/:id'; auth; get] fn (mut app App) v1_get_single_target(id int) web.Result { - repo := app.db.get_target(id) or { return app.not_found() } + target := app.db.get_target(id) or { return app.not_found() } - return app.json(.ok, new_data_response(repo)) + return app.json(.ok, new_data_response(target)) } // v1_post_target creates a new target from the provided query string. ['/api/v1/targets'; auth; post] fn (mut app App) v1_post_target() web.Result { mut params := app.query.clone() - // If a repo is created without specifying the arch, we assume it's meant + // If a target is created without specifying the arch, we assume it's meant // for the default architecture.
if 'arch' !in params || params['arch'] == '' { params['arch'] = app.conf.default_arch } - new_repo := models.from_params(params) or { + mut new_target := models.from_params(params) or { return app.json(http.Status.bad_request, new_response(err.msg())) } // Ensure someone doesn't submit an invalid kind - if new_repo.kind !in models.valid_kinds { + if new_target.kind !in models.valid_kinds { return app.json(http.Status.bad_request, new_response('Invalid kind.')) } - id := app.db.add_target(new_repo) + id := app.db.add_target(new_target) + new_target.id = id + + // Add the target to the job queue + // TODO return better error here if it's the cron schedule that's incorrect + app.job_queue.insert_all(new_target) or { return app.status(.internal_server_error) } return app.json(.ok, new_data_response(id)) } @@ -54,6 +59,7 @@ fn (mut app App) v1_post_target() web.Result { ['/api/v1/targets/:id'; auth; delete] fn (mut app App) v1_delete_target(id int) web.Result { app.db.delete_target(id) + app.job_queue.invalidate(id) return app.json(.ok, new_response('')) } @@ -69,7 +75,10 @@ fn (mut app App) v1_patch_target(id int) web.Result { app.db.update_target_archs(id, arch_objs) } - repo := app.db.get_target(id) or { return app.status(.internal_server_error) } + target := app.db.get_target(id) or { return app.status(.internal_server_error) } - return app.json(.ok, new_data_response(repo)) + app.job_queue.invalidate(id) + app.job_queue.insert_all(target) or { return app.status(.internal_server_error) } + + return app.json(.ok, new_data_response(target)) } From 5cbfc0ebcb45e08b5d445d6e3997f7a92628a797 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 17:42:49 +0100 Subject: [PATCH 28/51] feat(agent): clean up code a bit; add frequent polling when active --- src/agent/agent.v | 6 ++-- src/agent/cli.v | 16 +++++----- src/agent/daemon.v | 78 ++++++++++++++++++++++++++++------------------ src/agent/images.v | 31 +++++++++++++----- src/agent/log.v | 2 +- 5 files changed, 83 insertions(+), 50 deletions(-) diff --git a/src/agent/agent.v b/src/agent/agent.v index 1758c85..69b9947 100644 --- a/src/agent/agent.v +++ b/src/agent/agent.v @@ -2,12 +2,12 @@ module agent import log import os +import util const log_file_name = 'vieter.agent.log' -// agent start an agent service +// agent starts an agent service pub fn agent(conf Config) ! { - // Configure logger log_level := log.level_from_tag(conf.log_level) or { return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') } @@ -16,6 +16,8 @@ pub fn agent(conf Config) ! { level: log_level } + os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') } + log_file := os.join_path_single(conf.data_dir, agent.log_file_name) logger.set_full_logpath(log_file) logger.log_to_console_too() diff --git a/src/agent/cli.v b/src/agent/cli.v index a375f08..1535e17 100644 --- a/src/agent/cli.v +++ b/src/agent/cli.v @@ -7,14 +7,12 @@ struct Config { pub: log_level string = 'WARN' // Architecture that the agent represents - arch string - api_key string - address string - data_dir string - max_concurrent_builds int = 1 - polling_frequency int = 30 - // Architecture of agent - // arch string + arch string + api_key string + address string + data_dir string + max_concurrent_builds int = 1 + polling_frequency int = 30 image_rebuild_frequency int = 1440 } @@ -22,7 +20,7 @@ pub: pub fn cmd() cli.Command { return cli.Command{ name: 'agent' - description: 'Start an agent service & start polling for new builds.' 
+ description: 'Start an agent daemon.' execute: fn (cmd cli.Command) ! { config_file := cmd.flags.get_string('config-file')! conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! diff --git a/src/agent/daemon.v b/src/agent/daemon.v index f060863..f753e25 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -16,17 +16,17 @@ const ( struct AgentDaemon { logger shared log.Log conf Config + client client.Client mut: images ImageManager - // Which builds are currently running; length is same as - // conf.max_concurrent_builds + // Which builds are currently running; length is conf.max_concurrent_builds builds []BuildConfig - // Atomic variables used to detect when a build has finished; length is the - // same as conf.max_concurrent_builds - client client.Client + // Atomic variables used to detect when a build has finished; length is + // conf.max_concurrent_builds atomics []u64 } +// agent_init initializes a new agent fn agent_init(logger log.Log, conf Config) AgentDaemon { mut d := AgentDaemon{ logger: logger @@ -40,37 +40,49 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon { return d } +// run starts the actual agent daemon. This function will run forever. pub fn (mut d AgentDaemon) run() { // This is just so that the very first time the loop is run, the jobs are // always polled mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency) + mut sleep_time := 1 * time.second + mut finished, mut empty := 0, 0 for { - free_builds := d.update_atomics() + finished, empty = d.update_atomics() - // All build slots are taken, so there's nothing to be done - if free_builds == 0 { + // No new finished builds and no free slots, so there's nothing to be + // done + if finished + empty == 0 { time.sleep(1 * time.second) continue } // Builds have finished, so old builder images might have freed up. - d.images.clean_old_images() + // TODO this might query the docker daemon too frequently. + if finished > 0 { + d.images.clean_old_images() + } - // Poll for new jobs - if time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) { - new_configs := d.client.poll_jobs(d.conf.arch, free_builds) or { + // The agent will always poll for new jobs after at most + // `polling_frequency` seconds. However, when jobs have finished, the + // agent will also poll for new jobs. This is because jobs are often + // clustered together (especially when mostly using the global cron + // schedule), so there's a much higher chance jobs are available. + if finished > 0 || time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) { + new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or { d.lerror('Failed to poll jobs: $err.msg()') + // TODO pick a better delay here time.sleep(5 * time.second) continue } last_poll_time = time.now() for config in new_configs { // TODO handle this better than to just skip the config - // Make sure a recent build base image is available for building the config + // Make sure a recent build base image is available for + // building the config d.images.refresh_image(config.base_image) or { d.lerror(err.msg()) continue } d.start_build(config) } - time.sleep(1 * time.second) - } - // Builds are running, so check again after one second - else if free_builds < d.conf.max_concurrent_builds { - time.sleep(1 * time.second) + // No new jobs were scheduled and the agent isn't doing anything, + // so we just wait until the next polling period.
+ if new_configs.len == 0 && finished + empty == d.conf.max_concurrent_builds { + sleep_time = time.now() - last_poll_time + } } // The agent is not doing anything, so we just wait until the next poll // time - else { - time_until_next_poll := time.now() - last_poll_time - time.sleep(time_until_next_poll) + else if finished + empty == d.conf.max_concurrent_builds { + sleep_time = time.now() - last_poll_time } + + time.sleep(sleep_time) } } // update_atomics checks for each build whether it's completed, and sets it to -// free again if so. The return value is how many build slots are currently -// free. -fn (mut d AgentDaemon) update_atomics() int { - mut count := 0 +// empty again if so. The return value is a tuple `(finished, empty)` where +// `finished` is how many builds were just finished and thus set to empty, and +// `empty` is how many build slots were already empty. The amount of running +// builds can then be calculated by subtracting these two values from the +// total allowed concurrent builds. +fn (mut d AgentDaemon) update_atomics() (int, int) { + mut finished := 0 + mut empty := 0 for i in 0 .. d.atomics.len { if stdatomic.load_u64(&d.atomics[i]) == agent.build_done { stdatomic.store_u64(&d.atomics[i], agent.build_empty) - count++ + finished++ } else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty { - count++ + empty++ } } - return count + return finished, empty } -// start_build starts a build for the given BuildConfig object. +// start_build starts a build for the given BuildConfig. fn (mut d AgentDaemon) start_build(config BuildConfig) bool { for i in 0 .. d.atomics.len { if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty { @@ -149,6 +166,7 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) { if status == 0 { d.linfo('finished build: $config.url -> $config.repo; uploading logs...') + // TODO use the arch value here build_arch := os.uname().machine d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch, res.exit_code, res.logs) or { diff --git a/src/agent/images.v b/src/agent/images.v index 78bf2d0..64a8f74 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -4,29 +4,42 @@ import time import docker import build +// An ImageManager is a utility that creates builder images from given base +// images, updating these builder images if they've become too old. This +// structure can manage images from any number of base images, paving the way +// for configurable base images per target/repository. struct ImageManager { mut: - refresh_frequency int - images map[string][]string [required] - timestamps map[string]time.Time [required] + max_image_age int [required] + // For each base image, one or more builder images can exist at the same + // time + images map[string][]string [required] + // For each base image, we track when its newest image was built + timestamps map[string]time.Time [required] } -fn new_image_manager(refresh_frequency int) ImageManager { +// new_image_manager initializes a new image manager. +fn new_image_manager(max_image_age int) ImageManager { return ImageManager{ - refresh_frequency: refresh_frequency + max_image_age: max_image_age images: map[string][]string{} timestamps: map[string]time.Time{} } } +// get returns the name of the newest image for the given base image. Note that +// this function should only be called *after* a first call to `refresh_image`.
pub fn (m &ImageManager) get(base_image string) string { return m.images[base_image].last() } +// refresh_image builds a new builder image from the given base image if the +// previous builder image is too old or non-existent. This function will do +// nothing if these conditions aren't met, so it's safe to call it every time +// you want to ensure an image is up to date. fn (mut m ImageManager) refresh_image(base_image string) ! { - // No need to refresh the image if the previous one is still new enough if base_image in m.timestamps - && m.timestamps[base_image].add_seconds(m.refresh_frequency) > time.now() { + && m.timestamps[base_image].add_seconds(m.max_image_age) > time.now() { return } @@ -39,7 +52,9 @@ fn (mut m ImageManager) refresh_image(base_image string) ! { m.timestamps[base_image] = time.now() } -// clean_old_images tries to remove any old but still present builder images. +// clean_old_images removes all older builder images that are no longer in use. +// The function will always leave at least one builder image, namely the newest +// one. fn (mut m ImageManager) clean_old_images() { mut dd := docker.new_conn() or { return } diff --git a/src/agent/log.v b/src/agent/log.v index d47df0f..cd59207 100644 --- a/src/agent/log.v +++ b/src/agent/log.v @@ -2,7 +2,7 @@ module agent import log -// log reate a log message with the given level +// log a message with the given level pub fn (mut d AgentDaemon) log(msg string, level log.Level) { lock d.logger { d.logger.send_output(msg, level) From 03f2240ff63e2f115e348626941d8f7919bd3e0f Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 17:51:42 +0100 Subject: [PATCH 29/51] chore: please the linter --- src/build/build.v | 3 ++- src/build/queue.v | 1 + src/build/shell_test.v | 2 -- src/client/jobs.v | 1 + src/server/api_builds.v | 4 +--- src/server/server.v | 2 ++ 6 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/build/build.v b/src/build/build.v index 2d51156..84d288c 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -103,6 +103,7 @@ pub: logs string } +// build_target builds the given target. Internally it calls `build_config`. pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { config := BuildConfig{ target_id: target.id @@ -116,7 +117,7 @@ pub fn build_target(address string, api_key string, base_image_id string, target return build_config(address, api_key, config) } -// build_target builds, packages & publishes a given Arch package based on the +// build_config builds, packages & publishes a given Arch package based on the // provided target. The base image ID should be of an image previously created // by create_build_image. It returns the logs of the container. 
pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult { diff --git a/src/build/queue.v b/src/build/queue.v index a78e56a..2a28e62 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -41,6 +41,7 @@ mut: invalidated map[int]time.Time } +// new_job_queue initializes a new job queue pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue { return BuildJobQueue{ default_schedule: default_schedule diff --git a/src/build/shell_test.v b/src/build/shell_test.v index d228faf..8bb22d9 100644 --- a/src/build/shell_test.v +++ b/src/build/shell_test.v @@ -1,7 +1,5 @@ module build -import models - fn test_create_build_script_git_branch() { config := BuildConfig{ target_id: 1 diff --git a/src/client/jobs.v b/src/client/jobs.v index 30f2531..7fee94f 100644 --- a/src/client/jobs.v +++ b/src/client/jobs.v @@ -2,6 +2,7 @@ module client import build { BuildConfig } +// poll_jobs requests a list of new build jobs from the server. pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', { 'arch': arch diff --git a/src/server/api_builds.v b/src/server/api_builds.v index ec3c8ec..922b252 100644 --- a/src/server/api_builds.v +++ b/src/server/api_builds.v @@ -2,10 +2,8 @@ module server import web import web.response { new_data_response, new_response } -// import os -// import util -// import models { BuildLog, BuildLogFilter } +// v1_poll_job_queue allows agents to poll for new build jobs. ['/api/v1/jobs/poll'; auth; get] fn (mut app App) v1_poll_job_queue() web.Result { arch := app.query['arch'] or { diff --git a/src/server/server.v b/src/server/server.v index e2c19c2..1e86906 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -27,6 +27,8 @@ pub mut: db db.VieterDb } +// init_job_queue populates a fresh job queue with all the targets currently +// stored in the database. fn (mut app App) init_job_queue() ! { // Initialize build queues mut targets := app.db.get_targets(limit: 25) From d3151863ee88c7fdf75d6a569be25e76511c38c1 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 18:24:21 +0100 Subject: [PATCH 30/51] refactor(build): remove some code duplication from queue --- CHANGELOG.md | 2 ++ README.md | 5 +-- src/build/queue.v | 88 ++++++++++++++++++++--------------------------- 3 files changed, 43 insertions(+), 52 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2dd760..aed7571 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * Migrated codebase to V 0.3.2 * Cron expression parser now uses bitfields instead of bool arrays +* Added option to deploy using agent-server architecture instead of cron daemon ### Fixed @@ -19,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * CLI no longer exits with non-zero status code when removing/patching target * Allow NULL values for branch in database +* Endpoint for adding targets now returns the correct id ## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0) diff --git a/README.md b/README.md index b9fff69..637d4c1 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,8 @@ quicker. I chose [V](https://vlang.io/) as I've been very intrigued by this language for a while now. I wanted a fast language that I could code while relaxing, without having to exert too much mental effort & V seemed like the right choice for -that. +that. 
Sadly, this didn't quite turn out the way I expected, but I'm sticking +with it anyways ;p ## Features @@ -49,7 +50,7 @@ update`. I used to maintain a mirror that tracked the latest master, but nowadays, I maintain a Docker image containing the specific compiler version that Vieter -builds with. Currently, this is V 0.3. +builds with. Currently, this is V 0.3.2. ## Contributing diff --git a/src/build/queue.v b/src/build/queue.v index 2a28e62..dd2bb87 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -110,6 +110,21 @@ fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! { q.queues[arch].insert(new_job) } +// pop_invalid pops all invalid jobs. +fn (mut q BuildJobQueue) pop_invalid(arch string) { + for { + job := q.queues[arch].peek() or { return } + + if job.config.target_id in q.invalidated + && job.created < q.invalidated[job.config.target_id] { + // This pop *should* never fail according to the source code + q.queues[arch].pop() or {} + } else { + break + } + } +} + // peek shows the first job for the given architecture that's ready to be // executed, if present. pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob { @@ -118,20 +133,11 @@ pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob { return none } - for { - job := q.queues[arch].peek() or { return none } + q.pop_invalid(arch) + job := q.queues[arch].peek()? - // Skip any invalidated jobs - if job.config.target_id in q.invalidated - && job.created < q.invalidated[job.config.target_id] { - // This pop *should* never fail according to the source code - q.queues[arch].pop() or { return none } - continue - } - - if job.timestamp < time.now() { - return job - } + if job.timestamp < time.now() { + return job } } @@ -146,27 +152,18 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { return none } - for { - mut job := q.queues[arch].peek() or { return none } + q.pop_invalid(arch) + mut job := q.queues[arch].peek()? - // Skip any invalidated jobs - if job.config.target_id in q.invalidated - && job.created < q.invalidated[job.config.target_id] { - // This pop *should* never fail according to the source code - q.queues[arch].pop() or { return none } - continue - } + if job.timestamp < time.now() { + job = q.queues[arch].pop()? - if job.timestamp < time.now() { - job = q.queues[arch].pop()? + // TODO how do we handle this properly? Is it even possible for a + // cron expression to not return a next time if it's already been + // used before? + q.reschedule(job, arch) or {} - // TODO how do we handle this properly? Is it even possible for a - // cron expression to not return a next time if it's already been - // used before? 
- q.reschedule(job, arch) or {} - - return job - } + q.pop_invalid(arch) + mut job := q.queues[arch].peek()? + + if job.timestamp < time.now() { + job = q.queues[arch].pop()? + + // TODO how do we handle this properly? Is it even possible for a + // cron expression to not return a next time if it's already been + // used before? + q.reschedule(job, arch) or {} + + return job } } @@ -182,28 +179,19 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { mut out := []BuildJob{} - outer: for out.len < n { - for { - mut job := q.queues[arch].peek() or { break outer } + for out.len < n { + q.pop_invalid(arch) + mut job := q.queues[arch].peek() or { break } - // Skip any invalidated jobs - if job.config.target_id in q.invalidated - && job.created < q.invalidated[job.config.target_id] { - // This pop *should* never fail according to the source code - q.queues[arch].pop() or { break outer } - continue - } + if job.timestamp < time.now() { + job = q.queues[arch].pop() or { break } - if job.timestamp < time.now() { - job = q.queues[arch].pop() or { break outer } + // TODO idem + q.reschedule(job, arch) or {} - // TODO idem - q.reschedule(job, arch) or {} - - out << job - } else { - break outer - } + out << job + } else { + break } } From 8a2f720bdf1702c7ffaadd27230372fb94519ceb Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 19:33:21 +0100 Subject: [PATCH 31/51] docs(agent): added agent configuration docs --- docs/content/configuration.md | 24 +++++++++++++++++++++++- docs/content/installation.md | 8 ++++---- docs/content/usage/builds/schedule.md | 2 +- src/agent/daemon.v | 4 ++-- 4 files changed, 29 insertions(+), 7 deletions(-) diff --git a/docs/content/configuration.md b/docs/content/configuration.md index af941a2..95bf713 100644 --- a/docs/content/configuration.md +++ b/docs/content/configuration.md @@ -17,7 +17,7 @@ If a variable is both present in the config file & as an environment variable, the value in the environment variable is used. {{< hint info >}} -**Note** +**Note** All environment variables can also be provided from a file by appending them with `_FILE`. This for example allows you to provide the API key from a Docker secrets file. @@ -97,3 +97,25 @@ configuration variable required for each command. build`. * Default: `archlinux:base-devel` +### `vieter agent` + +* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`, + `WARN`, `INFO` or `DEBUG`. + * Default: `WARN` +* `address`: *public* URL of the Vieter repository server to build for. From + this server jobs are retrieved. All built packages are published to this + server. +* `api_key`: API key of the above server. +* `data_dir`: directory to store log file in. +* `max_concurrent_builds`: how many builds to run at the same time. + * Default: `1` +* `polling_frequency`: how often (in seconds) to poll the server for new + builds. Note that the agent might poll more frequently when it's actively + processing builds. +* `image_rebuild_frequency`: Vieter periodically builds images that are then + used as a basis for running build containers. This is to prevent each build + from downloading an entire repository worth of dependencies. This setting + defines how frequently (in minutes) to rebuild these images. + * Default: `1440` (every 24 hours) +* `arch`: architecture for which this agent should pull down builds (e.g. + `x86_64`) diff --git a/docs/content/installation.md b/docs/content/installation.md index 87b9cba..21eda64 100644 --- a/docs/content/installation.md +++ b/docs/content/installation.md @@ -21,7 +21,7 @@ branch. This branch will be the most up to date, but does not give any guarantees about stability, so beware! Thanks to the single-binary design of Vieter, this image can be used for -the repository server & the cron daemon.
+the repository server, the cron daemon and the agent. Below is an example compose file to set up both the repository server & the cron daemon: @@ -76,7 +76,7 @@ architectures will build on both. ## Binary On the -[releases](https://git.rustybever.be/vieter/vieter/releases) +[releases](https://git.rustybever.be/vieter-v/vieter/releases) page, you can find statically compiled binaries for all released versions. This is the same binary as used inside the Docker images. @@ -106,5 +106,5 @@ guarantee that a compiler update won't temporarily break them. ## Building from source -The project [README](https://git.rustybever.be/vieter/vieter#building) contains -instructions for building Vieter from source. +The project [README](https://git.rustybever.be/vieter-v/vieter#building) +contains instructions for building Vieter from source. diff --git a/docs/content/usage/builds/schedule.md b/docs/content/usage/builds/schedule.md index 38f76a4..de59e25 100644 --- a/docs/content/usage/builds/schedule.md +++ b/docs/content/usage/builds/schedule.md @@ -37,6 +37,6 @@ Each section can consist of as many of these parts as necessary. ## CLI tool The Vieter binary contains a command that shows you the next matching times for -a given expression. This can be useful to understand the syntax. For more +a given expression. This can be useful for understanding the syntax. For more information, see [vieter-schedule(1)](https://rustybever.be/man/vieter/vieter-schedule.1.html). diff --git a/src/agent/daemon.v b/src/agent/daemon.v index f753e25..ff29d5e 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -32,7 +32,7 @@ fn agent_init(logger log.Log, conf Config) AgentDaemon { logger: logger client: client.new(conf.address, conf.api_key) conf: conf - images: new_image_manager(conf.image_rebuild_frequency) + images: new_image_manager(conf.image_rebuild_frequency * 60) builds: []BuildConfig{len: conf.max_concurrent_builds} atomics: []u64{len: conf.max_concurrent_builds} } From f6c5e7c2469f474193270cd09264ee1fb022499c Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 19:59:18 +0100 Subject: [PATCH 32/51] feat: add option to force-build package --- src/build/build.v | 4 +++- src/build/shell.v | 20 ++++++++++++++------ src/console/targets/build.v | 4 ++-- src/console/targets/targets.v | 9 ++++++++- src/cron/daemon/build.v | 2 +- vieter.toml | 2 +- 6 files changed, 29 insertions(+), 12 deletions(-) diff --git a/src/build/build.v b/src/build/build.v index 84d288c..6da851a 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -24,6 +24,7 @@ pub: branch string repo string base_image string + force bool } // create_build_image creates a builder image given some base image which can @@ -104,7 +105,7 @@ pub: } // build_target builds the given target. Internally it calls `build_config`. 
-pub fn build_target(address string, api_key string, base_image_id string, target &Target) !BuildResult { +pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult { config := BuildConfig{ target_id: target.id kind: target.kind @@ -112,6 +113,7 @@ pub fn build_target(address string, api_key string, base_image_id string, target branch: target.branch repo: target.repo base_image: base_image_id + force: force } return build_config(address, api_key, config) diff --git a/src/build/shell.v b/src/build/shell.v index c2d0c9b..ac61e07 100644 --- a/src/build/shell.v +++ b/src/build/shell.v @@ -63,14 +63,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st 'cd repo', 'makepkg --nobuild --syncdeps --needed --noconfirm', 'source PKGBUILD', + ] + + if !config.force { // The build container checks whether the package is already present on // the server. - 'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0', - // If the above curl command succeeds, we don't need to rebuild the - // package. However, because we're in a su shell, the exit command will - // drop us back into the root shell. Therefore, we must check whether - // we're in root so we don't proceed. - '[ "\$(id -u)" == 0 ] && exit 0', + commands << [ + 'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0', + // If the above curl command succeeds, we don't need to rebuild the + // package. However, because we're in a su shell, the exit command will + // drop us back into the root shell. Therefore, we must check whether + // we're in root so we don't proceed. + '[ "\$(id -u)" == 0 ] && exit 0', + ] + } + + commands << [ 'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done', ] diff --git a/src/console/targets/build.v b/src/console/targets/build.v index 9368558..e18077d 100644 --- a/src/console/targets/build.v +++ b/src/console/targets/build.v @@ -6,7 +6,7 @@ import os import build // build locally builds the target with the given id. -fn build(conf Config, target_id int) ! { +fn build(conf Config, target_id int, force bool) ! { c := client.new(conf.address, conf.api_key) target := c.get_target(target_id)! @@ -16,7 +16,7 @@ fn build(conf Config, target_id int) ! { image_id := build.create_build_image(conf.base_image)! println('Running build...') - res := build.build_target(conf.address, conf.api_key, image_id, target)! + res := build.build_target(conf.address, conf.api_key, image_id, target, force)! println('Removing build image...') diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index 4179363..ffcd36c 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -182,11 +182,18 @@ pub fn cmd() cli.Command { required_args: 1 usage: 'id' description: 'Build the target with the given id & publish it.' + flags: [ + cli.Flag{ + name: 'force' + description: 'Build the target without checking whether it needs to be renewed.' + flag: cli.FlagType.bool + }, + ] execute: fn (cmd cli.Command) ! { config_file := cmd.flags.get_string('config-file')! conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - build(conf, cmd.args[0].int())! + build(conf, cmd.args[0].int(), cmd.flags.get_bool('force')!)! 
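// Taken together, the flag travels CLI -> build() -> build_target() ->
// BuildConfig.force, and the generated build script drops its "package
// already present on the server" check when the flag is set. Forcing a build
// straight from V code would look roughly like this (sketch; the target id is
// a placeholder and image_id comes from create_build_image):
//
//   target := c.get_target(3)!
//   image_id := build.create_build_image(conf.base_image)!
//   res := build.build_target(conf.address, conf.api_key, image_id, target, true)!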
} }, ] diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v index beed9fc..42edc92 100644 --- a/src/cron/daemon/build.v +++ b/src/cron/daemon/build.v @@ -79,7 +79,7 @@ fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) { mut status := 0 res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(), - &sb.target) or { + &sb.target, false) or { d.ldebug('build_target error: $err.msg()') status = 1 diff --git a/vieter.toml b/vieter.toml index 9a68ae3..74a7397 100644 --- a/vieter.toml +++ b/vieter.toml @@ -8,7 +8,7 @@ arch = "x86_64" address = "http://localhost:8000" -global_schedule = '* *' +# global_schedule = '* *' api_update_frequency = 2 image_rebuild_frequency = 1 max_concurrent_builds = 3 From 6a208dbe6ca73b25e1e0e30f3b3b266620061ebc Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 21:22:22 +0100 Subject: [PATCH 33/51] feat: allow queueing one-time builds --- src/build/queue.v | 46 ++++++++++++++++++++++++++--------------- src/server/api_builds.v | 21 +++++++++++++++++++ src/server/server.v | 2 +- 3 files changed, 51 insertions(+), 18 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index dd2bb87..1395a0b 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -16,6 +16,8 @@ pub: ce CronExpression // Actual build config sent to the agent config BuildConfig + // Whether this is a one-time job + single bool } // Allows BuildJob structs to be sorted according to their timestamp in @@ -53,22 +55,29 @@ pub fn new_job_queue(default_schedule CronExpression, default_base_image string) // insert_all executes insert for each architecture of the given Target. pub fn (mut q BuildJobQueue) insert_all(target Target) ! { for arch in target.arch { - q.insert(target, arch.value)! + q.insert(target: target, arch: arch.value)! } } +[params] +pub struct InsertConfig { + target Target [required] + arch string [required] + single bool +} + // insert a new target's job into the queue for the given architecture. This // job will then be endlessly rescheduled after being pop'ed, unless removed // explicitely. -pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { +pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! { lock q.mutex { - if arch !in q.queues { - q.queues[arch] = MinHeap{} + if input.arch !in q.queues { + q.queues[input.arch] = MinHeap{} } - ce := if target.schedule != '' { - parse_expression(target.schedule) or { - return error("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()") + ce := if input.target.schedule != '' { + parse_expression(input.target.schedule) or { + return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()") } } else { q.default_schedule @@ -80,18 +89,19 @@ pub fn (mut q BuildJobQueue) insert(target Target, arch string) ! { created: time.now() timestamp: timestamp ce: ce + single: input.single config: BuildConfig{ - target_id: target.id - kind: target.kind - url: target.url - branch: target.branch - repo: target.repo + target_id: input.target.id + kind: input.target.kind + url: input.target.url + branch: input.target.branch + repo: input.target.repo // TODO make this configurable base_image: q.default_base_image } } - q.queues[arch].insert(job) + q.queues[input.arch].insert(job) } } @@ -158,10 +168,12 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { if job.timestamp < time.now() { job = q.queues[arch].pop()? - // TODO how do we handle this properly? 
Is it even possible for a - // cron expression to not return a next time if it's already been - // used before? - q.reschedule(job, arch) or {} + if !job.single { + // TODO how do we handle this properly? Is it even possible for a + // cron expression to not return a next time if it's already been + // used before? + q.reschedule(job, arch) or {} + } return job } diff --git a/src/server/api_builds.v b/src/server/api_builds.v index 922b252..bc841ce 100644 --- a/src/server/api_builds.v +++ b/src/server/api_builds.v @@ -19,3 +19,24 @@ fn (mut app App) v1_poll_job_queue() web.Result { return app.json(.ok, new_data_response(out)) } + +['/api/v1/jobs/queue'; auth; post] +fn (mut app App) v1_queue_job() web.Result { + target_id := app.query['target'] or { + return app.json(.bad_request, new_response('Missing target query arg.')) + }.int() + + arch := app.query['arch'] or { + return app.json(.bad_request, new_response('Missing arch query arg.')) + } + + target := app.db.get_target(target_id) or { + return app.json(.bad_request, new_response('Unknown target id.')) + } + + app.job_queue.insert(target: target, arch: arch, single: true) or { + return app.status(.internal_server_error) + } + + return app.status(.ok) +} diff --git a/src/server/server.v b/src/server/server.v index 1e86906..6d18f09 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -37,7 +37,7 @@ fn (mut app App) init_job_queue() ! { for targets.len > 0 { for target in targets { for arch in target.arch { - app.job_queue.insert(target, arch.value)! + app.job_queue.insert(target: target, arch: arch.value)! } } From 2cc3e8404e98329bb8b0dfd5dda39393b0abc2da Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Tue, 13 Dec 2022 22:03:04 +0100 Subject: [PATCH 34/51] feat: queue one-time builds from CLI --- src/build/queue.v | 40 +++++++++++++++---------- src/client/jobs.v | 11 +++++++ src/console/targets/targets.v | 28 ++++++++++++++++- src/server/{api_builds.v => api_jobs.v} | 8 ++++- 4 files changed, 69 insertions(+), 18 deletions(-) rename src/server/{api_builds.v => api_jobs.v} (83%) diff --git a/src/build/queue.v b/src/build/queue.v index 1395a0b..5d50f34 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -7,7 +7,7 @@ import datatypes { MinHeap } import util struct BuildJob { -pub: +pub mut: // Time at which this build job was created/queued created time.Time // Next timestamp from which point this job is allowed to be executed @@ -64,6 +64,8 @@ pub struct InsertConfig { target Target [required] arch string [required] single bool + force bool + now bool } // insert a new target's job into the queue for the given architecture. This @@ -75,20 +77,8 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! { q.queues[input.arch] = MinHeap{} } - ce := if input.target.schedule != '' { - parse_expression(input.target.schedule) or { - return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()") - } - } else { - q.default_schedule - } - - timestamp := ce.next_from_now()! - - job := BuildJob{ + mut job := BuildJob{ created: time.now() - timestamp: timestamp - ce: ce single: input.single config: BuildConfig{ target_id: input.target.id @@ -98,9 +88,25 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! 
{ repo: input.target.repo // TODO make this configurable base_image: q.default_base_image + force: input.force } } + if !input.now { + ce := if input.target.schedule != '' { + parse_expression(input.target.schedule) or { + return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()") + } + } else { + q.default_schedule + } + + job.timestamp = ce.next_from_now()! + job.ce = ce + } else { + job.timestamp = time.now() + } + q.queues[input.arch].insert(job) } } @@ -198,8 +204,10 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { if job.timestamp < time.now() { job = q.queues[arch].pop() or { break } - // TODO idem - q.reschedule(job, arch) or {} + if !job.single { + // TODO idem + q.reschedule(job, arch) or {} + } out << job } else { diff --git a/src/client/jobs.v b/src/client/jobs.v index 7fee94f..2d8e99b 100644 --- a/src/client/jobs.v +++ b/src/client/jobs.v @@ -1,6 +1,7 @@ module client import build { BuildConfig } +import web.response { Response } // poll_jobs requests a list of new build jobs from the server. pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { @@ -11,3 +12,13 @@ pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { return data.data } + +pub fn (c &Client) queue_job(target_id int, arch string, force bool) !Response { + data := c.send_request(.post, '/api/v1/jobs/queue', { + 'target': target_id.str() + 'arch': arch + 'force': force.str() + })! + + return data +} diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index ffcd36c..b527896 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -188,12 +188,38 @@ pub fn cmd() cli.Command { description: 'Build the target without checking whether it needs to be renewed.' flag: cli.FlagType.bool }, + cli.Flag{ + name: 'remote' + description: 'Schedule the build on the server instead of running it locally.' + flag: cli.FlagType.bool + }, + cli.Flag{ + name: 'arch' + description: 'Architecture to schedule build for. Required when using -remote.' + flag: cli.FlagType.string + }, ] execute: fn (cmd cli.Command) ! { config_file := cmd.flags.get_string('config-file')! conf := vconf.load(prefix: 'VIETER_', default_path: config_file)! - build(conf, cmd.args[0].int(), cmd.flags.get_bool('force')!)! + remote := cmd.flags.get_bool('remote')! + force := cmd.flags.get_bool('force')! + target_id := cmd.args[0].int() + + if remote { + arch := cmd.flags.get_string('arch')! + + if arch == '' { + return error('When scheduling the build remotely, you have to specify an architecture.') + } + + c := client.new(conf.address, conf.api_key) + res := c.queue_job(target_id, arch, force)! + println(res.message) + } else { + build(conf, target_id, force)! 
+ } } }, ] diff --git a/src/server/api_builds.v b/src/server/api_jobs.v similarity index 83% rename from src/server/api_builds.v rename to src/server/api_jobs.v index bc841ce..b75e70e 100644 --- a/src/server/api_builds.v +++ b/src/server/api_jobs.v @@ -30,11 +30,17 @@ fn (mut app App) v1_queue_job() web.Result { return app.json(.bad_request, new_response('Missing arch query arg.')) } + if arch == '' { + return app.json(.bad_request, new_response('Empty arch query arg.')) + } + + force := 'force' in app.query + target := app.db.get_target(target_id) or { return app.json(.bad_request, new_response('Unknown target id.')) } - app.job_queue.insert(target: target, arch: arch, single: true) or { + app.job_queue.insert(target: target, arch: arch, single: true, now: true, force: force) or { return app.status(.internal_server_error) } From d7a04c6ebff26a95d8680d3fb233bf370943a646 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Wed, 14 Dec 2022 16:03:57 +0100 Subject: [PATCH 35/51] chore: please the great lint --- CHANGELOG.md | 4 ++++ src/agent/images.v | 4 ++-- src/client/jobs.v | 2 ++ src/server/api_jobs.v | 1 + 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aed7571..c55e16b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * Migrated codebase to V 0.3.2 * Cron expression parser now uses bitfields instead of bool arrays * Added option to deploy using agent-server architecture instead of cron daemon +* Allow force-building packages, meaning the build won't check if the + repository is already up to date +* Allow scheduling builds on the server from the CLI tool instead of building + them locally ### Fixed diff --git a/src/agent/images.v b/src/agent/images.v index 64a8f74..185192e 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -9,9 +9,9 @@ import build // structure can manage images from any number of base images, paving the way // for configurable base images per target/repository. struct ImageManager { -mut: max_image_age int [required] - // For each base images, one or more builder images can exist at the same +mut: + // For each base image, one or more builder images can exist at the same // time images map[string][]string [required] // For each base image, we track when its newest image was built diff --git a/src/client/jobs.v b/src/client/jobs.v index 2d8e99b..440affa 100644 --- a/src/client/jobs.v +++ b/src/client/jobs.v @@ -13,6 +13,8 @@ pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { return data.data } +// queue_job adds a new one-time build job for the given target to the job +// queue. pub fn (c &Client) queue_job(target_id int, arch string, force bool) !Response { data := c.send_request(.post, '/api/v1/jobs/queue', { 'target': target_id.str() 'arch': arch 'force': force.str() })! diff --git a/src/server/api_jobs.v b/src/server/api_jobs.v index b75e70e..7795351 100644 --- a/src/server/api_jobs.v +++ b/src/server/api_jobs.v @@ -20,6 +20,7 @@ fn (mut app App) v1_poll_job_queue() web.Result { return app.json(.ok, new_data_response(out)) } +// v1_queue_job allows queueing a new one-time build job for the given target.
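// Over HTTP, queueing such a job boils down to a single authenticated POST
// (hypothetical host and key; note the handler only checks for the presence
// of 'force' in the query string, not its value):
//
//   POST https://example.com/api/v1/jobs/queue?target=3&arch=x86_64&force=true
//   X-API-KEY: secret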
['/api/v1/jobs/queue'; auth; post] fn (mut app App) v1_queue_job() web.Result { target_id := app.query['target'] or { From 51df1874f5b2d31b88ed68a02a910ac091b53af3 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Wed, 14 Dec 2022 16:33:50 +0100 Subject: [PATCH 36/51] agent: some better logging --- src/agent/daemon.v | 10 ++++------ src/build/build.v | 5 +++++ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/agent/daemon.v b/src/agent/daemon.v index ff29d5e..0647733 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -146,7 +146,7 @@ fn (mut d AgentDaemon) start_build(config BuildConfig) bool { // run_build actually starts the build process for a given target. fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) { - d.linfo('started build: $config.url -> $config.repo') + d.linfo('started build: $config') // 0 means success, 1 means failure mut status := 0 @@ -164,16 +164,14 @@ fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) { } if status == 0 { - d.linfo('finished build: $config.url -> $config.repo; uploading logs...') + d.linfo('Uploading build logs for $config') // TODO use the arch value here build_arch := os.uname().machine d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch, - res.exit_code, res.logs) or { - d.lerror('Failed to upload logs for build: $config.url -> $config.repo') - } + res.exit_code, res.logs) or { d.lerror('Failed to upload logs for $config') } } else { - d.linfo('an error occured during build: $config.url -> $config.repo') + d.lwarn('an error occurred during build: $config') } stdatomic.store_u64(&d.atomics[build_index], agent.build_done) diff --git a/src/build/build.v b/src/build/build.v index 6da851a..3d916bf 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -27,6 +27,11 @@ pub: force bool } +// str return a single-line string representation of a build log +pub fn (c BuildConfig) str() string { + return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, repo: $c.repo, base_image: $c.base_image, force: $c.force }' +} + // create_build_image creates a builder image given some base image which can // then be used to build & package Arch images. It mostly just updates the // system, install some necessary packages & creates a non-root user to run From 60cb91c18cf1cf7ea66835d1b8e73cb66ff05ab3 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Wed, 14 Dec 2022 17:23:51 +0100 Subject: [PATCH 37/51] chore: final read before merging --- src/build/queue.v | 4 +++- src/server/server.v | 4 +--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/build/queue.v b/src/build/queue.v index 5d50f34..7902173 100644 --- a/src/build/queue.v +++ b/src/build/queue.v @@ -144,7 +144,9 @@ fn (mut q BuildJobQueue) pop_invalid(arch string) { // peek shows the first job for the given architecture that's ready to be // executed, if present. pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob { - rlock q.mutex { + // Even peek requires a write lock, because pop_invalid can modify the data + // structure + lock q.mutex { if arch !in q.queues { return none } diff --git a/src/server/server.v b/src/server/server.v index 6d18f09..74b1f37 100644 --- a/src/server/server.v +++ b/src/server/server.v @@ -36,9 +36,7 @@ fn (mut app App) init_job_queue() ! { for targets.len > 0 { for target in targets { - for arch in target.arch { - app.job_queue.insert(target: target, arch: arch.value)! - } + app.job_queue.insert_all(target)! 
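// insert_all is the same per-architecture loop, just moved behind the
// queue's own API; patch 33 in this series defines it as:
//
//   pub fn (mut q BuildJobQueue) insert_all(target Target) ! {
//       for arch in target.arch {
//           q.insert(target: target, arch: arch.value)!
//       }
//   }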
} i += 25 From 0bd51586088588c352e04f5e4727f856b66e5aeb Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Thu, 15 Dec 2022 09:46:48 +0100 Subject: [PATCH 38/51] feat(client): handle empty and non-successful responses --- src/client/client.v | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/src/client/client.v b/src/client/client.v index aa6094a..3541555 100644 --- a/src/client/client.v +++ b/src/client/client.v @@ -1,8 +1,8 @@ module client -import net.http { Method } +import net.http { Method, Status } import net.urllib -import web.response { Response } +import web.response { Response, new_data_response } import json pub struct Client { @@ -56,8 +56,29 @@ fn (c &Client) send_request(method Method, url string, params map[string]stri // send_request_with_body calls send_request_raw_response & parses its // output as a Response object. fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) !Response { - res_text := c.send_request_raw_response(method, url, params, body)! - data := json.decode(Response, res_text)! + res := c.send_request_raw(method, url, params, body)! + + // Just return an empty successful response + if res.status_code == Status.no_content.int() { + return new_data_response(T{}) + } + + // Non-successful requests are expected to return either an empty body or + // Response + if res.status_code < 200 || res.status_code > 299 { + status_string := http.status_from_int(res.status_code).str() + + // A non-successful status call will have an empty body + if res.body == '' { + return error('Error $res.status_code ($status_string): (empty response)') + } + + data := json.decode(Response, res.body)! + + return error('Status $res.status_code ($status_string): $data.message') + } + + data := json.decode(Response, res.body)! return data } From 0727d0fd2517cb5dd67aa6f25d94b651c0f351f1 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Thu, 15 Dec 2022 10:01:45 +0100 Subject: [PATCH 39/51] refactor(client): streamline code & improve error propagation --- src/client/client.v | 13 ++++++------- src/client/jobs.v | 7 ++----- src/client/logs.v | 19 ++++--------------- src/client/targets.v | 13 ++++++------- src/console/logs/logs.v | 12 ++---------- src/console/targets/targets.v | 32 +++++++++----------------------- 6 files changed, 29 insertions(+), 67 deletions(-) diff --git a/src/client/client.v b/src/client/client.v index 3541555..5f24197 100644 --- a/src/client/client.v +++ b/src/client/client.v @@ -1,6 +1,6 @@ module client -import net.http { Method, Status } +import net.http { Method } import net.urllib import web.response { Response, new_data_response } import json @@ -57,25 +57,24 @@ fn (c &Client) send_request(method Method, url string, params map[string]stri // output as a Response object. fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) !Response { res := c.send_request_raw(method, url, params, body)! 
+ status := http.status_from_int(res.status_code) // Just return an empty successful response - if res.status_code == Status.no_content.int() { + if status.is_success() && res.body == '' { return new_data_response(T{}) } // Non-successful requests are expected to return either an empty body or // Response - if res.status_code < 200 || res.status_code > 299 { - status_string := http.status_from_int(res.status_code).str() - + if status.is_error() { // A non-successful status call will have an empty body if res.body == '' { - return error('Error $res.status_code ($status_string): (empty response)') + return error('Error $res.status_code ($status.str()): (empty response)') } data := json.decode(Response, res.body)! - return error('Status $res.status_code ($status_string): $data.message') + return error('Status $res.status_code ($status.str()): $data.message') } data := json.decode(Response, res.body)! diff --git a/src/client/jobs.v b/src/client/jobs.v index 440affa..a545499 100644 --- a/src/client/jobs.v +++ b/src/client/jobs.v @@ -1,7 +1,6 @@ module client import build { BuildConfig } -import web.response { Response } // poll_jobs requests a list of new build jobs from the server. pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { @@ -15,12 +14,10 @@ pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig { // queue_job adds a new one-time build job for the given target to the job // queue. -pub fn (c &Client) queue_job(target_id int, arch string, force bool) !Response { - data := c.send_request(.post, '/api/v1/jobs/queue', { +pub fn (c &Client) queue_job(target_id int, arch string, force bool) ! { + c.send_request(.post, '/api/v1/jobs/queue', { 'target': target_id.str() 'arch': arch 'force': force.str() })! - - return data } diff --git a/src/client/logs.v b/src/client/logs.v index eaddc8c..85063bc 100644 --- a/src/client/logs.v +++ b/src/client/logs.v @@ -6,29 +6,18 @@ import web.response { Response } import time // get_build_logs returns all build logs. -pub fn (c &Client) get_build_logs(filter BuildLogFilter) !Response<[]BuildLog> { +pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog { params := models.params_from(filter) data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)! - return data -} - -// get_build_logs_for_target returns all build logs for a given target. -pub fn (c &Client) get_build_logs_for_target(target_id int) !Response<[]BuildLog> { - params := { - 'repo': target_id.str() - } - - data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)! - - return data + return data.data } // get_build_log returns a specific build log. -pub fn (c &Client) get_build_log(id int) !Response { +pub fn (c &Client) get_build_log(id int) !BuildLog { data := c.send_request(Method.get, '/api/v1/logs/$id', {})! - return data + return data.data } // get_build_log_content returns the contents of the build log file. diff --git a/src/client/targets.v b/src/client/targets.v index fd4254c..40bfdae 100644 --- a/src/client/targets.v +++ b/src/client/targets.v @@ -2,7 +2,6 @@ module client import models { Target, TargetFilter } import net.http { Method } -import web.response { Response } // get_targets returns a list of targets, given a filter object. pub fn (c &Client) get_targets(filter TargetFilter) ![]Target { @@ -49,24 +48,24 @@ pub struct NewTarget { } // add_target adds a new target to the server. 
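// With this refactor it returns the created target's id directly instead of
// a wrapped Response. Example usage (placeholder key; the URL and repo reuse
// values from the build tests):
//
//   c := client.new('https://example.com', 'secret')
//   id := c.add_target(kind: 'git', url: 'https://examplerepo.com', repo: 'vieter')!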
-pub fn (c &Client) add_target(t NewTarget) !Response { +pub fn (c &Client) add_target(t NewTarget) !int { params := models.params_from(t) data := c.send_request(Method.post, '/api/v1/targets', params)! - return data + return data.data } // remove_target removes the target with the given id from the server. -pub fn (c &Client) remove_target(id int) !Response { +pub fn (c &Client) remove_target(id int) !string { data := c.send_request(Method.delete, '/api/v1/targets/$id', {})! - return data + return data.data } // patch_target sends a PATCH request to the given target with the params as // payload. -pub fn (c &Client) patch_target(id int, params map[string]string) !Response { +pub fn (c &Client) patch_target(id int, params map[string]string) !string { data := c.send_request(Method.patch, '/api/v1/targets/$id', params)! - return data + return data.data } diff --git a/src/console/logs/logs.v b/src/console/logs/logs.v index 1330dd0..3064a58 100644 --- a/src/console/logs/logs.v +++ b/src/console/logs/logs.v @@ -183,15 +183,7 @@ fn print_log_list(logs []BuildLog, raw bool) ! { // list prints a list of all build logs. fn list(conf Config, filter BuildLogFilter, raw bool) ! { c := client.new(conf.address, conf.api_key) - logs := c.get_build_logs(filter)!.data - - print_log_list(logs, raw)! -} - -// list prints a list of all build logs for a given target. -fn list_for_target(conf Config, target_id int, raw bool) ! { - c := client.new(conf.address, conf.api_key) - logs := c.get_build_logs_for_target(target_id)!.data + logs := c.get_build_logs(filter)! print_log_list(logs, raw)! } @@ -199,7 +191,7 @@ fn list_for_target(conf Config, target_id int, raw bool) ! { // info print the detailed info for a given build log. fn info(conf Config, id int) ! { c := client.new(conf.address, conf.api_key) - log := c.get_build_log(id)!.data + log := c.get_build_log(id)! print(log) } diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index b527896..b277410 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -215,8 +215,7 @@ pub fn cmd() cli.Command { } c := client.new(conf.address, conf.api_key) - res := c.queue_job(target_id, arch, force)! - println(res.message) + c.queue_job(target_id, arch, force)! } else { build(conf, target_id, force)! } @@ -245,23 +244,19 @@ fn list(conf Config, filter TargetFilter, raw bool) ! { // add adds a new repository to the server's list. fn add(conf Config, t &NewTarget, raw bool) ! { c := client.new(conf.address, conf.api_key) - res := c.add_target(t)! + target_id := c.add_target(t)! if raw { - println(res.data) + println(target_id) } else { - println('Target added with id $res.data') + println('Target added with id $target_id') } } // remove removes a repository from the server's list. fn remove(conf Config, id string) ! { - id_int := id.int() - - if id_int != 0 { - c := client.new(conf.address, conf.api_key) - c.remove_target(id_int)! - } + c := client.new(conf.address, conf.api_key) + c.remove_target(id.int())! } // patch patches a given repository with the provided params. @@ -274,22 +269,13 @@ fn patch(conf Config, id string, params map[string]string) ! { } } - id_int := id.int() - if id_int != 0 { - c := client.new(conf.address, conf.api_key) - c.patch_target(id_int, params)! - } + c := client.new(conf.address, conf.api_key) + c.patch_target(id.int(), params)! } // info shows detailed information for a given repo. fn info(conf Config, id string) ! 
{ - id_int := id.int() - - if id_int == 0 { - return - } - c := client.new(conf.address, conf.api_key) - repo := c.get_target(id_int)! + repo := c.get_target(id.int())! println(repo) } From b634775ca387d9827f933af1c7c1a521e1c4926b Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Thu, 15 Dec 2022 10:46:58 +0100 Subject: [PATCH 40/51] refactor(server): clean up server responses a bit --- src/server/api_logs.v | 23 +++++++++++------------ src/server/api_targets.v | 11 +++++------ src/web/web.v | 7 ------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/src/server/api_logs.v b/src/server/api_logs.v index fcbf024..c7521dd 100644 --- a/src/server/api_logs.v +++ b/src/server/api_logs.v @@ -1,7 +1,6 @@ module server import web -import net.http import net.urllib import web.response { new_data_response, new_response } import db @@ -15,7 +14,7 @@ import models { BuildLog, BuildLogFilter } ['/api/v1/logs'; auth; get] fn (mut app App) v1_get_logs() web.Result { filter := models.from_params(app.query) or { - return app.json(http.Status.bad_request, new_response('Invalid query parameters.')) + return app.json(.bad_request, new_response('Invalid query parameters.')) } logs := app.db.get_build_logs(filter) @@ -25,7 +24,7 @@ fn (mut app App) v1_get_logs() web.Result { // v1_get_single_log returns the build log with the given id. ['/api/v1/logs/:id'; auth; get] fn (mut app App) v1_get_single_log(id int) web.Result { - log := app.db.get_build_log(id) or { return app.not_found() } + log := app.db.get_build_log(id) or { return app.status(.not_found) } return app.json(.ok, new_data_response(log)) } @@ -33,7 +32,7 @@ fn (mut app App) v1_get_single_log(id int) web.Result { // v1_get_log_content returns the actual build log file for the given id. ['/api/v1/logs/:id/content'; auth; get] fn (mut app App) v1_get_log_content(id int) web.Result { - log := app.db.get_build_log(id) or { return app.not_found() } + log := app.db.get_build_log(id) or { return app.status(.not_found) } file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss') full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.target_id.str(), log.arch, file_name) @@ -57,25 +56,25 @@ fn (mut app App) v1_post_log() web.Result { start_time_int := app.query['startTime'].int() if start_time_int == 0 { - return app.json(http.Status.bad_request, new_response('Invalid or missing start time.')) + return app.json(.bad_request, new_response('Invalid or missing start time.')) } start_time := time.unix(start_time_int) end_time_int := app.query['endTime'].int() if end_time_int == 0 { - return app.json(http.Status.bad_request, new_response('Invalid or missing end time.')) + return app.json(.bad_request, new_response('Invalid or missing end time.')) } end_time := time.unix(end_time_int) if 'exitCode' !in app.query { - return app.json(http.Status.bad_request, new_response('Missing exit code.')) + return app.json(.bad_request, new_response('Missing exit code.')) } exit_code := app.query['exitCode'].int() if 'arch' !in app.query { - return app.json(http.Status.bad_request, new_response("Missing parameter 'arch'.")) + return app.json(.bad_request, new_response("Missing parameter 'arch'.")) } arch := app.query['arch'] @@ -83,7 +82,7 @@ fn (mut app App) v1_post_log() web.Result { target_id := app.query['target'].int() if !app.db.target_exists(target_id) { - return app.json(http.Status.bad_request, new_response('Unknown target.')) + return app.json(.bad_request, new_response('Unknown target.')) } // Store log in db @@ -105,7 +104,7 @@ fn (mut 
app App) v1_post_log() web.Result { os.mkdir_all(repo_logs_dir) or { app.lerror("Couldn't create dir '$repo_logs_dir'.") - return app.json(http.Status.internal_server_error, new_response('An error occured while processing the request.')) + return app.status(.internal_server_error) } } @@ -117,10 +116,10 @@ fn (mut app App) v1_post_log() web.Result { util.reader_to_file(mut app.reader, length.int(), full_path) or { app.lerror('An error occured while receiving logs: $err.msg()') - return app.json(http.Status.internal_server_error, new_response('Failed to upload logs.')) + return app.status(.internal_server_error) } } else { - return app.status(http.Status.length_required) + return app.status(.length_required) } return app.json(.ok, new_data_response(log_id)) diff --git a/src/server/api_targets.v b/src/server/api_targets.v index dc39d37..cd5cb0a 100644 --- a/src/server/api_targets.v +++ b/src/server/api_targets.v @@ -1,7 +1,6 @@ module server import web -import net.http import web.response { new_data_response, new_response } import db import models { Target, TargetArch, TargetFilter } @@ -10,7 +9,7 @@ import models { Target, TargetArch, TargetFilter } ['/api/v1/targets'; auth; get] fn (mut app App) v1_get_targets() web.Result { filter := models.from_params(app.query) or { - return app.json(http.Status.bad_request, new_response('Invalid query parameters.')) + return app.json(.bad_request, new_response('Invalid query parameters.')) } targets := app.db.get_targets(filter) @@ -20,7 +19,7 @@ fn (mut app App) v1_get_targets() web.Result { // v1_get_single_target returns the information for a single target. ['/api/v1/targets/:id'; auth; get] fn (mut app App) v1_get_single_target(id int) web.Result { - target := app.db.get_target(id) or { return app.not_found() } + target := app.db.get_target(id) or { return app.status(.not_found) } return app.json(.ok, new_data_response(target)) } @@ -37,12 +36,12 @@ fn (mut app App) v1_post_target() web.Result { } mut new_target := models.from_params(params) or { - return app.json(http.Status.bad_request, new_response(err.msg())) + return app.json(.bad_request, new_response(err.msg())) } // Ensure someone doesn't submit an invalid kind if new_target.kind !in models.valid_kinds { - return app.json(http.Status.bad_request, new_response('Invalid kind.')) + return app.json(.bad_request, new_response('Invalid kind.')) } id := app.db.add_target(new_target) @@ -61,7 +60,7 @@ fn (mut app App) v1_delete_target(id int) web.Result { app.db.delete_target(id) app.job_queue.invalidate(id) - return app.json(.ok, new_response('')) + return app.status(.ok) } // v1_patch_target updates a target's data with the given query params. 
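The convention after this cleanup: `app.status` for responses that carry no body, `app.json` with `new_response` for plain messages, and `new_data_response` for payloads. A minimal handler following it could look like this (the route and `get_thing` are illustrative, not part of this patch):

['/api/v1/things/:id'; auth; get]
fn (mut app App) v1_get_thing(id int) web.Result {
	thing := app.db.get_thing(id) or { return app.status(.not_found) }

	return app.json(.ok, new_data_response(thing))
}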
diff --git a/src/web/web.v b/src/web/web.v index 1b40e7a..565baff 100644 --- a/src/web/web.v +++ b/src/web/web.v @@ -260,13 +260,6 @@ pub fn (mut ctx Context) redirect(url string) Result { return Result{} } -// not_found Send an not_found response -pub fn (mut ctx Context) not_found() Result { - ctx.send_custom_response(http_404) or {} - - return Result{} -} - interface DbInterface { db voidptr } From dbbe5c1e51cbd54483d2a4aee89a194960106ff5 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Thu, 15 Dec 2022 12:09:43 +0100 Subject: [PATCH 41/51] fix(agent): remove infinite loop and account for externally removed images --- src/agent/images.v | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/agent/images.v b/src/agent/images.v index 185192e..dd32656 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -73,7 +73,21 @@ fn (mut m ImageManager) clean_old_images() { // wasn't deleted. Therefore, we move the index over. If the function // returns true, the array's length has decreased by one so we don't // move the index. - dd.remove_image(m.images[image][i]) or { i += 1 } + dd.remove_image(m.images[image][i]) or { + // The image was removed by an external event + if err.code() == 404 { + m.images[image].delete(i) + } + // The image couldn't be removed, so we need to keep track of + // it + else { + i += 1 + } + + continue + } + + m.images[image].delete(i) } } } From a48358fd75101b82aa78c5b196324f08719b918f Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Thu, 15 Dec 2022 23:47:41 +0100 Subject: [PATCH 42/51] fix: don't run prepare step twice in builds --- src/build/build_script_git.sh | 4 ++-- src/build/build_script_git_branch.sh | 4 ++-- src/build/build_script_url.sh | 4 ++-- src/build/shell.v | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/build/build_script_git.sh b/src/build/build_script_git.sh index 73e0965..2644243 100644 --- a/src/build/build_script_git.sh +++ b/src/build/build_script_git.sh @@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0 echo -e '+ [ "$(id -u)" == 0 ] && exit 0' [ "$(id -u)" == 0 ] && exit 0 -echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' -MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done +echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' +MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done diff --git a/src/build/build_script_git_branch.sh b/src/build/build_script_git_branch.sh index be1ff4f..9f36bdc 100644 --- a/src/build/build_script_git_branch.sh +++ b/src/build/build_script_git_branch.sh @@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0 echo -e '+ [ "$(id -u)" == 0 ] && exit 0' [ "$(id -u)" == 0 ] && exit 0 -echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && 
for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' -MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done +echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' +MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done diff --git a/src/build/build_script_url.sh b/src/build/build_script_url.sh index 3bc97e1..2d27de7 100644 --- a/src/build/build_script_url.sh +++ b/src/build/build_script_url.sh @@ -18,5 +18,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0 echo -e '+ [ "$(id -u)" == 0 ] && exit 0' [ "$(id -u)" == 0 ] && exit 0 -echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' -MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done +echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' +MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done diff --git a/src/build/shell.v b/src/build/shell.v index ac61e07..6aa2413 100644 --- a/src/build/shell.v +++ b/src/build/shell.v @@ -79,7 +79,7 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st } commands << [ - 'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done', + 'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done', ] return echo_commands(commands).join('\n') From 1ce7b9d5715d8d93deda284bd8b0dbd3d113dc26 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Fri, 16 Dec 2022 11:21:28 +0100 Subject: [PATCH 43/51] feat: add option to specify subdirectory in repo to use --- src/build/build.v | 4 +- .../{build_script_git.sh => scripts/git.sh} | 0 .../git_branch.sh} | 0 src/build/scripts/git_path.sh | 20 +++++++ src/build/scripts/git_path_spaces.sh | 20 +++++++ .../{build_script_url.sh => scripts/url.sh} | 0 src/build/shell.v | 7 ++- src/build/shell_test.v | 60 +++++++++++++------ src/client/targets.v | 1 + src/console/targets/targets.v | 11 ++++ src/db/db.v | 2 + src/db/migrations/005-repo-path/down.sql | 1 + src/db/migrations/005-repo-path/up.sql | 1 + src/models/targets.v | 21 ++++--- 14 files changed, 120 insertions(+), 28 deletions(-) rename src/build/{build_script_git.sh => scripts/git.sh} (100%) rename src/build/{build_script_git_branch.sh => scripts/git_branch.sh} (100%) create mode 100644 src/build/scripts/git_path.sh create mode 100644 
src/build/scripts/git_path_spaces.sh rename src/build/{build_script_url.sh => scripts/url.sh} (100%) create mode 100644 src/db/migrations/005-repo-path/down.sql create mode 100644 src/db/migrations/005-repo-path/up.sql diff --git a/src/build/build.v b/src/build/build.v index 3d916bf..c6aa7f1 100644 --- a/src/build/build.v +++ b/src/build/build.v @@ -22,6 +22,7 @@ pub: kind string url string branch string + path string repo string base_image string force bool @@ -29,7 +30,7 @@ pub: // str return a single-line string representation of a build log pub fn (c BuildConfig) str() string { - return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, repo: $c.repo, base_image: $c.base_image, force: $c.force }' + return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, path: $c.path, repo: $c.repo, base_image: $c.base_image, force: $c.force }' } // create_build_image creates a builder image given some base image which can @@ -116,6 +117,7 @@ pub fn build_target(address string, api_key string, base_image_id string, target kind: target.kind url: target.url branch: target.branch + path: target.path repo: target.repo base_image: base_image_id force: force diff --git a/src/build/build_script_git.sh b/src/build/scripts/git.sh similarity index 100% rename from src/build/build_script_git.sh rename to src/build/scripts/git.sh diff --git a/src/build/build_script_git_branch.sh b/src/build/scripts/git_branch.sh similarity index 100% rename from src/build/build_script_git_branch.sh rename to src/build/scripts/git_branch.sh diff --git a/src/build/scripts/git_path.sh b/src/build/scripts/git_path.sh new file mode 100644 index 0000000..65b7fb9 --- /dev/null +++ b/src/build/scripts/git_path.sh @@ -0,0 +1,20 @@ +echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf' +echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf +echo -e '+ pacman -Syu --needed --noconfirm' +pacman -Syu --needed --noconfirm +echo -e '+ su builder' +su builder +echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo' +git clone --single-branch --depth 1 'https://examplerepo.com' repo +echo -e '+ cd '\''repo/example/path'\''' +cd 'repo/example/path' +echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm' +makepkg --nobuild --syncdeps --needed --noconfirm +echo -e '+ source PKGBUILD' +source PKGBUILD +echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0' +curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0 +echo -e '+ [ "$(id -u)" == 0 ] && exit 0' +[ "$(id -u)" == 0 ] && exit 0 +echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' +MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done diff --git a/src/build/scripts/git_path_spaces.sh b/src/build/scripts/git_path_spaces.sh new file mode 100644 index 0000000..b632b91 --- /dev/null +++ b/src/build/scripts/git_path_spaces.sh @@ -0,0 +1,20 @@ +echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf' +echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> 
/etc/pacman.conf +echo -e '+ pacman -Syu --needed --noconfirm' +pacman -Syu --needed --noconfirm +echo -e '+ su builder' +su builder +echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo' +git clone --single-branch --depth 1 'https://examplerepo.com' repo +echo -e '+ cd '\''repo/example/path with spaces'\''' +cd 'repo/example/path with spaces' +echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm' +makepkg --nobuild --syncdeps --needed --noconfirm +echo -e '+ source PKGBUILD' +source PKGBUILD +echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0' +curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0 +echo -e '+ [ "$(id -u)" == 0 ] && exit 0' +[ "$(id -u)" == 0 ] && exit 0 +echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done' +MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done diff --git a/src/build/build_script_url.sh b/src/build/scripts/url.sh similarity index 100% rename from src/build/build_script_url.sh rename to src/build/scripts/url.sh diff --git a/src/build/shell.v b/src/build/shell.v index 6aa2413..c459a99 100644 --- a/src/build/shell.v +++ b/src/build/shell.v @@ -59,8 +59,13 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st } } + commands << if config.path != '' { + "cd 'repo/$config.path'" + } else { + 'cd repo' + } + commands << [ - 'cd repo', 'makepkg --nobuild --syncdeps --needed --noconfirm', 'source PKGBUILD', ] diff --git a/src/build/shell_test.v b/src/build/shell_test.v index 8bb22d9..e44c5ff 100644 --- a/src/build/shell_test.v +++ b/src/build/shell_test.v @@ -1,5 +1,46 @@ module build +fn test_create_build_script_git() { + config := BuildConfig{ + target_id: 1 + kind: 'git' + url: 'https://examplerepo.com' + repo: 'vieter' + base_image: 'not-used:latest' + } + + build_script := create_build_script('https://example.com', config, 'x86_64') + expected := $embed_file('scripts/git.sh') + + assert build_script == expected.to_string().trim_space() +} + +fn test_create_build_script_git_path() { + mut config := BuildConfig{ + target_id: 1 + kind: 'git' + url: 'https://examplerepo.com' + repo: 'vieter' + path: 'example/path' + base_image: 'not-used:latest' + } + + mut build_script := create_build_script('https://example.com', config, 'x86_64') + mut expected := $embed_file('scripts/git_path.sh') + + assert build_script == expected.to_string().trim_space() + + config = BuildConfig{ + ...config + path: 'example/path with spaces' + } + + build_script = create_build_script('https://example.com', config, 'x86_64') + expected = $embed_file('scripts/git_path_spaces.sh') + + assert build_script == expected.to_string().trim_space() +} + fn test_create_build_script_git_branch() { config := BuildConfig{ target_id: 1 @@ -11,22 +52,7 @@ fn test_create_build_script_git_branch() { } build_script := create_build_script('https://example.com', config, 'x86_64') - expected := $embed_file('build_script_git_branch.sh') - - assert build_script == expected.to_string().trim_space() -} - -fn test_create_build_script_git() { - config := BuildConfig{ - target_id: 1 - kind: 'git' - url: 'https://examplerepo.com' - repo: 'vieter' - base_image: 'not-used:latest' - } - - 
build_script := create_build_script('https://example.com', config, 'x86_64') - expected := $embed_file('build_script_git.sh') + expected := $embed_file('scripts/git_branch.sh') assert build_script == expected.to_string().trim_space() } @@ -41,7 +67,7 @@ fn test_create_build_script_url() { } build_script := create_build_script('https://example.com', config, 'x86_64') - expected := $embed_file('build_script_url.sh') + expected := $embed_file('scripts/url.sh') assert build_script == expected.to_string().trim_space() } diff --git a/src/client/targets.v b/src/client/targets.v index 40bfdae..da6a9e4 100644 --- a/src/client/targets.v +++ b/src/client/targets.v @@ -44,6 +44,7 @@ pub struct NewTarget { url string branch string repo string + path string arch []string } diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index b277410..a134926 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -82,6 +82,11 @@ pub fn cmd() cli.Command { description: "Which branch to clone; only applies to kind 'git'." flag: cli.FlagType.string }, + cli.Flag{ + name: 'path' + description: 'Subdirectory inside Git repository to use.' + flag: cli.FlagType.string + }, ] execute: fn (cmd cli.Command) ! { config_file := cmd.flags.get_string('config-file')! @@ -92,6 +97,7 @@ pub fn cmd() cli.Command { url: cmd.args[0] repo: cmd.args[1] branch: cmd.flags.get_string('branch') or { '' } + path: cmd.flags.get_string('path') or { '' } } raw := cmd.flags.get_bool('raw')! @@ -159,6 +165,11 @@ pub fn cmd() cli.Command { description: 'Kind of target.' flag: cli.FlagType.string }, + cli.Flag{ + name: 'path' + description: 'Subdirectory inside Git repository to use.' + flag: cli.FlagType.string + }, ] execute: fn (cmd cli.Command) ! { config_file := cmd.flags.get_string('config-file')! diff --git a/src/db/db.v b/src/db/db.v index 1a0160e..98ee000 100644 --- a/src/db/db.v +++ b/src/db/db.v @@ -18,12 +18,14 @@ const ( $embed_file('migrations/002-rename-to-targets/up.sql'), $embed_file('migrations/003-target-url-type/up.sql'), $embed_file('migrations/004-nullable-branch/up.sql'), + $embed_file('migrations/005-repo-path/up.sql'), ] migrations_down = [ $embed_file('migrations/001-initial/down.sql'), $embed_file('migrations/002-rename-to-targets/down.sql'), $embed_file('migrations/003-target-url-type/down.sql'), $embed_file('migrations/004-nullable-branch/down.sql'), + $embed_file('migrations/005-repo-path/down.sql'), ] ) diff --git a/src/db/migrations/005-repo-path/down.sql b/src/db/migrations/005-repo-path/down.sql new file mode 100644 index 0000000..8a6f021 --- /dev/null +++ b/src/db/migrations/005-repo-path/down.sql @@ -0,0 +1 @@ +ALTER TABLE Target DROP COLUMN path; diff --git a/src/db/migrations/005-repo-path/up.sql b/src/db/migrations/005-repo-path/up.sql new file mode 100644 index 0000000..f7e5c29 --- /dev/null +++ b/src/db/migrations/005-repo-path/up.sql @@ -0,0 +1 @@ +ALTER TABLE Target ADD COLUMN path TEXT; diff --git a/src/models/targets.v b/src/models/targets.v index c8aa535..cb60650 100644 --- a/src/models/targets.v +++ b/src/models/targets.v @@ -28,21 +28,24 @@ pub mut: repo string [nonull] // Cron schedule describing how frequently to build the repo. schedule string + // Subdirectory in the Git repository to cd into + path string // On which architectures the package is allowed to be built. In reality, - // this controls which builders will periodically build the image. + // this controls which agents will build this package when scheduled. 
arch []TargetArch [fkey: 'target_id'] } // str returns a string representation. -pub fn (gr &Target) str() string { +pub fn (t &Target) str() string { mut parts := [ - 'id: $gr.id', - 'kind: $gr.kind', - 'url: $gr.url', - 'branch: $gr.branch', - 'repo: $gr.repo', - 'schedule: $gr.schedule', - 'arch: ${gr.arch.map(it.value).join(', ')}', + 'id: $t.id', + 'kind: $t.kind', + 'url: $t.url', + 'branch: $t.branch', + 'path: $t.path', + 'repo: $t.repo', + 'schedule: $t.schedule', + 'arch: ${t.arch.map(it.value).join(', ')}', ] str := parts.join('\n') From 489931eaa809bf4b0997c86f2da2dd38f86d3f0f Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Fri, 16 Dec 2022 11:37:51 +0100 Subject: [PATCH 44/51] fix: don't buffer stdout even if not a terminal --- Dockerfile | 1 + src/main.v | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/Dockerfile b/Dockerfile index 210ae66..a27ad44 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,6 +23,7 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \ "https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \ chmod +x vieter ; \ else \ + cd src && v install && cd .. && \ LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static' make prod && \ mv pvieter vieter ; \ fi diff --git a/src/main.v b/src/main.v index 34387bf..fe0364f 100644 --- a/src/main.v +++ b/src/main.v @@ -12,6 +12,11 @@ import cron import agent fn main() { + // Stop buffering output so logs always show up immediately + unsafe { + C.setbuf(C.stdout, 0) + } + mut app := cli.Command{ name: 'vieter' description: 'Vieter is a lightweight implementation of an Arch repository server.' From 0604de26c48c4fd8e4c3285ce4d6008a6e1d64ca Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Fri, 16 Dec 2022 14:33:16 +0100 Subject: [PATCH 45/51] feat(agent): ensure images exist when starting build --- src/agent/daemon.v | 19 +++++++++++++++---- src/agent/images.v | 42 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/src/agent/daemon.v b/src/agent/daemon.v index 0647733..8fa3816 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -80,13 +80,24 @@ pub fn (mut d AgentDaemon) run() { last_poll_time = time.now() for config in new_configs { - // TODO handle this better than to just skip the config // Make sure a recent build base image is available for // building the config - d.images.refresh_image(config.base_image) or { - d.lerror(err.msg()) - continue + if !d.images.up_to_date(config.base_image) { + d.linfo('Building builder image from base image $config.base_image') + + // TODO handle this better than to just skip the config + d.images.refresh_image(config.base_image) or { + d.lerror(err.msg()) + continue + } } + + // It's technically still possible that the build image is + // removed in the very short period between building the + // builder image and starting a build container with it. If + // this happens, fate really just didn't want you to do this + // build. + d.start_build(config) } diff --git a/src/agent/images.v b/src/agent/images.v index dd32656..23b741d 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -33,16 +33,42 @@ pub fn (m &ImageManager) get(base_image string) string { return m.images[base_image].last() } -// refresh_image builds a new builder image from the given base image if the -// previous builder image is too old or non-existent.
From 0604de26c48c4fd8e4c3285ce4d6008a6e1d64ca Mon Sep 17 00:00:00 2001
From: Chewing_Bever
Date: Fri, 16 Dec 2022 14:33:16 +0100
Subject: [PATCH 45/51] feat(agent): ensure images exist when starting build

---
 src/agent/daemon.v | 19 +++++++++++++++----
 src/agent/images.v | 42 ++++++++++++++++++++++++++++++++++--------
 2 files changed, 49 insertions(+), 12 deletions(-)

diff --git a/src/agent/daemon.v b/src/agent/daemon.v
index 0647733..8fa3816 100644
--- a/src/agent/daemon.v
+++ b/src/agent/daemon.v
@@ -80,13 +80,24 @@ pub fn (mut d AgentDaemon) run() {
 			last_poll_time = time.now()
 
 			for config in new_configs {
-				// TODO handle this better than to just skip the config
 				// Make sure a recent build base image is available for
 				// building the config
-				d.images.refresh_image(config.base_image) or {
-					d.lerror(err.msg())
-					continue
+				if !d.images.up_to_date(config.base_image) {
+					d.linfo('Building builder image from base image $config.base_image')
+
+					// TODO handle this better than to just skip the config
+					d.images.refresh_image(config.base_image) or {
+						d.lerror(err.msg())
+						continue
+					}
 				}
+
+				// It's technically still possible that the build image is
+				// removed in the very short period between building the
+				// builder image and starting a build container with it. If
+				// this happens, fate really just didn't want you to do this
+				// build.
+
 				d.start_build(config)
 			}
 
diff --git a/src/agent/images.v b/src/agent/images.v
index dd32656..23b741d 100644
--- a/src/agent/images.v
+++ b/src/agent/images.v
@@ -33,16 +33,42 @@ pub fn (m &ImageManager) get(base_image string) string {
 	return m.images[base_image].last()
 }
 
-// refresh_image builds a new builder image from the given base image if the
-// previous builder image is too old or non-existent. This function will do
-// nothing if these conditions aren't met, so it's safe to call it every time
-// you want to ensure an image is up to date.
-fn (mut m ImageManager) refresh_image(base_image string) ! {
-	if base_image in m.timestamps
-		&& m.timestamps[base_image].add_seconds(m.max_image_age) > time.now() {
-		return
+// up_to_date returns whether the last known builder image is exists and is up
+// to date. If this function returns true, the last builder image may be used
+// to perform a build.
+pub fn (mut m ImageManager) up_to_date(base_image string) bool {
+	if base_image !in m.timestamps
+		|| m.timestamps[base_image].add_seconds(m.max_image_age) <= time.now() {
+		return false
 	}
 
+	// It's possible the image has been removed by some external event, so we
+	// check whether it actually exists as well.
+	mut dd := docker.new_conn() or { return false }
+
+	defer {
+		dd.close() or {}
+	}
+
+	dd.image_inspect(m.images[base_image].last()) or {
+		// Image doesn't exist, so we stop tracking it
+		if err.code() == 404 {
+			m.images[base_image].delete_last()
+			m.timestamps.delete(base_image)
+		}
+
+		// If the inspect fails, it's either because the image doesn't exist or
+		// because of some other error. Either we can't know *for certain* that
+		// the image exists, so we return false.
+		return false
+	}
+
+	return true
+}
+
+// refresh_image builds a new builder image from the given base image. This
+// function should only be called if `up_to_date` return false.
+fn (mut m ImageManager) refresh_image(base_image string) ! {
 	// TODO use better image tags for built images
 	new_image := build.create_build_image(base_image) or {
 		return error('Failed to build builder image from base image $base_image')

From af4c9e1d004ac2d6277f1333d95963eda1cf3f02 Mon Sep 17 00:00:00 2001
From: Chewing_Bever
Date: Fri, 16 Dec 2022 16:35:40 +0100
Subject: [PATCH 46/51] chore: updated changelog

---
 CHANGELOG.md | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c55e16b..54d833a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,24 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
 
+### Added
+
+* Allow specifying subdirectory inside Git repository
+* Added option to deploy using agent-server architecture instead of cron daemon
+* Allow scheduling builds on the server from the CLI tool instead of building
+  them locally
+* Allow force-building packages, meaning the build won't check if the
+  repository is already up to date
+
 ### Changed
 
 * Migrated codebase to V 0.3.2
 * Cron expression parser now uses bitfields instead of bool arrays
-* Added option to deploy using agent-server architecture instead of cron daemon
-* Allow force-building packages, meaning the build won't check if the
-  repository is already up to date
-* Allow scheduling builds on the server from the CLI tool instead of building
-  them locally
 
 ### Fixed
 
 * Arch value for target is now properly set if not provided
-* All API endpoints now return proper JSON on success
-    * CLI no longer exits with non-zero status code when removing/patching
-      target
 * Allow NULL values for branch in database
* Endpoint for adding targets now returns the correct id
+* CLI now correctly errors when sending requests fails
+* Fixed possible infinite loop when removing old build images
+* Check whether build image still exists before starting build
+* Don't run makepkg `prepare()` 
function twice +* Don't buffer stdout in Docker containers ## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0) From fe3e6e2babce6b41973b10599d9c38c5ba09dcc1 Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Fri, 16 Dec 2022 18:18:25 +0100 Subject: [PATCH 47/51] chore: some final revisions before pr merge --- src/agent/images.v | 12 ++++++------ src/client/client.v | 12 ++++++------ src/console/targets/targets.v | 21 +++++++++------------ 3 files changed, 21 insertions(+), 24 deletions(-) diff --git a/src/agent/images.v b/src/agent/images.v index 23b741d..1fec567 100644 --- a/src/agent/images.v +++ b/src/agent/images.v @@ -33,9 +33,9 @@ pub fn (m &ImageManager) get(base_image string) string { return m.images[base_image].last() } -// up_to_date returns whether the last known builder image is exists and is up -// to date. If this function returns true, the last builder image may be used -// to perform a build. +// up_to_date returns true if the last known builder image exists and is up to +// date. If this function returns true, the last builder image may be used to +// perform a build. pub fn (mut m ImageManager) up_to_date(base_image string) bool { if base_image !in m.timestamps || m.timestamps[base_image].add_seconds(m.max_image_age) <= time.now() { @@ -58,8 +58,8 @@ pub fn (mut m ImageManager) up_to_date(base_image string) bool { } // If the inspect fails, it's either because the image doesn't exist or - // because of some other error. Either we can't know *for certain* that - // the image exists, so we return false. + // because of some other error. Either way, we can't know *for certain* + // that the image exists, so we return false. return false } @@ -67,7 +67,7 @@ pub fn (mut m ImageManager) up_to_date(base_image string) bool { } // refresh_image builds a new builder image from the given base image. This -// function should only be called if `up_to_date` return false. +// function should only be called if `up_to_date` returned false. fn (mut m ImageManager) refresh_image(base_image string) ! { // TODO use better image tags for built images new_image := build.create_build_image(base_image) or { diff --git a/src/client/client.v b/src/client/client.v index 5f24197..cce4e70 100644 --- a/src/client/client.v +++ b/src/client/client.v @@ -57,12 +57,7 @@ fn (c &Client) send_request(method Method, url string, params map[string]stri // output as a Response object. fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) !Response { res := c.send_request_raw(method, url, params, body)! - status := http.status_from_int(res.status_code) - - // Just return an empty successful response - if status.is_success() && res.body == '' { - return new_data_response(T{}) - } + status := res.status() // Non-successful requests are expected to return either an empty body or // Response @@ -77,6 +72,11 @@ fn (c &Client) send_request_with_body(method Method, url string, params map[s return error('Status $res.status_code ($status.str()): $data.message') } + // Just return an empty successful response + if res.body == '' { + return new_data_response(T{}) + } + data := json.decode(Response, res.body)! 
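+	// (At this point the request succeeded and the body was non-empty, so
+	// the decoded Response wrapper is handed back unchanged.)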
return data diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v index a134926..94deebd 100644 --- a/src/console/targets/targets.v +++ b/src/console/targets/targets.v @@ -13,7 +13,7 @@ struct Config { base_image string = 'archlinux:base-devel' } -// cmd returns the cli submodule that handles the repos API interaction +// cmd returns the cli submodule that handles the targets API interaction pub fn cmd() cli.Command { return cli.Command{ name: 'targets' @@ -236,14 +236,11 @@ pub fn cmd() cli.Command { } } -// get_repo_by_prefix tries to find the repo with the given prefix in its -// ID. If multiple or none are found, an error is raised. - // list prints out a list of all repositories. fn list(conf Config, filter TargetFilter, raw bool) ! { c := client.new(conf.address, conf.api_key) - repos := c.get_targets(filter)! - data := repos.map([it.id.str(), it.kind, it.url, it.repo]) + targets := c.get_targets(filter)! + data := targets.map([it.id.str(), it.kind, it.url, it.repo]) if raw { println(console.tabbed_table(data)) @@ -252,7 +249,7 @@ fn list(conf Config, filter TargetFilter, raw bool) ! { } } -// add adds a new repository to the server's list. +// add adds a new target to the server's list. fn add(conf Config, t &NewTarget, raw bool) ! { c := client.new(conf.address, conf.api_key) target_id := c.add_target(t)! @@ -264,13 +261,13 @@ fn add(conf Config, t &NewTarget, raw bool) ! { } } -// remove removes a repository from the server's list. +// remove removes a target from the server's list. fn remove(conf Config, id string) ! { c := client.new(conf.address, conf.api_key) c.remove_target(id.int())! } -// patch patches a given repository with the provided params. +// patch patches a given target with the provided params. fn patch(conf Config, id string, params map[string]string) ! { // We check the cron expression first because it's useless to send an // invalid one to the server. @@ -284,9 +281,9 @@ fn patch(conf Config, id string, params map[string]string) ! { c.patch_target(id.int(), params)! } -// info shows detailed information for a given repo. +// info shows detailed information for a given target. fn info(conf Config, id string) ! { c := client.new(conf.address, conf.api_key) - repo := c.get_target(id.int())! - println(repo) + target := c.get_target(id.int())! 
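+	// Target implements str() (see src/models/targets.v), so this prints
+	// the readable multi-line form: id, kind, url, branch, path, repo,
+	// schedule and arch.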
+	println(target)
 }

From 402fef475a8e036c9db2bf84cdfd7556cb6f2093 Mon Sep 17 00:00:00 2001
From: Jef Roosens
Date: Fri, 16 Dec 2022 20:38:26 +0100
Subject: [PATCH 48/51] fix: actually use path setting when building

---
 src/agent/daemon.v     |  3 ++-
 src/build/build.v      | 30 ++----------------------------
 src/build/queue.v      | 13 ++-----------
 src/build/shell.v      |  2 ++
 src/build/shell_test.v |  2 ++
 src/client/jobs.v      |  2 +-
 src/models/builds.v    | 18 ++++++++++++++++++
 src/models/targets.v   | 15 +++++++++++++++
 8 files changed, 44 insertions(+), 41 deletions(-)
 create mode 100644 src/models/builds.v

diff --git a/src/agent/daemon.v b/src/agent/daemon.v
index 8fa3816..c55d0db 100644
--- a/src/agent/daemon.v
+++ b/src/agent/daemon.v
@@ -2,7 +2,8 @@ module agent
 
 import log
 import sync.stdatomic
-import build { BuildConfig }
+import build
+import models { BuildConfig }
 import client
 import time
 import os
diff --git a/src/build/build.v b/src/build/build.v
index c6aa7f1..712c93b 100644
--- a/src/build/build.v
+++ b/src/build/build.v
@@ -6,7 +6,7 @@ import time
 import os
 import strings
 import util
-import models { Target }
+import models { BuildConfig, Target }
 
 const (
 	container_build_dir = '/build'
@@ -16,23 +16,6 @@ const (
 	'/usr/local/bin', '/usr/bin/site_perl', '/usr/bin/vendor_perl', '/usr/bin/core_perl']
 )
 
-pub struct BuildConfig {
-pub:
-	target_id  int
-	kind       string
-	url        string
-	branch     string
-	path       string
-	repo       string
-	base_image string
-	force      bool
-}
-
-// str return a single-line string representation of a build log
-pub fn (c BuildConfig) str() string {
-	return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, path: $c.path, repo: $c.repo, base_image: $c.base_image, force: $c.force }'
-}
-
 // create_build_image creates a builder image given some base image which can
 // then be used to build & package Arch images. It mostly just updates the
 // system, install some necessary packages & creates a non-root user to run
@@ -112,16 +95,7 @@ pub:
 
 // build_target builds the given target. Internally it calls `build_config`.
 pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult {
-	config := BuildConfig{
-		target_id: target.id
-		kind: target.kind
-		url: target.url
-		branch: target.branch
-		path: target.path
-		repo: target.repo
-		base_image: base_image_id
-		force: force
-	}
+	config := target.as_build_config(base_image_id, force)
 
 	return build_config(address, api_key, config)
 }
diff --git a/src/build/queue.v b/src/build/queue.v
index 7902173..e74529c 100644
--- a/src/build/queue.v
+++ b/src/build/queue.v
@@ -1,6 +1,6 @@
 module build
 
-import models { Target }
+import models { BuildConfig, Target }
 import cron.expression { CronExpression, parse_expression }
 import time
 import datatypes { MinHeap }
@@ -80,16 +80,7 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
 	mut job := BuildJob{
 		created: time.now()
 		single: input.single
-		config: BuildConfig{
-			target_id: input.target.id
-			kind: input.target.kind
-			url: input.target.url
-			branch: input.target.branch
-			repo: input.target.repo
-			// TODO make this configurable
-			base_image: q.default_base_image
-			force: input.force
-		}
+		config: input.target.as_build_config(q.default_base_image, input.force)
 	}
 
 	if !input.now {
diff --git a/src/build/shell.v b/src/build/shell.v
index c459a99..16f93b5 100644
--- a/src/build/shell.v
+++ b/src/build/shell.v
@@ -1,5 +1,7 @@
 module build
 
+import models { BuildConfig }
+
 // escape_shell_string escapes any characters that could be interpreted
 // incorrectly by a shell. The resulting value should be safe to use inside an
 // echo statement.
diff --git a/src/build/shell_test.v b/src/build/shell_test.v
index e44c5ff..e23d964 100644
--- a/src/build/shell_test.v
+++ b/src/build/shell_test.v
@@ -1,5 +1,7 @@
 module build
 
+import models { BuildConfig }
+
 fn test_create_build_script_git() {
 	config := BuildConfig{
 		target_id: 1
diff --git a/src/client/jobs.v b/src/client/jobs.v
index a545499..784639e 100644
--- a/src/client/jobs.v
+++ b/src/client/jobs.v
@@ -1,6 +1,6 @@
 module client
 
-import build { BuildConfig }
+import models { BuildConfig }
 
 // poll_jobs requests a list of new build jobs from the server.
 pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
diff --git a/src/models/builds.v b/src/models/builds.v
new file mode 100644
index 0000000..926a53c
--- /dev/null
+++ b/src/models/builds.v
@@ -0,0 +1,18 @@
+module models
+
+pub struct BuildConfig {
+pub:
+	target_id  int
+	kind       string
+	url        string
+	branch     string
+	path       string
+	repo       string
+	base_image string
+	force      bool
+}
+
+// str returns a single-line string representation of a build config
+pub fn (c BuildConfig) str() string {
+	return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, path: $c.path, repo: $c.repo, base_image: $c.base_image, force: $c.force }'
+}
diff --git a/src/models/targets.v b/src/models/targets.v
index cb60650..af3cb0d 100644
--- a/src/models/targets.v
+++ b/src/models/targets.v
@@ -52,6 +52,21 @@ pub fn (t &Target) str() string {
 	return str
 }
 
+// as_build_config converts a Target into a BuildConfig, given some extra
+// needed information.
+pub fn (t &Target) as_build_config(base_image string, force bool) BuildConfig {
+	return BuildConfig{
+		target_id: t.id
+		kind: t.kind
+		url: t.url
+		branch: t.branch
+		path: t.path
+		repo: t.repo
+		base_image: base_image
+		force: force
+	}
+}
+
 [params]
 pub struct TargetFilter {
 pub mut:
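The refactor above has one practical consequence worth spelling out:
build_target and BuildJobQueue.insert now derive their BuildConfig through the
same Target.as_build_config helper, so the path field can no longer be
silently dropped the way the queue's hand-rolled BuildConfig dropped it. A
minimal sketch of the shared conversion, with invented field values (only the
type and function names come from the diffs, and 'archlinux:base-devel' is the
default base image used elsewhere in the series):

target := Target{
	id: 1
	kind: 'git'
	url: 'https://example.com/pkgs.git'
	branch: 'main'
	path: 'vieter' // hypothetical subdirectory containing the PKGBUILD
	repo: 'vieter'
}

// Both call sites now reduce to a single expression:
config := target.as_build_config('archlinux:base-devel', false)
println(config) // uses the single-line str() from src/models/builds.v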
From 1797c0f5606e630d6dc14aeadf5de0b49d10f4a4 Mon Sep 17 00:00:00 2001
From: Jef Roosens
Date: Fri, 16 Dec 2022 21:47:02 +0100
Subject: [PATCH 49/51] fix(agent): correctly calculate sleep time

---
 src/agent/daemon.v | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/agent/daemon.v b/src/agent/daemon.v
index c55d0db..b5a6968 100644
--- a/src/agent/daemon.v
+++ b/src/agent/daemon.v
@@ -71,6 +71,8 @@ pub fn (mut d AgentDaemon) run() {
 		// clustered together (especially when mostly using the global cron
 		// schedule), so there's a much higher chance jobs are available.
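+		// Put differently: poll again as soon as a build slot has freed up,
+		// or once the regular polling interval has elapsed, whichever comes
+		// first.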
if finished > 0 || time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) { + d.ldebug('Polling for new jobs') + new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or { d.lerror('Failed to poll jobs: $err.msg()') @@ -78,6 +80,9 @@ pub fn (mut d AgentDaemon) run() { time.sleep(5 * time.second) continue } + + d.ldebug('Received $new_configs.len jobs') + last_poll_time = time.now() for config in new_configs { @@ -105,16 +110,19 @@ pub fn (mut d AgentDaemon) run() { // No new jobs were scheduled and the agent isn't doing anything, // so we just wait until the next polling period. if new_configs.len == 0 && finished + empty == d.conf.max_concurrent_builds { - sleep_time = time.now() - last_poll_time + sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now() } } // The agent is not doing anything, so we just wait until the next poll // time else if finished + empty == d.conf.max_concurrent_builds { - sleep_time = time.now() - last_poll_time + sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now() } - time.sleep(sleep_time) + if sleep_time > 0 { + d.ldebug('Sleeping for $sleep_time') + time.sleep(sleep_time) + } } } From b067f9c589abcd1885b9bed5c8551c56374dfe11 Mon Sep 17 00:00:00 2001 From: Jef Roosens Date: Fri, 16 Dec 2022 22:06:26 +0100 Subject: [PATCH 50/51] refactor: streamline agent loop code --- src/agent/daemon.v | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/agent/daemon.v b/src/agent/daemon.v index b5a6968..62f36c2 100644 --- a/src/agent/daemon.v +++ b/src/agent/daemon.v @@ -46,16 +46,22 @@ pub fn (mut d AgentDaemon) run() { // This is just so that the very first time the loop is ran, the jobs are // always polled mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency) - mut sleep_time := 1 * time.second - mut finished, mut empty := 0, 0 + mut sleep_time := 0 * time.second + mut finished, mut empty, mut running := 0, 0, 0 for { + if sleep_time > 0 { + d.ldebug('Sleeping for $sleep_time') + time.sleep(sleep_time) + } + finished, empty = d.update_atomics() + running = d.conf.max_concurrent_builds - finished - empty // No new finished builds and no free slots, so there's nothing to be // done if finished + empty == 0 { - time.sleep(1 * time.second) + sleep_time = 1 * time.second continue } @@ -77,7 +83,7 @@ pub fn (mut d AgentDaemon) run() { d.lerror('Failed to poll jobs: $err.msg()') // TODO pick a better delay here - time.sleep(5 * time.second) + sleep_time = 5 * time.second continue } @@ -105,23 +111,16 @@ pub fn (mut d AgentDaemon) run() { // build. d.start_build(config) - } - - // No new jobs were scheduled and the agent isn't doing anything, - // so we just wait until the next polling period. 
- if new_configs.len == 0 && finished + empty == d.conf.max_concurrent_builds { - sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now() + running++ } } + // The agent is not doing anything, so we just wait until the next poll // time - else if finished + empty == d.conf.max_concurrent_builds { + if running == 0 { sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now() - } - - if sleep_time > 0 { - d.ldebug('Sleeping for $sleep_time') - time.sleep(sleep_time) + } else { + sleep_time = 1 * time.second } } } From f9bb4b81deef489ac1254de85c20f6a703c5f33c Mon Sep 17 00:00:00 2001 From: Chewing_Bever Date: Sat, 17 Dec 2022 14:00:51 +0100 Subject: [PATCH 51/51] chore: bump versions --- CHANGELOG.md | 2 ++ PKGBUILD | 2 +- src/main.v | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 54d833a..27d9096 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev) +## [0.5.0-rc.1](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.1) + ### Added * Allow specifying subdirectory inside Git repository diff --git a/PKGBUILD b/PKGBUILD index b600ba0..94db654 100644 --- a/PKGBUILD +++ b/PKGBUILD @@ -3,7 +3,7 @@ pkgbase='vieter' pkgname='vieter' -pkgver='0.4.0' +pkgver='0.5.0-rc.1' pkgrel=1 pkgdesc="Lightweight Arch repository server & package build system" depends=('glibc' 'openssl' 'libarchive' 'sqlite') diff --git a/src/main.v b/src/main.v index fe0364f..1053c2f 100644 --- a/src/main.v +++ b/src/main.v @@ -20,7 +20,7 @@ fn main() { mut app := cli.Command{ name: 'vieter' description: 'Vieter is a lightweight implementation of an Arch repository server.' - version: '0.4.0' + version: '0.5.0-rc.1' flags: [ cli.Flag{ flag: cli.FlagType.string