Merge pull request 'Release 0.5.0: release candidate 1' (#316) from release-0.5.0-rc.1 into main
ci/woodpecker/tag/lint: Pipeline was successful
ci/woodpecker/tag/docs: Pipeline was successful
ci/woodpecker/tag/arch-rel: Pipeline failed
ci/woodpecker/tag/build: Pipeline was successful
ci/woodpecker/tag/man: Pipeline was successful
ci/woodpecker/tag/test: Pipeline was successful
ci/woodpecker/tag/gitea: Pipeline was successful
ci/woodpecker/tag/docker: Pipeline was successful
ci/woodpecker/push/docs: Pipeline was successful
ci/woodpecker/push/lint: Pipeline was successful
ci/woodpecker/push/arch: Pipeline was successful
ci/woodpecker/push/build: Pipeline was successful
ci/woodpecker/push/man: Pipeline was successful
ci/woodpecker/push/test: Pipeline was successful
ci/woodpecker/push/docker: Pipeline was successful
ci/woodpecker/push/deploy: Pipeline was successful

Reviewed-on: #316
Jef Roosens 2022-12-17 14:13:05 +01:00
commit 8b72a9fc0f
79 changed files with 1793 additions and 724 deletions

.gitignore (vendored): 1 line changed
View File

@@ -26,6 +26,7 @@ gdb.txt
 # Generated docs
 _docs/
+docs/resources/_gen/
 /man/
 # VLS logs

View File

@@ -9,7 +9,7 @@ skip_clone: true
 pipeline:
   build:
-    image: 'menci/archlinuxarm:base-devel'
+    image: 'git.rustybever.be/vieter-v/vieter-builder'
     commands:
       # Add the vieter repository so we can use the compiler
       - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf

View File

@@ -9,7 +9,7 @@ skip_clone: true
 pipeline:
   build:
-    image: 'menci/archlinuxarm:base-devel'
+    image: 'git.rustybever.be/vieter-v/vieter-builder'
     commands:
       # Add the vieter repository so we can use the compiler
       - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 matrix:
   PLATFORM:

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches:

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches: [ 'main' ]

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 # These checks already get performed on the feature branches
 branches:
@@ -7,10 +7,21 @@ branches:
 platform: 'linux/amd64'
 pipeline:
+  # vfmt seems to get confused if these aren't present
+  install-modules:
+    image: *vlang_image
+    pull: true
+    commands:
+      - export VMODULES=$PWD/.vmodules
+      - 'cd src && v install'
+    when:
+      event: [pull_request]
   lint:
     image: *vlang_image
     pull: true
     commands:
+      - export VMODULES=$PWD/.vmodules
       - make lint
     when:
-      event: [ pull_request ]
+      event: [pull_request]

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 platform: 'linux/amd64'
 branches:

View File

@@ -1,5 +1,5 @@
 variables:
-  - &vlang_image 'chewingbever/vlang:0.3'
+  - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2'
 matrix:
   PLATFORM:

View File

@@ -7,6 +7,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
+## [0.5.0-rc.1](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.1)
+### Added
+* Allow specifying a subdirectory inside the Git repository
+* Added option to deploy using an agent-server architecture instead of the cron daemon
+* Allow scheduling builds on the server from the CLI tool instead of building
+  them locally
+* Allow force-building packages, meaning the build won't check if the
+  repository is already up to date
+### Changed
+* Migrated codebase to V 0.3.2
+* Cron expression parser now uses bitfields instead of bool arrays
+### Fixed
+* Arch value for target is now properly set if not provided
+* Allow NULL values for branch in database
+* Endpoint for adding targets now returns the correct id
+* CLI now correctly errors when a request fails, and no longer errors on
+  successful requests
+* Fixed possible infinite loop when removing old build images
+* Check whether build image still exists before starting build
+* Don't run makepkg `prepare()` function twice
+* Don't buffer stdout in Docker containers
 ## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0)
 ### Added

View File

@@ -1,4 +1,4 @@
-FROM chewingbever/vlang:0.3 AS builder
+FROM git.rustybever.be/chewing_bever/vlang:0.3.2 AS builder
 ARG TARGETPLATFORM
 ARG CI_COMMIT_SHA
@@ -23,6 +23,7 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
         "https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
         chmod +x vieter ; \
     else \
+        cd src && v install && cd .. && \
         LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static' make prod && \
         mv pvieter vieter ; \
     fi

View File

@@ -3,7 +3,7 @@ SRC_DIR := src
 SOURCES != find '$(SRC_DIR)' -iname '*.v'
 V_PATH ?= v
-V := $(V_PATH) -showcc -gc boehm
+V := $(V_PATH) -showcc -gc boehm -W -d use_openssl
 all: vieter
@@ -92,9 +92,9 @@ clean:
 .PHONY: autofree
 autofree: afvieter
 afvieter: $(SOURCES)
-	$(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR)
+	$(V) -showcc -autofree -o afvieter $(SRC_DIR)
 .PHONY: skip-unused
 skip-unused: suvieter
 suvieter: $(SOURCES)
-	$(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR)
+	$(V) -skip-unused -o suvieter $(SRC_DIR)

View File

@@ -3,7 +3,7 @@
 pkgbase='vieter'
 pkgname='vieter'
-pkgver='0.4.0'
+pkgver='0.5.0-rc.1'
 pkgrel=1
 pkgdesc="Lightweight Arch repository server & package build system"
 depends=('glibc' 'openssl' 'libarchive' 'sqlite')

View File

@@ -21,7 +21,8 @@ quicker.
 I chose [V](https://vlang.io/) as I've been very intrigued by this language for
 a while now. I wanted a fast language that I could code while relaxing, without
 having to exert too much mental effort & V seemed like the right choice for
-that.
+that. Sadly, this didn't quite turn out the way I expected, but I'm sticking
+with it anyways ;p
 ## Features
@@ -49,7 +50,7 @@ update`.
 I used to maintain a mirror that tracked the latest master, but nowadays, I
 maintain a Docker image containing the specific compiler version that Vieter
-builds with. Currently, this is V 0.3.
+builds with. Currently, this is V 0.3.2.
 ## Contributing

View File

@@ -17,7 +17,7 @@ If a variable is both present in the config file & as an environment variable,
 the value in the environment variable is used.
 {{< hint info >}}
 **Note**
 All environment variables can also be provided from a file by appending them
 with `_FILE`. This for example allows you to provide the API key from a Docker
 secrets file.
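For illustration, a minimal sketch of the `_FILE` mechanism (the file path below is made up):

    # instead of passing the secret itself...
    export VIETER_API_KEY=secret
    # ...point Vieter at a file containing it, e.g. a mounted Docker secret
    export VIETER_API_KEY_FILE=/run/secrets/vieter_api_key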
@@ -97,3 +97,25 @@ configuration variable required for each command.
 build`.
 * Default: `archlinux:base-devel`
+### `vieter agent`
+* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
+  `WARN`, `INFO` or `DEBUG`.
+  * Default: `WARN`
+* `address`: *public* URL of the Vieter repository server to build for. From
+  this server jobs are retrieved. All built packages are published to this
+  server.
+* `api_key`: API key of the above server.
+* `data_dir`: directory to store log file in.
+* `max_concurrent_builds`: how many builds to run at the same time.
+  * Default: `1`
+* `polling_frequency`: how often (in seconds) to poll the server for new
+  builds. Note that the agent might poll more frequently when it's actively
+  processing builds.
+* `image_rebuild_frequency`: Vieter periodically builds images that are then
+  used as a basis for running build containers. This is to prevent each build
+  from downloading an entire repository worth of dependencies. This setting
+  defines how frequently (in minutes) to rebuild these images.
+  * Default: `1440` (every 24 hours)
+* `arch`: architecture for which this agent should pull down builds (e.g.
+  `x86_64`)
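Putting the new section together, a minimal sketch of an agent configuration; all values are illustrative, and this assumes the same TOML-style config file read by the other commands:

    log_level = "WARN"
    address = "https://arch.example.com"
    api_key = "secret"
    data_dir = "/var/lib/vieter"
    max_concurrent_builds = 2
    polling_frequency = 30
    image_rebuild_frequency = 1440
    arch = "x86_64"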

View File

@@ -21,7 +21,7 @@ branch. This branch will be the most up to date, but does not give any
 guarantees about stability, so beware!
 Thanks to the single-binary design of Vieter, this image can be used both for
-the repository server & the cron daemon.
+the repository server, the cron daemon and the agent.
 Below is an example compose file to set up both the repository server & the
 cron daemon:
@@ -76,7 +76,7 @@ architectures will build on both.
 ## Binary
 On the
-[releases](https://git.rustybever.be/vieter/vieter/releases)
+[releases](https://git.rustybever.be/vieter-v/vieter/releases)
 page, you can find statically compiled binaries for all
 released versions. This is the same binary as used inside
 the Docker images.
@@ -106,5 +106,5 @@ guarantee that a compiler update won't temporarily break them.
 ## Building from source
-The project [README](https://git.rustybever.be/vieter/vieter#building) contains
-instructions for building Vieter from source.
+The project [README](https://git.rustybever.be/vieter-v/vieter#building)
+contains instructions for building Vieter from source.

View File

@@ -37,6 +37,6 @@ Each section can consist of as many of these parts as necessary.
 ## CLI tool
 The Vieter binary contains a command that shows you the next matching times for
-a given expression. This can be useful to understand the syntax. For more
+a given expression. This can be useful for understanding the syntax. For more
 information, see
 [vieter-schedule(1)](https://rustybever.be/man/vieter/vieter-schedule.1.html).
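As a sketch, an invocation might look like the following; the expression value is illustrative, and the exact syntax is documented in the man page linked above:

    # print the next times a build would be scheduled for this expression
    vieter schedule '0 3'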

src/agent/agent.v (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
module agent
import log
import os
import util
const log_file_name = 'vieter.agent.log'
// agent starts an agent service
pub fn agent(conf Config) ! {
log_level := log.level_from_tag(conf.log_level) or {
return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
mut logger := log.Log{
level: log_level
}
os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') }
log_file := os.join_path_single(conf.data_dir, agent.log_file_name)
logger.set_full_logpath(log_file)
logger.log_to_console_too()
mut d := agent_init(logger, conf)
d.run()
}

src/agent/cli.v (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
module agent
import cli
import conf as vconf
struct Config {
pub:
log_level string = 'WARN'
// Architecture that the agent represents
arch string
api_key string
address string
data_dir string
max_concurrent_builds int = 1
polling_frequency int = 30
image_rebuild_frequency int = 1440
}
// cmd returns the cli command that handles the agent daemon.
pub fn cmd() cli.Command {
return cli.Command{
name: 'agent'
description: 'Start an agent daemon.'
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
agent(conf)!
}
}
}

src/agent/daemon.v (new file, 197 lines)
View File

@@ -0,0 +1,197 @@
module agent
import log
import sync.stdatomic
import build
import models { BuildConfig }
import client
import time
import os
const (
build_empty = 0
build_running = 1
build_done = 2
)
struct AgentDaemon {
logger shared log.Log
conf Config
client client.Client
mut:
images ImageManager
// Which builds are currently running; length is conf.max_concurrent_builds
builds []BuildConfig
// Atomic variables used to detect when a build has finished; length is
// conf.max_concurrent_builds
atomics []u64
}
// agent_init initializes a new agent
fn agent_init(logger log.Log, conf Config) AgentDaemon {
mut d := AgentDaemon{
logger: logger
client: client.new(conf.address, conf.api_key)
conf: conf
images: new_image_manager(conf.image_rebuild_frequency * 60)
builds: []BuildConfig{len: conf.max_concurrent_builds}
atomics: []u64{len: conf.max_concurrent_builds}
}
return d
}
// run starts the actual agent daemon. This function will run forever.
pub fn (mut d AgentDaemon) run() {
// This is just so that the very first time the loop is run, the jobs are
// always polled
mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
mut sleep_time := 0 * time.second
mut finished, mut empty, mut running := 0, 0, 0
for {
if sleep_time > 0 {
d.ldebug('Sleeping for $sleep_time')
time.sleep(sleep_time)
}
finished, empty = d.update_atomics()
running = d.conf.max_concurrent_builds - finished - empty
// No new finished builds and no free slots, so there's nothing to be
// done
if finished + empty == 0 {
sleep_time = 1 * time.second
continue
}
// Builds have finished, so old builder images might have freed up.
// TODO this might query the docker daemon too frequently.
if finished > 0 {
d.images.clean_old_images()
}
// The agent will always poll for new jobs after at most
// `polling_frequency` seconds. However, when jobs have finished, the
// agent will also poll for new jobs. This is because jobs are often
// clustered together (especially when mostly using the global cron
// schedule), so there's a much higher chance jobs are available.
if finished > 0 || time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) {
d.ldebug('Polling for new jobs')
new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or {
d.lerror('Failed to poll jobs: $err.msg()')
// TODO pick a better delay here
sleep_time = 5 * time.second
continue
}
d.ldebug('Received $new_configs.len jobs')
last_poll_time = time.now()
for config in new_configs {
// Make sure a recent build base image is available for
// building the config
if !d.images.up_to_date(config.base_image) {
d.linfo('Building builder image from base image $config.base_image')
// TODO handle this better than to just skip the config
d.images.refresh_image(config.base_image) or {
d.lerror(err.msg())
continue
}
}
// It's technically still possible that the build image is
// removed in the very short period between building the
// builder image and starting a build container with it. If
// this happens, fate really just didn't want you to do this
// build.
d.start_build(config)
running++
}
}
// The agent is not doing anything, so we just wait until the next poll
// time
if running == 0 {
sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now()
} else {
sleep_time = 1 * time.second
}
}
}
// update_atomics checks for each build whether it's completed, and sets it to
// empty again if so. The return value is a tuple `(finished, empty)` where
// `finished` is how many builds were just finished and thus set to empty, and
// `empty` is how many build slots were already empty. The amount of running
// builds can then be calculated by subtracting these two values from the
// total allowed concurrent builds.
fn (mut d AgentDaemon) update_atomics() (int, int) {
mut finished := 0
mut empty := 0
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_done {
stdatomic.store_u64(&d.atomics[i], agent.build_empty)
finished++
} else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
empty++
}
}
return finished, empty
}
// start_build starts a build for the given BuildConfig.
fn (mut d AgentDaemon) start_build(config BuildConfig) bool {
for i in 0 .. d.atomics.len {
if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
stdatomic.store_u64(&d.atomics[i], agent.build_running)
d.builds[i] = config
go d.run_build(i, config)
return true
}
}
return false
}
// run_build actually starts the build process for a given target.
fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
d.linfo('started build: $config')
// 0 means success, 1 means failure
mut status := 0
new_config := BuildConfig{
...config
base_image: d.images.get(config.base_image)
}
res := build.build_config(d.client.address, d.client.api_key, new_config) or {
d.ldebug('build_config error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('Uploading build logs for $config')
// TODO use the arch value here
build_arch := os.uname().machine
d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
res.exit_code, res.logs) or { d.lerror('Failed to upload logs for $config') }
} else {
d.lwarn('an error occurred during build: $config')
}
stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
}

src/agent/images.v (new file, 119 lines)
View File

@@ -0,0 +1,119 @@
module agent
import time
import docker
import build
// An ImageManager is a utility that creates builder images from given base
// images, updating these builder images if they've become too old. This
// structure can manage images from any number of base images, paving the way
// for configurable base images per target/repository.
struct ImageManager {
max_image_age int [required]
mut:
// For each base image, one or more builder images can exist at the same
// time
images map[string][]string [required]
// For each base image, we track when its newest image was built
timestamps map[string]time.Time [required]
}
// new_image_manager initializes a new image manager.
fn new_image_manager(max_image_age int) ImageManager {
return ImageManager{
max_image_age: max_image_age
images: map[string][]string{}
timestamps: map[string]time.Time{}
}
}
// get returns the name of the newest image for the given base image. Note that
// this function should only be called *after* a first call to `refresh_image`.
pub fn (m &ImageManager) get(base_image string) string {
return m.images[base_image].last()
}
// up_to_date returns true if the last known builder image exists and is up to
// date. If this function returns true, the last builder image may be used to
// perform a build.
pub fn (mut m ImageManager) up_to_date(base_image string) bool {
if base_image !in m.timestamps
|| m.timestamps[base_image].add_seconds(m.max_image_age) <= time.now() {
return false
}
// It's possible the image has been removed by some external event, so we
// check whether it actually exists as well.
mut dd := docker.new_conn() or { return false }
defer {
dd.close() or {}
}
dd.image_inspect(m.images[base_image].last()) or {
// Image doesn't exist, so we stop tracking it
if err.code() == 404 {
m.images[base_image].delete_last()
m.timestamps.delete(base_image)
}
// If the inspect fails, it's either because the image doesn't exist or
// because of some other error. Either way, we can't know *for certain*
// that the image exists, so we return false.
return false
}
return true
}
// refresh_image builds a new builder image from the given base image. This
// function should only be called if `up_to_date` returned false.
fn (mut m ImageManager) refresh_image(base_image string) ! {
// TODO use better image tags for built images
new_image := build.create_build_image(base_image) or {
return error('Failed to build builder image from base image $base_image')
}
m.images[base_image] << new_image
m.timestamps[base_image] = time.now()
}
// clean_old_images removes all older builder images that are no longer in use.
// The function will always leave at least one builder image, namely the newest
// one.
fn (mut m ImageManager) clean_old_images() {
mut dd := docker.new_conn() or { return }
defer {
dd.close() or {}
}
mut i := 0
for image in m.images.keys() {
i = 0
for i < m.images[image].len - 1 {
// For each builder image, we try to remove it by calling the Docker
// API. If the function returns an error or false, that means the image
// wasn't deleted. Therefore, we move the index over. If the function
// returns true, the array's length has decreased by one so we don't
// move the index.
dd.remove_image(m.images[image][i]) or {
// The image was removed by an external event
if err.code() == 404 {
m.images[image].delete(i)
}
// The image couldn't be removed, so we need to keep track of
// it
else {
i += 1
}
continue
}
m.images[image].delete(i)
}
}
}
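Reading the new file as a whole, the intended call pattern for ImageManager is roughly the following sketch (hypothetical driver code, not part of the PR; the base image name is an arbitrary example):

    mut m := new_image_manager(3600)
    base := 'archlinux:base-devel'
    // ensure a fresh builder image exists before using it
    if !m.up_to_date(base) {
        m.refresh_image(base) or { panic(err) }
    }
    // the newest builder image built from this base image
    image := m.get(base)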

src/agent/log.v (new file, 35 lines)
View File

@@ -0,0 +1,35 @@
module agent
import log
// log a message with the given level
pub fn (mut d AgentDaemon) log(msg string, level log.Level) {
lock d.logger {
d.logger.send_output(msg, level)
}
}
// lfatal create a log message with the fatal level
pub fn (mut d AgentDaemon) lfatal(msg string) {
d.log(msg, log.Level.fatal)
}
// lerror create a log message with the error level
pub fn (mut d AgentDaemon) lerror(msg string) {
d.log(msg, log.Level.error)
}
// lwarn create a log message with the warn level
pub fn (mut d AgentDaemon) lwarn(msg string) {
d.log(msg, log.Level.warn)
}
// linfo create a log message with the info level
pub fn (mut d AgentDaemon) linfo(msg string) {
d.log(msg, log.Level.info)
}
// ldebug create a log message with the debug level
pub fn (mut d AgentDaemon) ldebug(msg string) {
d.log(msg, log.Level.debug)
}

View File

@@ -1,12 +1,12 @@
 module build
-import vieter_v.docker
+import docker
 import encoding.base64
 import time
 import os
 import strings
 import util
-import models { Target }
+import models { BuildConfig, Target }
 const (
     container_build_dir = '/build'
@@ -21,8 +21,8 @@ const (
 // system, install some necessary packages & creates a non-root user to run
 // makepkg with. The base image should be some Linux distribution that uses
 // Pacman as its package manager.
-pub fn create_build_image(base_image string) ?string {
-    mut dd := docker.new_conn()?
+pub fn create_build_image(base_image string) !string {
+    mut dd := docker.new_conn()!
     defer {
         dd.close() or {}
@@ -57,15 +57,15 @@ pub fn create_build_image(base_image string) !string {
     image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
     // We pull the provided image
-    dd.pull_image(image_name, image_tag)?
+    dd.pull_image(image_name, image_tag)!
-    id := dd.container_create(c)?.id
-    // id := docker.create_container(c)?
-    dd.container_start(id)?
+    id := dd.container_create(c)!.id
+    // id := docker.create_container(c)!
+    dd.container_start(id)!
     // This loop waits until the container has stopped, so we can remove it after
     for {
-        data := dd.container_inspect(id)?
+        data := dd.container_inspect(id)!
         if !data.state.running {
             break
@@ -79,8 +79,8 @@ pub fn create_build_image(base_image string) !string {
     // TODO also add the base image's name into the image name to prevent
     // conflicts.
     tag := time.sys_mono_now().str()
-    image := dd.create_image_from_container(id, 'vieter-build', tag)?
-    dd.container_remove(id)?
+    image := dd.create_image_from_container(id, 'vieter-build', tag)!
+    dd.container_remove(id)!
     return image.id
 }
@@ -93,25 +93,32 @@ pub:
     logs string
 }
-// build_target builds, packages & publishes a given Arch package based on the
+// build_target builds the given target. Internally it calls `build_config`.
+pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool) !BuildResult {
+    config := target.as_build_config(base_image_id, force)
+
+    return build_config(address, api_key, config)
+}
+
+// build_config builds, packages & publishes a given Arch package based on the
 // provided target. The base image ID should be of an image previously created
 // by create_build_image. It returns the logs of the container.
-pub fn build_target(address string, api_key string, base_image_id string, target &Target) ?BuildResult {
-    mut dd := docker.new_conn()?
+pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
+    mut dd := docker.new_conn()!
     defer {
         dd.close() or {}
     }
     build_arch := os.uname().machine
-    build_script := create_build_script(address, target, build_arch)
+    build_script := create_build_script(address, config, build_arch)
     // We convert the build script into a base64 string, which then gets passed
     // to the container as an env var
     base64_script := base64.encode_str(build_script)
     c := docker.NewContainer{
-        image: '$base_image_id'
+        image: '$config.base_image'
         env: [
             'BUILD_SCRIPT=$base64_script',
             'API_KEY=$api_key',
@@ -125,25 +132,25 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
         user: '0:0'
     }
-    id := dd.container_create(c)?.id
-    dd.container_start(id)?
+    id := dd.container_create(c)!.id
+    dd.container_start(id)!
-    mut data := dd.container_inspect(id)?
+    mut data := dd.container_inspect(id)!
     // This loop waits until the container has stopped, so we can remove it after
     for data.state.running {
         time.sleep(1 * time.second)
-        data = dd.container_inspect(id)?
+        data = dd.container_inspect(id)!
     }
-    mut logs_stream := dd.container_get_logs(id)?
+    mut logs_stream := dd.container_get_logs(id)!
     // Read in the entire stream
     mut logs_builder := strings.new_builder(10 * 1024)
-    util.reader_to_writer(mut logs_stream, mut logs_builder)?
+    util.reader_to_writer(mut logs_stream, mut logs_builder)!
-    dd.container_remove(id)?
+    dd.container_remove(id)!
     return BuildResult{
         start_time: data.state.start_time

src/build/queue.v (new file, 220 lines)
View File

@@ -0,0 +1,220 @@
module build
import models { BuildConfig, Target }
import cron.expression { CronExpression, parse_expression }
import time
import datatypes { MinHeap }
import util
struct BuildJob {
pub mut:
// Time at which this build job was created/queued
created time.Time
// Next timestamp from which point this job is allowed to be executed
timestamp time.Time
// Required for calculating next timestamp after having pop'ed a job
ce CronExpression
// Actual build config sent to the agent
config BuildConfig
// Whether this is a one-time job
single bool
}
// Allows BuildJob structs to be sorted according to their timestamp in
// MinHeaps
fn (r1 BuildJob) < (r2 BuildJob) bool {
return r1.timestamp < r2.timestamp
}
// The build job queue is responsible for managing the list of scheduled builds
// for each architecture. Agents receive jobs from this queue.
pub struct BuildJobQueue {
// Schedule to use for targets without explicitly defined cron expression
default_schedule CronExpression
// Base image to use for targets without defined base image
default_base_image string
mut:
mutex shared util.Dummy
// For each architecture, a priority queue is tracked
queues map[string]MinHeap<BuildJob>
// When a target is removed from the server or edited, its previous build
// configs will be invalid. This map allows for those to be simply skipped
// by ignoring any build configs created before this timestamp.
invalidated map[int]time.Time
}
// new_job_queue initializes a new job queue
pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue {
return BuildJobQueue{
default_schedule: default_schedule
default_base_image: default_base_image
invalidated: map[int]time.Time{}
}
}
// insert_all executes insert for each architecture of the given Target.
pub fn (mut q BuildJobQueue) insert_all(target Target) ! {
for arch in target.arch {
q.insert(target: target, arch: arch.value)!
}
}
[params]
pub struct InsertConfig {
target Target [required]
arch string [required]
single bool
force bool
now bool
}
// insert a new target's job into the queue for the given architecture. This
// job will then be endlessly rescheduled after being pop'ed, unless removed
// explicitly.
pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
lock q.mutex {
if input.arch !in q.queues {
q.queues[input.arch] = MinHeap<BuildJob>{}
}
mut job := BuildJob{
created: time.now()
single: input.single
config: input.target.as_build_config(q.default_base_image, input.force)
}
if !input.now {
ce := if input.target.schedule != '' {
parse_expression(input.target.schedule) or {
return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()")
}
} else {
q.default_schedule
}
job.timestamp = ce.next_from_now()!
job.ce = ce
} else {
job.timestamp = time.now()
}
q.queues[input.arch].insert(job)
}
}
// reschedule the given job by calculating the next timestamp and re-adding it
// to its respective queue. This function is called by the pop functions
// *after* having pop'ed the job.
fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! {
new_timestamp := job.ce.next_from_now()!
new_job := BuildJob{
...job
created: time.now()
timestamp: new_timestamp
}
q.queues[arch].insert(new_job)
}
// pop_invalid pops all invalid jobs.
fn (mut q BuildJobQueue) pop_invalid(arch string) {
for {
job := q.queues[arch].peek() or { return }
if job.config.target_id in q.invalidated
&& job.created < q.invalidated[job.config.target_id] {
// This pop *should* never fail according to the source code
q.queues[arch].pop() or {}
} else {
break
}
}
}
// peek shows the first job for the given architecture that's ready to be
// executed, if present.
pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob {
// Even peek requires a write lock, because pop_invalid can modify the data
// structure
lock q.mutex {
if arch !in q.queues {
return none
}
q.pop_invalid(arch)
job := q.queues[arch].peek()?
if job.timestamp < time.now() {
return job
}
}
return none
}
// pop removes the first job for the given architecture that's ready to be
// executed from the queue and returns it, if present.
pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
lock q.mutex {
if arch !in q.queues {
return none
}
q.pop_invalid(arch)
mut job := q.queues[arch].peek()?
if job.timestamp < time.now() {
job = q.queues[arch].pop()?
if !job.single {
// TODO how do we handle this properly? Is it even possible for a
// cron expression to not return a next time if it's already been
// used before?
q.reschedule(job, arch) or {}
}
return job
}
}
return none
}
// pop_n tries to pop at most n available jobs for the given architecture.
pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
lock q.mutex {
if arch !in q.queues {
return []
}
mut out := []BuildJob{}
for out.len < n {
q.pop_invalid(arch)
mut job := q.queues[arch].peek() or { break }
if job.timestamp < time.now() {
job = q.queues[arch].pop() or { break }
if !job.single {
// TODO idem
q.reschedule(job, arch) or {}
}
out << job
} else {
break
}
}
return out
}
return []
}
// invalidate a target's old build jobs.
pub fn (mut q BuildJobQueue) invalidate(target_id int) {
q.invalidated[target_id] = time.now()
}
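As a usage sketch of the queue (hypothetical code, assuming `ce` is a previously parsed CronExpression and `target` an existing Target): the server inserts each target once, and agents then drain ready jobs per architecture:

    mut q := new_job_queue(ce, 'archlinux:base-devel')
    q.insert_all(target) or { panic(err) }
    // an agent asks for at most 5 jobs that are ready to run on x86_64
    jobs := q.pop_n('x86_64', 5)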

View File

@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
 curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
 echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
 [ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
 curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
 echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
 [ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@@ -0,0 +1,20 @@
echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
echo -e '+ pacman -Syu --needed --noconfirm'
pacman -Syu --needed --noconfirm
echo -e '+ su builder'
su builder
echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
git clone --single-branch --depth 1 'https://examplerepo.com' repo
echo -e '+ cd '\''repo/example/path'\'''
cd 'repo/example/path'
echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
makepkg --nobuild --syncdeps --needed --noconfirm
echo -e '+ source PKGBUILD'
source PKGBUILD
echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@@ -0,0 +1,20 @@
echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
echo -e '+ pacman -Syu --needed --noconfirm'
pacman -Syu --needed --noconfirm
echo -e '+ su builder'
su builder
echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
git clone --single-branch --depth 1 'https://examplerepo.com' repo
echo -e '+ cd '\''repo/example/path with spaces'\'''
cd 'repo/example/path with spaces'
echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
makepkg --nobuild --syncdeps --needed --noconfirm
echo -e '+ source PKGBUILD'
source PKGBUILD
echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@@ -18,5 +18,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
 curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
 echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
 [ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done

View File

@@ -1,6 +1,6 @@
 module build
-import models { Target }
+import models { BuildConfig }
 // escape_shell_string escapes any characters that could be interpreted
 // incorrectly by a shell. The resulting value should be safe to use inside an
@@ -23,13 +23,13 @@ pub fn echo_commands(cmds []string) []string {
 }
 // create_build_script generates a shell script that builds a given Target.
-fn create_build_script(address string, target &Target, build_arch string) string {
-    repo_url := '$address/$target.repo'
+fn create_build_script(address string, config BuildConfig, build_arch string) string {
+    repo_url := '$address/$config.repo'
     mut commands := [
         // This will later be replaced by a proper setting for changing the
         // mirrorlist
-        "echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
+        "echo -e '[$config.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
         // We need to update the package list of the repo we just added above.
         // This should however not pull in a lot of packages as long as the
        // builder image is rebuilt frequently.
@@ -38,22 +38,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) string {
         'su builder',
     ]
-    commands << match target.kind {
+    commands << match config.kind {
         'git' {
-            if target.branch == '' {
+            if config.branch == '' {
                 [
-                    "git clone --single-branch --depth 1 '$target.url' repo",
+                    "git clone --single-branch --depth 1 '$config.url' repo",
                 ]
             } else {
                 [
-                    "git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo",
+                    "git clone --single-branch --depth 1 --branch $config.branch '$config.url' repo",
                 ]
             }
         }
         'url' {
             [
                 'mkdir repo',
-                "curl -o repo/PKGBUILD -L '$target.url'",
+                "curl -o repo/PKGBUILD -L '$config.url'",
             ]
         }
         else {
@@ -61,19 +61,32 @@ fn create_build_script(address string, config BuildConfig, build_arch string) string {
         }
     }
+    commands << if config.path != '' {
+        "cd 'repo/$config.path'"
+    } else {
+        'cd repo'
+    }
+
     commands << [
-        'cd repo',
         'makepkg --nobuild --syncdeps --needed --noconfirm',
         'source PKGBUILD',
+    ]
+
+    if !config.force {
         // The build container checks whether the package is already present on
         // the server.
-        'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
-        // If the above curl command succeeds, we don't need to rebuild the
-        // package. However, because we're in a su shell, the exit command will
-        // drop us back into the root shell. Therefore, we must check whether
-        // we're in root so we don't proceed.
-        '[ "\$(id -u)" == 0 ] && exit 0',
-        'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
+        commands << [
+            'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+            // If the above curl command succeeds, we don't need to rebuild the
+            // package. However, because we're in a su shell, the exit command will
+            // drop us back into the root shell. Therefore, we must check whether
+            // we're in root so we don't proceed.
+            '[ "\$(id -u)" == 0 ] && exit 0',
+        ]
+    }
+
+    commands << [
+        'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
     ]
     return echo_commands(commands).join('\n')

View File

@@ -1,43 +1,75 @@
 module build
-import models { Target }
+import models { BuildConfig }
-fn test_create_build_script_git_branch() {
-    target := Target{
-        id: 1
+fn test_create_build_script_git() {
+    config := BuildConfig{
+        target_id: 1
         kind: 'git'
         url: 'https://examplerepo.com'
-        branch: 'main'
         repo: 'vieter'
+        base_image: 'not-used:latest'
     }
-    build_script := create_build_script('https://example.com', target, 'x86_64')
-    expected := $embed_file('build_script_git_branch.sh')
+
+    build_script := create_build_script('https://example.com', config, 'x86_64')
+    expected := $embed_file('scripts/git.sh')
     assert build_script == expected.to_string().trim_space()
 }
-fn test_create_build_script_git() {
-    target := Target{
-        id: 1
+fn test_create_build_script_git_path() {
+    mut config := BuildConfig{
+        target_id: 1
         kind: 'git'
         url: 'https://examplerepo.com'
         repo: 'vieter'
+        path: 'example/path'
+        base_image: 'not-used:latest'
     }
-    build_script := create_build_script('https://example.com', target, 'x86_64')
-    expected := $embed_file('build_script_git.sh')
+
+    mut build_script := create_build_script('https://example.com', config, 'x86_64')
+    mut expected := $embed_file('scripts/git_path.sh')
+
+    assert build_script == expected.to_string().trim_space()
+
+    config = BuildConfig{
+        ...config
+        path: 'example/path with spaces'
+    }
+
+    build_script = create_build_script('https://example.com', config, 'x86_64')
+    expected = $embed_file('scripts/git_path_spaces.sh')
+
+    assert build_script == expected.to_string().trim_space()
+}
+
+fn test_create_build_script_git_branch() {
+    config := BuildConfig{
+        target_id: 1
+        kind: 'git'
+        url: 'https://examplerepo.com'
+        branch: 'main'
+        repo: 'vieter'
+        base_image: 'not-used:latest'
+    }
+
+    build_script := create_build_script('https://example.com', config, 'x86_64')
+    expected := $embed_file('scripts/git_branch.sh')
     assert build_script == expected.to_string().trim_space()
 }
 fn test_create_build_script_url() {
-    target := Target{
-        id: 1
+    config := BuildConfig{
+        target_id: 1
         kind: 'url'
         url: 'https://examplerepo.com'
         repo: 'vieter'
+        base_image: 'not-used:latest'
     }
-    build_script := create_build_script('https://example.com', target, 'x86_64')
-    expected := $embed_file('build_script_url.sh')
+
+    build_script := create_build_script('https://example.com', config, 'x86_64')
+    expected := $embed_file('scripts/url.sh')
     assert build_script == expected.to_string().trim_space()
 }

View File

@@ -2,7 +2,7 @@ module client
 import net.http { Method }
 import net.urllib
-import web.response { Response }
+import web.response { Response, new_data_response }
 import json
 pub struct Client {
@@ -21,7 +21,7 @@ pub fn new(address string, api_key string) Client {
 // send_request_raw sends an HTTP request, returning the http.Response object.
 // It encodes the params so that they're safe to pass as HTTP query parameters.
-fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) ?http.Response {
+fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) !http.Response {
     mut full_url := '$c.address$url'
     if params.len > 0 {
@@ -38,31 +38,53 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str
         full_url = '$full_url?$params_str'
     }
-    mut req := http.new_request(method, full_url, body)?
-    req.add_custom_header('X-Api-Key', c.api_key)?
+    // Looking at the source code, this function doesn't actually fail, so I'm
+    // not sure why it returns an optional
+    mut req := http.new_request(method, full_url, body) or { return error('') }
+    req.add_custom_header('X-Api-Key', c.api_key)!
-    res := req.do()?
+    res := req.do()!
     return res
 }
 // send_request<T> just calls send_request_with_body<T> with an empty body.
-fn (c &Client) send_request<T>(method Method, url string, params map[string]string) ?Response<T> {
+fn (c &Client) send_request<T>(method Method, url string, params map[string]string) !Response<T> {
     return c.send_request_with_body<T>(method, url, params, '')
 }
 // send_request_with_body<T> calls send_request_raw_response & parses its
 // output as a Response<T> object.
-fn (c &Client) send_request_with_body<T>(method Method, url string, params map[string]string, body string) ?Response<T> {
-    res_text := c.send_request_raw_response(method, url, params, body)?
-    data := json.decode(Response<T>, res_text)?
+fn (c &Client) send_request_with_body<T>(method Method, url string, params map[string]string, body string) !Response<T> {
+    res := c.send_request_raw(method, url, params, body)!
+    status := res.status()
+
+    // Non-successful requests are expected to return either an empty body or
+    // Response<string>
+    if status.is_error() {
+        // A non-successful status call will have an empty body
+        if res.body == '' {
+            return error('Error $res.status_code ($status.str()): (empty response)')
+        }
+
+        data := json.decode(Response<string>, res.body)!
+
+        return error('Status $res.status_code ($status.str()): $data.message')
+    }
+
+    // Just return an empty successful response
+    if res.body == '' {
+        return new_data_response(T{})
+    }
+
+    data := json.decode(Response<T>, res.body)!
     return data
 }
 // send_request_raw_response returns the raw text response for an HTTP request.
-fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) ?string {
-    res := c.send_request_raw(method, url, params, body)?
+fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) !string {
+    res := c.send_request_raw(method, url, params, body)!
     return res.body
 }

src/client/jobs.v (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
module client
import models { BuildConfig }
// poll_jobs requests a list of new build jobs from the server.
pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
data := c.send_request<[]BuildConfig>(.get, '/api/v1/jobs/poll', {
'arch': arch
'max': max.str()
})!
return data.data
}
// queue_job adds a new one-time build job for the given target to the job
// queue.
pub fn (c &Client) queue_job(target_id int, arch string, force bool) ! {
c.send_request<string>(.post, '/api/v1/jobs/queue', {
'target': target_id.str()
'arch': arch
'force': force.str()
})!
}
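For context, a hedged sketch of how an agent might call these two helpers (server address, API key and target id are illustrative):

    c := client.new('https://arch.example.com', 'secret')
    // fetch up to two jobs for this architecture
    configs := c.poll_jobs('x86_64', 2) or { panic(err) }
    // or ask the server to queue a one-time forced build for target 1
    c.queue_job(1, 'x86_64', true) or { panic(err) }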

View File

@@ -6,40 +6,29 @@ import web.response { Response }
 import time
 // get_build_logs returns all build logs.
-pub fn (c &Client) get_build_logs(filter BuildLogFilter) ?Response<[]BuildLog> {
+pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog {
     params := models.params_from(filter)
-    data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
+    data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)!
-    return data
-}
-
-// get_build_logs_for_target returns all build logs for a given target.
-pub fn (c &Client) get_build_logs_for_target(target_id int) ?Response<[]BuildLog> {
-    params := {
-        'repo': target_id.str()
-    }
-
-    data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
-
-    return data
+    return data.data
 }
 // get_build_log returns a specific build log.
-pub fn (c &Client) get_build_log(id int) ?Response<BuildLog> {
-    data := c.send_request<BuildLog>(Method.get, '/api/v1/logs/$id', {})?
+pub fn (c &Client) get_build_log(id int) !BuildLog {
+    data := c.send_request<BuildLog>(Method.get, '/api/v1/logs/$id', {})!
-    return data
+    return data.data
 }
 // get_build_log_content returns the contents of the build log file.
-pub fn (c &Client) get_build_log_content(id int) ?string {
-    data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')?
+pub fn (c &Client) get_build_log_content(id int) !string {
+    data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')!
     return data
 }
 // add_build_log adds a new build log to the server.
-pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) ?Response<int> {
+pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response<int> {
     params := {
         'target': target_id.str()
         'startTime': start_time.unix_time().str()
@@ -48,7 +37,7 @@ pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time t
         'exitCode': exit_code.str()
     }
-    data := c.send_request_with_body<int>(Method.post, '/api/v1/logs', params, content)?
+    data := c.send_request_with_body<int>(Method.post, '/api/v1/logs', params, content)!
     return data
 }

View File

@@ -2,24 +2,23 @@ module client
 import models { Target, TargetFilter }
 import net.http { Method }
-import web.response { Response }
 // get_targets returns a list of targets, given a filter object.
-pub fn (c &Client) get_targets(filter TargetFilter) ?[]Target {
+pub fn (c &Client) get_targets(filter TargetFilter) ![]Target {
     params := models.params_from(filter)
-    data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)?
+    data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)!
     return data.data
 }
 // get_all_targets retrieves *all* targets from the API using the default
 // limit.
-pub fn (c &Client) get_all_targets() ?[]Target {
+pub fn (c &Client) get_all_targets() ![]Target {
     mut targets := []Target{}
     mut offset := u64(0)
     for {
-        sub_targets := c.get_targets(offset: offset)?
+        sub_targets := c.get_targets(offset: offset)!
         if sub_targets.len == 0 {
             break
@@ -34,8 +33,8 @@ pub fn (c &Client) get_all_targets() ![]Target {
 }
 // get_target returns the target for a specific id.
-pub fn (c &Client) get_target(id int) ?Target {
-    data := c.send_request<Target>(Method.get, '/api/v1/targets/$id', {})?
+pub fn (c &Client) get_target(id int) !Target {
+    data := c.send_request<Target>(Method.get, '/api/v1/targets/$id', {})!
     return data.data
 }
@@ -45,28 +44,29 @@ pub struct NewTarget {
     url string
     branch string
     repo string
+    path string
     arch []string
 }
 // add_target adds a new target to the server.
-pub fn (c &Client) add_target(t NewTarget) ?Response<int> {
+pub fn (c &Client) add_target(t NewTarget) !int {
     params := models.params_from<NewTarget>(t)
-    data := c.send_request<int>(Method.post, '/api/v1/targets', params)?
+    data := c.send_request<int>(Method.post, '/api/v1/targets', params)!
-    return data
+    return data.data
 }
 // remove_target removes the target with the given id from the server.
-pub fn (c &Client) remove_target(id int) ?Response<string> {
-    data := c.send_request<string>(Method.delete, '/api/v1/targets/$id', {})?
+pub fn (c &Client) remove_target(id int) !string {
+    data := c.send_request<string>(Method.delete, '/api/v1/targets/$id', {})!
-    return data
+    return data.data
 }
 // patch_target sends a PATCH request to the given target with the params as
 // payload.
-pub fn (c &Client) patch_target(id int, params map[string]string) ?Response<string> {
-    data := c.send_request<string>(Method.patch, '/api/v1/targets/$id', params)?
+pub fn (c &Client) patch_target(id int, params map[string]string) !string {
+    data := c.send_request<string>(Method.patch, '/api/v1/targets/$id', params)!
-    return data
+    return data.data
 }


@ -3,8 +3,8 @@ module aur
import cli import cli
import console import console
import client import client
import vieter_v.aur import aur
import vieter_v.conf as vconf import conf as vconf
struct Config { struct Config {
address string [required] address string [required]
@ -21,12 +21,12 @@ pub fn cmd() cli.Command {
name: 'search' name: 'search'
description: 'Search for packages.' description: 'Search for packages.'
required_args: 1 required_args: 1
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
c := aur.new() c := aur.new()
pkgs := c.search(cmd.args[0])? pkgs := c.search(cmd.args[0])!
data := pkgs.map([it.name, it.description]) data := pkgs.map([it.name, it.description])
println(console.pretty_table(['name', 'description'], data)?) println(console.pretty_table(['name', 'description'], data)!)
} }
}, },
cli.Command{ cli.Command{
@ -34,12 +34,12 @@ pub fn cmd() cli.Command {
usage: 'repo pkg-name [pkg-name...]' usage: 'repo pkg-name [pkg-name...]'
description: 'Add the given AUR package(s) to Vieter. Non-existent packages will be silently ignored.' description: 'Add the given AUR package(s) to Vieter. Non-existent packages will be silently ignored.'
required_args: 2 required_args: 2
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
c := aur.new() c := aur.new()
pkgs := c.info(cmd.args[1..])? pkgs := c.info(cmd.args[1..])!
vc := client.new(conf.address, conf.api_key) vc := client.new(conf.address, conf.api_key)


@ -13,7 +13,7 @@ pub fn tabbed_table(data [][]string) string {
// pretty_table converts a list of string data into a pretty table. Many thanks // pretty_table converts a list of string data into a pretty table. Many thanks
// to @hungrybluedev in the Vlang Discord for providing this code! // to @hungrybluedev in the Vlang Discord for providing this code!
// https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192 // https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192
pub fn pretty_table(header []string, data [][]string) ?string { pub fn pretty_table(header []string, data [][]string) !string {
column_count := header.len column_count := header.len
mut column_widths := []int{len: column_count, init: header[it].len} mut column_widths := []int{len: column_count, init: header[it].len}
@ -26,7 +26,7 @@ pub fn pretty_table(header []string, data [][]string) ?string {
} }
} }
single_line_length := arrays.sum(column_widths)? + (column_count + 1) * 3 - 4 single_line_length := arrays.sum(column_widths)! + (column_count + 1) * 3 - 4
horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+' horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+'
mut buffer := strings.new_builder(data.len * single_line_length) mut buffer := strings.new_builder(data.len * single_line_length)
@ -64,12 +64,12 @@ pub fn pretty_table(header []string, data [][]string) ?string {
// export_man_pages recursively generates all man pages for the given // export_man_pages recursively generates all man pages for the given
// cli.Command & writes them to the given directory. // cli.Command & writes them to the given directory.
pub fn export_man_pages(cmd cli.Command, path string) ? { pub fn export_man_pages(cmd cli.Command, path string) ! {
man := cmd.manpage() man := cmd.manpage()
os.write_file(os.join_path_single(path, cmd.full_name().replace(' ', '-') + '.1'), os.write_file(os.join_path_single(path, cmd.full_name().replace(' ', '-') + '.1'),
man)? man)!
for sub_cmd in cmd.commands { for sub_cmd in cmd.commands {
export_man_pages(sub_cmd, path)? export_man_pages(sub_cmd, path)!
} }
} }


@ -1,7 +1,7 @@
module logs module logs
import cli import cli
import vieter_v.conf as vconf import conf as vconf
import client import client
import console import console
import time import time
@ -63,30 +63,30 @@ pub fn cmd() cli.Command {
flag: cli.FlagType.string flag: cli.FlagType.string
}, },
] ]
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
mut filter := BuildLogFilter{} mut filter := BuildLogFilter{}
limit := cmd.flags.get_int('limit')? limit := cmd.flags.get_int('limit')!
if limit != 0 { if limit != 0 {
filter.limit = u64(limit) filter.limit = u64(limit)
} }
offset := cmd.flags.get_int('offset')? offset := cmd.flags.get_int('offset')!
if offset != 0 { if offset != 0 {
filter.offset = u64(offset) filter.offset = u64(offset)
} }
target_id := cmd.flags.get_int('target')? target_id := cmd.flags.get_int('target')!
if target_id != 0 { if target_id != 0 {
filter.target = target_id filter.target = target_id
} }
tz_offset := time.offset() tz_offset := time.offset()
if cmd.flags.get_bool('today')? { if cmd.flags.get_bool('today')! {
today := time.now() today := time.now()
filter.after = time.new_time(time.Time{ filter.after = time.new_time(time.Time{
@ -98,12 +98,12 @@ pub fn cmd() cli.Command {
} }
// The -today flag overwrites any of the other date flags. // The -today flag overwrites any of the other date flags.
else { else {
day_str := cmd.flags.get_string('day')? day_str := cmd.flags.get_string('day')!
before_str := cmd.flags.get_string('before')? before_str := cmd.flags.get_string('before')!
after_str := cmd.flags.get_string('after')? after_str := cmd.flags.get_string('after')!
if day_str != '' { if day_str != '' {
day := time.parse_rfc3339(day_str)? day := time.parse_rfc3339(day_str)!
day_utc := time.new_time(time.Time{ day_utc := time.new_time(time.Time{
year: day.year year: day.year
month: day.month month: day.month
@ -118,24 +118,24 @@ pub fn cmd() cli.Command {
filter.before = day_utc.add_days(1) filter.before = day_utc.add_days(1)
} else { } else {
if before_str != '' { if before_str != '' {
filter.before = time.parse(before_str)?.add_seconds(-tz_offset) filter.before = time.parse(before_str)!.add_seconds(-tz_offset)
} }
if after_str != '' { if after_str != '' {
filter.after = time.parse(after_str)?.add_seconds(-tz_offset) filter.after = time.parse(after_str)!.add_seconds(-tz_offset)
} }
} }
} }
if cmd.flags.get_bool('failed')? { if cmd.flags.get_bool('failed')! {
filter.exit_codes = [ filter.exit_codes = [
'!0', '!0',
] ]
} }
raw := cmd.flags.get_bool('raw')? raw := cmd.flags.get_bool('raw')!
list(conf, filter, raw)? list(conf, filter, raw)!
} }
}, },
cli.Command{ cli.Command{
@ -143,12 +143,12 @@ pub fn cmd() cli.Command {
required_args: 1 required_args: 1
usage: 'id' usage: 'id'
description: 'Show all info for a specific build log.' description: 'Show all info for a specific build log.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
id := cmd.args[0].int() id := cmd.args[0].int()
info(conf, id)? info(conf, id)!
} }
}, },
cli.Command{ cli.Command{
@ -156,12 +156,12 @@ pub fn cmd() cli.Command {
required_args: 1 required_args: 1
usage: 'id' usage: 'id'
description: 'Output the content of a build log to stdout.' description: 'Output the content of a build log to stdout.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
id := cmd.args[0].int() id := cmd.args[0].int()
content(conf, id)? content(conf, id)!
} }
}, },
] ]
@ -169,46 +169,38 @@ pub fn cmd() cli.Command {
} }
// print_log_list prints a list of logs. // print_log_list prints a list of logs.
fn print_log_list(logs []BuildLog, raw bool) ? { fn print_log_list(logs []BuildLog, raw bool) ! {
data := logs.map([it.id.str(), it.target_id.str(), it.start_time.local().str(), data := logs.map([it.id.str(), it.target_id.str(), it.start_time.local().str(),
it.exit_code.str()]) it.exit_code.str()])
if raw { if raw {
println(console.tabbed_table(data)) println(console.tabbed_table(data))
} else { } else {
println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)?) println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)!)
} }
} }
// list prints a list of all build logs. // list prints a list of all build logs.
fn list(conf Config, filter BuildLogFilter, raw bool) ? { fn list(conf Config, filter BuildLogFilter, raw bool) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
logs := c.get_build_logs(filter)?.data logs := c.get_build_logs(filter)!
print_log_list(logs, raw)? print_log_list(logs, raw)!
}
// list prints a list of all build logs for a given target.
fn list_for_target(conf Config, target_id int, raw bool) ? {
c := client.new(conf.address, conf.api_key)
logs := c.get_build_logs_for_target(target_id)?.data
print_log_list(logs, raw)?
} }
// info prints the detailed info for a given build log. // info prints the detailed info for a given build log.
fn info(conf Config, id int) ? { fn info(conf Config, id int) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
log := c.get_build_log(id)?.data log := c.get_build_log(id)!
print(log) print(log)
} }
// content outputs the contents of the log file for a given build log to // content outputs the contents of the log file for a given build log to
// stdout. // stdout.
fn content(conf Config, id int) ? { fn content(conf Config, id int) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
content := c.get_build_log_content(id)? content := c.get_build_log_content(id)!
println(content) println(content)
} }


@ -11,11 +11,11 @@ pub fn cmd() cli.Command {
description: 'Generate all man pages & save them in the given directory.' description: 'Generate all man pages & save them in the given directory.'
usage: 'dir' usage: 'dir'
required_args: 1 required_args: 1
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
root := cmd.root() root := cmd.root()
os.mkdir_all(cmd.args[0])? os.mkdir_all(cmd.args[0])!
console.export_man_pages(root, cmd.args[0])? console.export_man_pages(root, cmd.args[0])!
} }
} }
} }


@ -18,11 +18,11 @@ pub fn cmd() cli.Command {
default_value: ['5'] default_value: ['5']
}, },
] ]
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
ce := parse_expression(cmd.args.join(' '))? ce := parse_expression(cmd.args.join(' '))!
count := cmd.flags.get_int('count')? count := cmd.flags.get_int('count')!
for t in ce.next_n(time.now(), count)? { for t in ce.next_n(time.now(), count)! {
println(t) println(t)
} }
} }


@ -1,34 +1,34 @@
module targets module targets
import client import client
import vieter_v.docker import docker
import os import os
import build import build
// build locally builds the target with the given id. // build locally builds the target with the given id.
fn build(conf Config, target_id int) ? { fn build(conf Config, target_id int, force bool) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
target := c.get_target(target_id)? target := c.get_target(target_id)!
build_arch := os.uname().machine build_arch := os.uname().machine
println('Creating base image...') println('Creating base image...')
image_id := build.create_build_image(conf.base_image)? image_id := build.create_build_image(conf.base_image)!
println('Running build...') println('Running build...')
res := build.build_target(conf.address, conf.api_key, image_id, target)? res := build.build_target(conf.address, conf.api_key, image_id, target, force)!
println('Removing build image...') println('Removing build image...')
mut dd := docker.new_conn()? mut dd := docker.new_conn()!
defer { defer {
dd.close() or {} dd.close() or {}
} }
dd.remove_image(image_id)? dd.remove_image(image_id)!
println('Uploading logs to Vieter...') println('Uploading logs to Vieter...')
c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code, c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,
res.logs)? res.logs)!
} }


@ -1,7 +1,7 @@
module targets module targets
import cli import cli
import vieter_v.conf as vconf import conf as vconf
import cron.expression { parse_expression } import cron.expression { parse_expression }
import client { NewTarget } import client { NewTarget }
import console import console
@ -13,7 +13,7 @@ struct Config {
base_image string = 'archlinux:base-devel' base_image string = 'archlinux:base-devel'
} }
// cmd returns the cli submodule that handles the repos API interaction // cmd returns the cli submodule that handles the targets API interaction
pub fn cmd() cli.Command { pub fn cmd() cli.Command {
return cli.Command{ return cli.Command{
name: 'targets' name: 'targets'
@ -39,30 +39,30 @@ pub fn cmd() cli.Command {
flag: cli.FlagType.string flag: cli.FlagType.string
}, },
] ]
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
mut filter := TargetFilter{} mut filter := TargetFilter{}
limit := cmd.flags.get_int('limit')? limit := cmd.flags.get_int('limit')!
if limit != 0 { if limit != 0 {
filter.limit = u64(limit) filter.limit = u64(limit)
} }
offset := cmd.flags.get_int('offset')? offset := cmd.flags.get_int('offset')!
if offset != 0 { if offset != 0 {
filter.offset = u64(offset) filter.offset = u64(offset)
} }
repo := cmd.flags.get_string('repo')? repo := cmd.flags.get_string('repo')!
if repo != '' { if repo != '' {
filter.repo = repo filter.repo = repo
} }
raw := cmd.flags.get_bool('raw')? raw := cmd.flags.get_bool('raw')!
list(conf, filter, raw)? list(conf, filter, raw)!
} }
}, },
cli.Command{ cli.Command{
@ -82,21 +82,27 @@ pub fn cmd() cli.Command {
description: "Which branch to clone; only applies to kind 'git'." description: "Which branch to clone; only applies to kind 'git'."
flag: cli.FlagType.string flag: cli.FlagType.string
}, },
cli.Flag{
name: 'path'
description: 'Subdirectory inside Git repository to use.'
flag: cli.FlagType.string
},
] ]
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
t := NewTarget{ t := NewTarget{
kind: cmd.flags.get_string('kind')? kind: cmd.flags.get_string('kind')!
url: cmd.args[0] url: cmd.args[0]
repo: cmd.args[1] repo: cmd.args[1]
branch: cmd.flags.get_string('branch') or { '' } branch: cmd.flags.get_string('branch') or { '' }
path: cmd.flags.get_string('path') or { '' }
} }
raw := cmd.flags.get_bool('raw')? raw := cmd.flags.get_bool('raw')!
add(conf, t, raw)? add(conf, t, raw)!
} }
}, },
cli.Command{ cli.Command{
@ -104,11 +110,11 @@ pub fn cmd() cli.Command {
required_args: 1 required_args: 1
usage: 'id' usage: 'id'
description: 'Remove a target that matches the given id.' description: 'Remove a target that matches the given id.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
remove(conf, cmd.args[0])? remove(conf, cmd.args[0])!
} }
}, },
cli.Command{ cli.Command{
@ -116,11 +122,11 @@ pub fn cmd() cli.Command {
required_args: 1 required_args: 1
usage: 'id' usage: 'id'
description: 'Show detailed information for the target matching the id.' description: 'Show detailed information for the target matching the id.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
info(conf, cmd.args[0])? info(conf, cmd.args[0])!
} }
}, },
cli.Command{ cli.Command{
@ -159,10 +165,15 @@ pub fn cmd() cli.Command {
description: 'Kind of target.' description: 'Kind of target.'
flag: cli.FlagType.string flag: cli.FlagType.string
}, },
cli.Flag{
name: 'path'
description: 'Subdirectory inside Git repository to use.'
flag: cli.FlagType.string
},
] ]
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
found := cmd.flags.get_all_found() found := cmd.flags.get_all_found()
@ -170,11 +181,11 @@ pub fn cmd() cli.Command {
for f in found { for f in found {
if f.name != 'config-file' { if f.name != 'config-file' {
params[f.name] = f.get_string()? params[f.name] = f.get_string()!
} }
} }
patch(conf, cmd.args[0], params)? patch(conf, cmd.args[0], params)!
} }
}, },
cli.Command{ cli.Command{
@ -182,58 +193,82 @@ pub fn cmd() cli.Command {
required_args: 1 required_args: 1
usage: 'id' usage: 'id'
description: 'Build the target with the given id & publish it.' description: 'Build the target with the given id & publish it.'
execute: fn (cmd cli.Command) ? { flags: [
config_file := cmd.flags.get_string('config-file')? cli.Flag{
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? name: 'force'
description: 'Build the target without checking whether it needs to be renewed.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'remote'
description: 'Schedule the build on the server instead of running it locally.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'arch'
description: 'Architecture to schedule build for. Required when using -remote.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
build(conf, cmd.args[0].int())? remote := cmd.flags.get_bool('remote')!
force := cmd.flags.get_bool('force')!
target_id := cmd.args[0].int()
if remote {
arch := cmd.flags.get_string('arch')!
if arch == '' {
return error('When scheduling the build remotely, you have to specify an architecture.')
}
c := client.new(conf.address, conf.api_key)
c.queue_job(target_id, arch, force)!
} else {
build(conf, target_id, force)!
}
} }
}, },
] ]
} }
} }
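As a usage sketch of the new build command (the exact invocation syntax is an assumption based on the flag descriptions above, not taken from the documentation), scheduling target 3 remotely for x86_64 could look like:

```
vieter targets build -remote -arch x86_64 3
```

Omitting `-remote` runs the build locally against the configured base image, as before.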
// get_repo_by_prefix tries to find the repo with the given prefix in its
// ID. If multiple or none are found, an error is raised.
// list prints out a list of all targets. // list prints out a list of all targets.
fn list(conf Config, filter TargetFilter, raw bool) ? { fn list(conf Config, filter TargetFilter, raw bool) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
repos := c.get_targets(filter)? targets := c.get_targets(filter)!
data := repos.map([it.id.str(), it.kind, it.url, it.repo]) data := targets.map([it.id.str(), it.kind, it.url, it.repo])
if raw { if raw {
println(console.tabbed_table(data)) println(console.tabbed_table(data))
} else { } else {
println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)?) println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)!)
} }
} }
// add adds a new repository to the server's list. // add adds a new target to the server's list.
fn add(conf Config, t &NewTarget, raw bool) ? { fn add(conf Config, t &NewTarget, raw bool) ! {
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
res := c.add_target(t)? target_id := c.add_target(t)!
if raw { if raw {
println(res.data) println(target_id)
} else { } else {
println('Target added with id $res.data') println('Target added with id $target_id')
} }
} }
// remove removes a repository from the server's list. // remove removes a target from the server's list.
fn remove(conf Config, id string) ? { fn remove(conf Config, id string) ! {
id_int := id.int() c := client.new(conf.address, conf.api_key)
c.remove_target(id.int())!
if id_int != 0 {
c := client.new(conf.address, conf.api_key)
res := c.remove_target(id_int)?
println(res.message)
}
} }
// patch patches a given repository with the provided params. // patch patches a given target with the provided params.
fn patch(conf Config, id string, params map[string]string) ? { fn patch(conf Config, id string, params map[string]string) ! {
// We check the cron expression first because it's useless to send an // We check the cron expression first because it's useless to send an
// invalid one to the server. // invalid one to the server.
if 'schedule' in params && params['schedule'] != '' { if 'schedule' in params && params['schedule'] != '' {
@ -242,24 +277,13 @@ fn patch(conf Config, id string, params map[string]string) ? {
} }
} }
id_int := id.int()
if id_int != 0 {
c := client.new(conf.address, conf.api_key)
res := c.patch_target(id_int, params)?
println(res.message)
}
}
// info shows detailed information for a given repo.
fn info(conf Config, id string) ? {
id_int := id.int()
if id_int == 0 {
return
}
c := client.new(conf.address, conf.api_key) c := client.new(conf.address, conf.api_key)
repo := c.get_target(id_int)? c.patch_target(id.int(), params)!
println(repo) }
// info shows detailed information for a given target.
fn info(conf Config, id string) ! {
c := client.new(conf.address, conf.api_key)
target := c.get_target(id.int())!
println(target)
} }


@ -1,7 +1,7 @@
module cron module cron
import cli import cli
import vieter_v.conf as vconf import conf as vconf
struct Config { struct Config {
pub: pub:
@ -22,11 +22,11 @@ pub fn cmd() cli.Command {
return cli.Command{ return cli.Command{
name: 'cron' name: 'cron'
description: 'Start the cron service that periodically runs builds.' description: 'Start the cron service that periodically runs builds.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
cron(conf)? cron(conf)!
} }
} }
} }


@ -8,7 +8,7 @@ import os
const log_file_name = 'vieter.cron.log' const log_file_name = 'vieter.cron.log'
// cron starts a cron daemon & starts periodically scheduling builds. // cron starts a cron daemon & starts periodically scheduling builds.
pub fn cron(conf Config) ? { pub fn cron(conf Config) ! {
// Configure logger // Configure logger
log_level := log.level_from_tag(conf.log_level) or { log_level := log.level_from_tag(conf.log_level) or {
return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
@ -27,7 +27,7 @@ pub fn cron(conf Config) ? {
} }
mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce, mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)? conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)!
d.run() d.run()
} }


@ -79,7 +79,7 @@ fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
mut status := 0 mut status := 0
res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(), res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.target) or { &sb.target, false) or {
d.ldebug('build_target error: $err.msg()') d.ldebug('build_target error: $err.msg()')
status = 1 status = 1


@ -6,7 +6,7 @@ import datatypes { MinHeap }
import cron.expression { CronExpression, parse_expression } import cron.expression { CronExpression, parse_expression }
import math import math
import build import build
import vieter_v.docker import docker
import os import os
import client import client
import models { Target } import models { Target }
@ -53,7 +53,7 @@ mut:
// init_daemon initializes a new Daemon object. It renews the targets & // init_daemon initializes a new Daemon object. It renews the targets &
// populates the build queue for the first time. // populates the build queue for the first time.
pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon { pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) !Daemon {
mut d := Daemon{ mut d := Daemon{
client: client.new(address, api_key) client: client.new(address, api_key)
base_image: base_image base_image: base_image
@ -207,7 +207,7 @@ fn (mut d Daemon) renew_queue() {
// For some reason, using // For some reason, using
// ```v // ```v
// for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now { // for d.queue.len() > 0 && d.queue.peek() !.timestamp < now {
//``` //```
// here causes the function to prematurely just exit, without any errors or anything, very weird // here causes the function to prematurely just exit, without any errors or anything, very weird
// https://github.com/vlang/v/issues/14042 // https://github.com/vlang/v/issues/14042


@ -3,33 +3,33 @@ module daemon
import log import log
// log creates a log message with the given level // log creates a log message with the given level
pub fn (mut d Daemon) log(msg &string, level log.Level) { pub fn (mut d Daemon) log(msg string, level log.Level) {
lock d.logger { lock d.logger {
d.logger.send_output(msg, level) d.logger.send_output(msg, level)
} }
} }
// lfatal creates a log message with the fatal level // lfatal creates a log message with the fatal level
pub fn (mut d Daemon) lfatal(msg &string) { pub fn (mut d Daemon) lfatal(msg string) {
d.log(msg, log.Level.fatal) d.log(msg, log.Level.fatal)
} }
// lerror creates a log message with the error level // lerror creates a log message with the error level
pub fn (mut d Daemon) lerror(msg &string) { pub fn (mut d Daemon) lerror(msg string) {
d.log(msg, log.Level.error) d.log(msg, log.Level.error)
} }
// lwarn creates a log message with the warn level // lwarn creates a log message with the warn level
pub fn (mut d Daemon) lwarn(msg &string) { pub fn (mut d Daemon) lwarn(msg string) {
d.log(msg, log.Level.warn) d.log(msg, log.Level.warn)
} }
// linfo creates a log message with the info level // linfo creates a log message with the info level
pub fn (mut d Daemon) linfo(msg &string) { pub fn (mut d Daemon) linfo(msg string) {
d.log(msg, log.Level.info) d.log(msg, log.Level.info)
} }
// ldebug creates a log message with the debug level // ldebug creates a log message with the debug level
pub fn (mut d Daemon) ldebug(msg &string) { pub fn (mut d Daemon) ldebug(msg string) {
d.log(msg, log.Level.debug) d.log(msg, log.Level.debug)
} }


@ -12,7 +12,7 @@ pub struct CronExpression {
// next calculates the earliest time this cron expression is valid. It will // next calculates the earliest time this cron expression is valid. It will
// always pick a moment in the future, even if ref matches completely up to the // always pick a moment in the future, even if ref matches completely up to the
// minute. This function consciously does not take leap years into account. // minute. This function consciously does not take leap years into account.
pub fn (ce &CronExpression) next(ref time.Time) ?time.Time { pub fn (ce &CronExpression) next(ref time.Time) !time.Time {
// If the given ref matches the next cron occurrence up to the minute, it // If the given ref matches the next cron occurrence up to the minute, it
// will return that value. Because we always want to return a value in the // will return that value. Because we always want to return a value in the
// future, we artificially shift the ref 60 seconds to make sure we always // future, we artificially shift the ref 60 seconds to make sure we always
@ -117,159 +117,20 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
// next_from_now returns the result of ce.next(ref) where ref is the result of // next_from_now returns the result of ce.next(ref) where ref is the result of
// time.now(). // time.now().
pub fn (ce &CronExpression) next_from_now() ?time.Time { pub fn (ce &CronExpression) next_from_now() !time.Time {
return ce.next(time.now()) return ce.next(time.now())
} }
// next_n returns the n next occurrences of the expression, given a starting // next_n returns the n next occurrences of the expression, given a starting
// time. // time.
pub fn (ce &CronExpression) next_n(ref time.Time, n int) ?[]time.Time { pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time {
mut times := []time.Time{cap: n} mut times := []time.Time{cap: n}
times << ce.next(ref)? times << ce.next(ref)!
for i in 1 .. n { for i in 1 .. n {
times << ce.next(times[i - 1])? times << ce.next(times[i - 1])!
} }
return times return times
} }
// parse_range parses a given string into a range of sorted integers, if
// possible.
fn parse_range(s string, min int, max int, mut bitv []bool) ? {
mut start := min
mut end := max
mut interval := 1
exps := s.split('/')
if exps.len > 2 {
return error('Invalid expression.')
}
if exps[0] != '*' {
dash_parts := exps[0].split('-')
if dash_parts.len > 2 {
return error('Invalid expression.')
}
start = dash_parts[0].int()
// The builtin parsing functions return zero if the string can't be
// parsed into a number, so we have to explicitly check whether they
// actually entered zero or if it's an invalid number.
if start == 0 && dash_parts[0] != '0' {
return error('Invalid number.')
}
// Check whether the start value is out of range
if start < min || start > max {
return error('Out of range.')
}
if dash_parts.len == 2 {
end = dash_parts[1].int()
if end == 0 && dash_parts[1] != '0' {
return error('Invalid number.')
}
if end < start || end > max {
return error('Out of range.')
}
}
}
if exps.len > 1 {
interval = exps[1].int()
// interval being zero is always invalid, but we want to check why
// it's invalid for better error messages.
if interval == 0 {
if exps[1] != '0' {
return error('Invalid number.')
} else {
return error('Step size zero not allowed.')
}
}
if interval > max - min {
return error('Step size too large.')
}
}
// Here, s solely consists of a number, so that's the only value we
// should return.
else if exps[0] != '*' && !exps[0].contains('-') {
bitv[start - min] = true
return
}
for start <= end {
bitv[start - min] = true
start += interval
}
}
// bitv_to_ints converts a bit vector into an array containing the
// corresponding values.
fn bitv_to_ints(bitv []bool, min int) []int {
mut out := []int{}
for i in 0 .. bitv.len {
if bitv[i] {
out << min + i
}
}
return out
}
// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ?[]int {
mut bitv := []bool{len: max - min + 1, init: false}
for range in s.split(',') {
parse_range(range, min, max, mut bitv)?
}
return bitv_to_ints(bitv, min)
}
// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) ?CronExpression {
// The filter allows for multiple spaces between parts
mut parts := exp.split(' ').filter(it != '')
if parts.len < 2 || parts.len > 4 {
return error('Expression must contain between 2 and 4 space-separated parts.')
}
// For ease of use, we allow the user to only specify as many parts as they
// need.
for parts.len < 4 {
parts << '*'
}
mut part_results := [][]int{}
mins := [0, 0, 1, 1]
maxs := [59, 23, 31, 12]
// This for loop allows us to more clearly propagate the error to the user.
for i, min in mins {
part_results << parse_part(parts[i], min, maxs[i]) or {
return error('An error occurred with part $i: $err.msg()')
}
}
return CronExpression{
minutes: part_results[0]
hours: part_results[1]
days: part_results[2]
months: part_results[3]
}
}


@ -0,0 +1,146 @@
module expression
import bitfield
// parse_range parses a given string into a range of sorted integers. Its
// result is a BitField with set bits for all numbers in the result.
fn parse_range(s string, min int, max int) !bitfield.BitField {
mut start := min
mut end := max
mut interval := 1
mut bf := bitfield.new(max - min + 1)
exps := s.split('/')
if exps.len > 2 {
return error('Invalid expression.')
}
if exps[0] != '*' {
dash_parts := exps[0].split('-')
if dash_parts.len > 2 {
return error('Invalid expression.')
}
start = dash_parts[0].int()
// The builtin parsing functions return zero if the string can't be
// parsed into a number, so we have to explicitly check whether they
// actually entered zero or if it's an invalid number.
if start == 0 && dash_parts[0] != '0' {
return error('Invalid number.')
}
// Check whether the start value is out of range
if start < min || start > max {
return error('Out of range.')
}
if dash_parts.len == 2 {
end = dash_parts[1].int()
if end == 0 && dash_parts[1] != '0' {
return error('Invalid number.')
}
if end < start || end > max {
return error('Out of range.')
}
}
}
if exps.len > 1 {
interval = exps[1].int()
// interval being zero is always invalid, but we want to check why
// it's invalid for better error messages.
if interval == 0 {
if exps[1] != '0' {
return error('Invalid number.')
} else {
return error('Step size zero not allowed.')
}
}
if interval > max - min {
return error('Step size too large.')
}
}
// Here, s solely consists of a number, so that's the only value we
// should return.
else if exps[0] != '*' && !exps[0].contains('-') {
bf.set_bit(start - min)
return bf
}
for start <= end {
bf.set_bit(start - min)
start += interval
}
return bf
}
// bf_to_ints takes a BitField and converts it into the expected list of actual
// integers.
fn bf_to_ints(bf bitfield.BitField, min int) []int {
mut out := []int{}
for i in 0 .. bf.get_size() {
if bf.get_bit(i) == 1 {
out << min + i
}
}
return out
}
// parse_part parses a given part of a cron expression & returns the
// corresponding array of ints.
fn parse_part(s string, min int, max int) ![]int {
mut bf := bitfield.new(max - min + 1)
for range in s.split(',') {
bf2 := parse_range(range, min, max)!
bf = bitfield.bf_or(bf, bf2)
}
return bf_to_ints(bf, min)
}
// parse_expression parses an entire cron expression string into a
// CronExpression object, if possible.
pub fn parse_expression(exp string) !CronExpression {
// The filter allows for multiple spaces between parts
mut parts := exp.split(' ').filter(it != '')
if parts.len < 2 || parts.len > 4 {
return error('Expression must contain between 2 and 4 space-separated parts.')
}
// For ease of use, we allow the user to only specify as many parts as they
// need.
for parts.len < 4 {
parts << '*'
}
mut part_results := [][]int{}
mins := [0, 0, 1, 1]
maxs := [59, 23, 31, 12]
// This for loop allows us to more clearly propagate the error to the user.
for i, min in mins {
part_results << parse_part(parts[i], min, maxs[i]) or {
return error('An error occurred with part $i: $err.msg()')
}
}
return CronExpression{
minutes: part_results[0]
hours: part_results[1]
days: part_results[2]
months: part_results[3]
}
}
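For context, a minimal usage sketch of this parser from within the codebase (mirroring the `schedule` command earlier in this diff; the `cron.expression` import path is taken from the imports seen above):

```v
module main

import cron.expression { parse_expression }
import time

fn main() {
	// '0 3' means minute 0, hour 3, on every day of every month
	ce := parse_expression('0 3') or { panic(err) }
	// Compute the next three occurrences, starting from now
	times := ce.next_n(time.now(), 3) or { panic(err) }
	for t in times {
		println(t)
	}
}
```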


@ -3,96 +3,87 @@ module expression
// parse_range_error returns the error message from parse_range. If the result is '', // parse_range_error returns the error message from parse_range. If the result is '',
// that means the function didn't error. // that means the function didn't error.
fn parse_range_error(s string, min int, max int) string { fn parse_range_error(s string, min int, max int) string {
mut bitv := []bool{len: max - min + 1, init: false} parse_range(s, min, max) or { return err.msg }
parse_range(s, min, max, mut bitv) or { return err.msg }
return '' return ''
} }
// =====parse_range===== // =====parse_range=====
fn test_range_star_range() ? { fn test_range_star_range() ! {
mut bitv := []bool{len: 6, init: false} bf := parse_range('*', 0, 5)!
parse_range('*', 0, 5, mut bitv)?
assert bitv == [true, true, true, true, true, true] assert bf_to_ints(bf, 0) == [0, 1, 2, 3, 4, 5]
} }
fn test_range_number() ? { fn test_range_number() ! {
mut bitv := []bool{len: 6, init: false} bf := parse_range('4', 0, 5)!
parse_range('4', 0, 5, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4] assert bf_to_ints(bf, 0) == [4]
} }
fn test_range_number_too_large() ? { fn test_range_number_too_large() ! {
assert parse_range_error('10', 0, 6) == 'Out of range.' assert parse_range_error('10', 0, 6) == 'Out of range.'
} }
fn test_range_number_too_small() ? { fn test_range_number_too_small() ! {
assert parse_range_error('0', 2, 6) == 'Out of range.' assert parse_range_error('0', 2, 6) == 'Out of range.'
} }
fn test_range_number_invalid() ? { fn test_range_number_invalid() ! {
assert parse_range_error('x', 0, 6) == 'Invalid number.' assert parse_range_error('x', 0, 6) == 'Invalid number.'
} }
fn test_range_step_star_1() ? { fn test_range_step_star_1() ! {
mut bitv := []bool{len: 21, init: false} bf := parse_range('*/4', 0, 20)!
parse_range('*/4', 0, 20, mut bitv)?
assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20] assert bf_to_ints(bf, 0) == [0, 4, 8, 12, 16, 20]
} }
fn test_range_step_star_2() ? { fn test_range_step_star_2() ! {
mut bitv := []bool{len: 8, init: false} bf := parse_range('*/3', 1, 8)!
parse_range('*/3', 1, 8, mut bitv)?
assert bitv_to_ints(bitv, 1) == [1, 4, 7] assert bf_to_ints(bf, 1) == [1, 4, 7]
} }
fn test_range_step_star_too_large() ? { fn test_range_step_star_too_large() ! {
assert parse_range_error('*/21', 0, 20) == 'Step size too large.' assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
} }
fn test_range_step_zero() ? { fn test_range_step_zero() ! {
assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.' assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
} }
fn test_range_step_number() ? { fn test_range_step_number() ! {
mut bitv := []bool{len: 21, init: false} bf := parse_range('5/4', 2, 22)!
parse_range('5/4', 2, 22, mut bitv)?
assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21] assert bf_to_ints(bf, 2) == [5, 9, 13, 17, 21]
} }
fn test_range_step_number_too_large() ? { fn test_range_step_number_too_large() ! {
assert parse_range_error('10/4', 0, 5) == 'Out of range.' assert parse_range_error('10/4', 0, 5) == 'Out of range.'
} }
fn test_range_step_number_too_small() ? { fn test_range_step_number_too_small() ! {
assert parse_range_error('2/4', 5, 10) == 'Out of range.' assert parse_range_error('2/4', 5, 10) == 'Out of range.'
} }
fn test_range_dash() ? { fn test_range_dash() ! {
mut bitv := []bool{len: 10, init: false} bf := parse_range('4-8', 0, 9)!
parse_range('4-8', 0, 9, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8] assert bf_to_ints(bf, 0) == [4, 5, 6, 7, 8]
} }
fn test_range_dash_step() ? { fn test_range_dash_step() ! {
mut bitv := []bool{len: 10, init: false} bf := parse_range('4-8/2', 0, 9)!
parse_range('4-8/2', 0, 9, mut bitv)?
assert bitv_to_ints(bitv, 0) == [4, 6, 8] assert bf_to_ints(bf, 0) == [4, 6, 8]
} }
// =====parse_part===== // =====parse_part=====
fn test_part_single() ? { fn test_part_single() ! {
assert parse_part('*', 0, 5)? == [0, 1, 2, 3, 4, 5] assert parse_part('*', 0, 5)! == [0, 1, 2, 3, 4, 5]
} }
fn test_part_multiple() ? { fn test_part_multiple() ! {
assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8] assert parse_part('*/2,2/3', 1, 8)! == [1, 2, 3, 5, 7, 8]
} }


@ -2,12 +2,12 @@ module expression
import time { parse } import time { parse }
fn util_test_time(exp string, t1_str string, t2_str string) ? { fn util_test_time(exp string, t1_str string, t2_str string) ! {
ce := parse_expression(exp)? ce := parse_expression(exp)!
t1 := parse(t1_str)? t1 := parse(t1_str)!
t2 := parse(t2_str)? t2 := parse(t2_str)!
t3 := ce.next(t1)? t3 := ce.next(t1)!
assert t2.year == t3.year assert t2.year == t3.year
assert t2.month == t3.month assert t2.month == t3.month
@ -16,19 +16,19 @@ fn util_test_time(exp string, t1_str string, t2_str string) ? {
assert t2.minute == t3.minute assert t2.minute == t3.minute
} }
fn test_next_simple() ? { fn test_next_simple() ! {
// Very simple // Very simple
util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')? util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
// Overlap to next day // Overlap to next day
util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')? util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')? util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')? util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
// Overlap to next month // Overlap to next month
util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')? util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')!
// Overlap to next year // Overlap to next year
util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')? util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')!
} }


@ -17,17 +17,21 @@ const (
$embed_file('migrations/001-initial/up.sql'), $embed_file('migrations/001-initial/up.sql'),
$embed_file('migrations/002-rename-to-targets/up.sql'), $embed_file('migrations/002-rename-to-targets/up.sql'),
$embed_file('migrations/003-target-url-type/up.sql'), $embed_file('migrations/003-target-url-type/up.sql'),
$embed_file('migrations/004-nullable-branch/up.sql'),
$embed_file('migrations/005-repo-path/up.sql'),
] ]
migrations_down = [ migrations_down = [
$embed_file('migrations/001-initial/down.sql'), $embed_file('migrations/001-initial/down.sql'),
$embed_file('migrations/002-rename-to-targets/down.sql'), $embed_file('migrations/002-rename-to-targets/down.sql'),
$embed_file('migrations/003-target-url-type/down.sql'), $embed_file('migrations/003-target-url-type/down.sql'),
$embed_file('migrations/004-nullable-branch/down.sql'),
$embed_file('migrations/005-repo-path/down.sql'),
] ]
) )
// init initializes a database & adds the correct tables. // init initializes a database & adds the correct tables.
pub fn init(db_path string) ?VieterDb { pub fn init(db_path string) !VieterDb {
conn := sqlite.connect(db_path)? conn := sqlite.connect(db_path)!
sql conn { sql conn {
create table MigrationVersion create table MigrationVersion
@ -60,7 +64,7 @@ pub fn init(db_path string) ?VieterDb {
res := conn.exec_none(part) res := conn.exec_none(part)
if res != sqlite.sqlite_done { if res != sqlite.sqlite_done {
return error('An error occurred while applying migration $version_num') return error('An error occurred while applying migration $version_num: SQLite error code $res')
} }
} }


@ -84,6 +84,8 @@ pub fn (db &VieterDb) add_build_log(log BuildLog) int {
insert log into BuildLog insert log into BuildLog
} }
// Here, this does work because a log doesn't contain any foreign keys,
// meaning the ORM only has to do a single add
inserted_id := db.conn.last_id() as int inserted_id := db.conn.last_id() as int
return inserted_id return inserted_id


@ -0,0 +1,26 @@
-- This down won't really work because it'll throw NOT NULL errors, but I'm
-- just putting it here for future reference (still not sure whether I'm even
-- gonna use these)
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
ALTER TABLE Target RENAME TO _Target_old;
CREATE TABLE Target (
id INTEGER PRIMARY KEY,
url TEXT NOT NULL,
branch TEXT NOT NULL,
repo TEXT NOT NULL,
schedule TEXT,
kind TEXT NOT NULL DEFAULT 'git'
);
INSERT INTO Target (id, url, branch, repo, schedule, kind)
SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
DROP TABLE _Target_old;
COMMIT;
PRAGMA foreign_keys=on;


@ -0,0 +1,23 @@
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
ALTER TABLE Target RENAME TO _Target_old;
CREATE TABLE Target (
id INTEGER PRIMARY KEY,
url TEXT NOT NULL,
branch TEXT,
repo TEXT NOT NULL,
schedule TEXT,
kind TEXT NOT NULL DEFAULT 'git'
);
INSERT INTO Target (id, url, branch, repo, schedule, kind)
SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
DROP TABLE _Target_old;
COMMIT;
PRAGMA foreign_keys=on;


@ -0,0 +1 @@
ALTER TABLE Target DROP COLUMN path;


@ -0,0 +1 @@
ALTER TABLE Target ADD COLUMN path TEXT;


@ -38,14 +38,17 @@ pub fn (db &VieterDb) get_target(target_id int) ?Target {
} }
// add_target inserts the given target into the database. // add_target inserts the given target into the database.
pub fn (db &VieterDb) add_target(repo Target) int { pub fn (db &VieterDb) add_target(target Target) int {
sql db.conn { sql db.conn {
insert repo into Target insert target into Target
} }
inserted_id := db.conn.last_id() as int // ID of inserted target is the largest id
inserted_target := sql db.conn {
select from Target order by id desc limit 1
}
return inserted_id return inserted_target.id
} }
// delete_target deletes the target with the given id from the database. // delete_target deletes the target with the given id from the database.


@ -9,12 +9,18 @@ import console.schedule
import console.man import console.man
import console.aur import console.aur
import cron import cron
import agent
fn main() { fn main() {
// Stop buffering output so logs always show up immediately
unsafe {
C.setbuf(C.stdout, 0)
}
mut app := cli.Command{ mut app := cli.Command{
name: 'vieter' name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.' description: 'Vieter is a lightweight implementation of an Arch repository server.'
version: '0.4.0' version: '0.5.0-rc.1'
flags: [ flags: [
cli.Flag{ cli.Flag{
flag: cli.FlagType.string flag: cli.FlagType.string
@ -40,6 +46,7 @@ fn main() {
schedule.cmd(), schedule.cmd(),
man.cmd(), man.cmd(),
aur.cmd(), aur.cmd(),
agent.cmd(),
] ]
} }
app.setup() app.setup()


@ -0,0 +1,18 @@
module models
pub struct BuildConfig {
pub:
target_id int
kind string
url string
branch string
path string
repo string
base_image string
force bool
}
// str returns a single-line string representation of a build config
pub fn (c BuildConfig) str() string {
return '{ target: $c.target_id, kind: $c.kind, url: $c.url, branch: $c.branch, path: $c.path, repo: $c.repo, base_image: $c.base_image, force: $c.force }'
}
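A minimal sketch of constructing one of these by hand (all field values here are hypothetical; `path` and `force` fall back to their zero values):

```v
config := BuildConfig{
	target_id: 3
	kind: 'git'
	url: 'https://aur.archlinux.org/vieter.git'
	branch: 'main'
	repo: 'vieter'
	base_image: 'archlinux:base-devel'
}
// println renders the single-line form via the str() method above
println(config)
```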


@ -28,27 +28,45 @@ pub mut:
repo string [nonull] repo string [nonull]
// Cron schedule describing how frequently to build the repo. // Cron schedule describing how frequently to build the repo.
schedule string schedule string
// Subdirectory in the Git repository to cd into
path string
// On which architectures the package is allowed to be built. In reality, // On which architectures the package is allowed to be built. In reality,
// this controls which builders will periodically build the image. // this controls which agents will build this package when scheduled.
arch []TargetArch [fkey: 'target_id'] arch []TargetArch [fkey: 'target_id']
} }
// str returns a string representation. // str returns a string representation.
pub fn (gr &Target) str() string { pub fn (t &Target) str() string {
mut parts := [ mut parts := [
'id: $gr.id', 'id: $t.id',
'kind: $gr.kind', 'kind: $t.kind',
'url: $gr.url', 'url: $t.url',
'branch: $gr.branch', 'branch: $t.branch',
'repo: $gr.repo', 'path: $t.path',
'schedule: $gr.schedule', 'repo: $t.repo',
'arch: ${gr.arch.map(it.value).join(', ')}', 'schedule: $t.schedule',
'arch: ${t.arch.map(it.value).join(', ')}',
] ]
str := parts.join('\n') str := parts.join('\n')
return str return str
} }
// as_build_config converts a Target into a BuildConfig, given some extra
// needed information.
pub fn (t &Target) as_build_config(base_image string, force bool) BuildConfig {
return BuildConfig{
target_id: t.id
kind: t.kind
url: t.url
branch: t.branch
path: t.path
repo: t.repo
base_image: base_image
force: force
}
}
[params] [params]
pub struct TargetFilter { pub struct TargetFilter {
pub mut: pub mut:


@ -0,0 +1,5 @@
# package
This module handles both parsing the published Arch tarballs & the contents of
their `.PKGINFO` files, and generating the contents of the database archives'
`desc` & `files` files.
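As an illustration, the first entries of a generated `desc` file might look as follows (the package name & version are hypothetical; the field order follows `to_desc` in this module):

```
%FILENAME%
vieter-0.5.0_rc.1-1-x86_64.pkg.tar.zst

%NAME%
vieter

%BASE%
vieter

%VERSION%
0.5.0_rc.1-1
```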


@ -0,0 +1,103 @@
module package
// format_entry returns a string properly formatted to be added to a desc file.
[inline]
fn format_entry(key string, value string) string {
return '\n%$key%\n$value\n'
}
// full_name returns the properly formatted name for the package, including
// version & architecture
pub fn (pkg &Pkg) full_name() string {
p := pkg.info
return '$p.name-$p.version-$p.arch'
}
// filename returns the correct filename of the package file
pub fn (pkg &Pkg) filename() string {
ext := match pkg.compression {
0 { '.tar' }
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
}
return '${pkg.full_name()}.pkg$ext'
}
// to_desc returns a desc file valid string representation
pub fn (pkg &Pkg) to_desc() !string {
p := pkg.info
// filename
mut desc := '%FILENAME%\n$pkg.filename()\n'
desc += format_entry('NAME', p.name)
desc += format_entry('BASE', p.base)
desc += format_entry('VERSION', p.version)
if p.description.len > 0 {
desc += format_entry('DESC', p.description)
}
if p.groups.len > 0 {
desc += format_entry('GROUPS', p.groups.join_lines())
}
desc += format_entry('CSIZE', p.csize.str())
desc += format_entry('ISIZE', p.size.str())
sha256sum := pkg.checksum()!
desc += format_entry('SHA256SUM', sha256sum)
// TODO add pgpsig stuff
if p.url.len > 0 {
desc += format_entry('URL', p.url)
}
if p.licenses.len > 0 {
desc += format_entry('LICENSE', p.licenses.join_lines())
}
desc += format_entry('ARCH', p.arch)
desc += format_entry('BUILDDATE', p.build_date.str())
desc += format_entry('PACKAGER', p.packager)
if p.replaces.len > 0 {
desc += format_entry('REPLACES', p.replaces.join_lines())
}
if p.conflicts.len > 0 {
desc += format_entry('CONFLICTS', p.conflicts.join_lines())
}
if p.provides.len > 0 {
desc += format_entry('PROVIDES', p.provides.join_lines())
}
if p.depends.len > 0 {
desc += format_entry('DEPENDS', p.depends.join_lines())
}
if p.optdepends.len > 0 {
desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
}
if p.makedepends.len > 0 {
desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
}
if p.checkdepends.len > 0 {
desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
}
return '$desc\n'
}
// to_files returns a files file valid string representation
pub fn (pkg &Pkg) to_files() string {
return '%FILES%\n$pkg.files.join_lines()\n'
}


@ -43,12 +43,12 @@ pub mut:
} }
// checksum calculates the sha256 hash of the package // checksum calculates the sha256 hash of the package
pub fn (p &Pkg) checksum() ?string { pub fn (p &Pkg) checksum() !string {
return util.hash_file(p.path) return util.hash_file(p.path)
} }
// parse_pkg_info_string parses a PkgInfo object from a string // parse_pkg_info_string parses a PkgInfo object from a string
fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo { fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
mut pkg_info := PkgInfo{} mut pkg_info := PkgInfo{}
// Iterate over the entire string // Iterate over the entire string
@ -101,7 +101,7 @@ fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
// read_pkg_archive extracts the file list & .PKGINFO contents from an archive // read_pkg_archive extracts the file list & .PKGINFO contents from an archive
// NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs. // NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs.
pub fn read_pkg_archive(pkg_path string) ?Pkg { pub fn read_pkg_archive(pkg_path string) !Pkg {
if !os.is_file(pkg_path) { if !os.is_file(pkg_path) {
return error("'$pkg_path' doesn't exist or isn't a file.") return error("'$pkg_path' doesn't exist or isn't a file.")
} }
@ -159,7 +159,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
pkg_text := unsafe { buf.vstring_with_len(size).clone() } pkg_text := unsafe { buf.vstring_with_len(size).clone() }
pkg_info = parse_pkg_info_string(pkg_text)? pkg_info = parse_pkg_info_string(pkg_text)!
} else { } else {
C.archive_read_data_skip(a) C.archive_read_data_skip(a)
} }
@ -174,104 +174,3 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
compression: compression_code compression: compression_code
} }
} }
// format_entry returns a string properly formatted to be added to a desc file.
fn format_entry(key string, value string) string {
return '\n%$key%\n$value\n'
}
// full_name returns the properly formatted name for the package, including
// version & architecture
pub fn (pkg &Pkg) full_name() string {
p := pkg.info
return '$p.name-$p.version-$p.arch'
}
// filename returns the correct filename of the package file
pub fn (pkg &Pkg) filename() string {
ext := match pkg.compression {
0 { '.tar' }
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
}
return '${pkg.full_name()}.pkg$ext'
}
// to_desc returns a desc file valid string representation
pub fn (pkg &Pkg) to_desc() ?string {
p := pkg.info
// filename
mut desc := '%FILENAME%\n$pkg.filename()\n'
desc += format_entry('NAME', p.name)
desc += format_entry('BASE', p.base)
desc += format_entry('VERSION', p.version)
if p.description.len > 0 {
desc += format_entry('DESC', p.description)
}
if p.groups.len > 0 {
desc += format_entry('GROUPS', p.groups.join_lines())
}
desc += format_entry('CSIZE', p.csize.str())
desc += format_entry('ISIZE', p.size.str())
sha256sum := pkg.checksum()?
desc += format_entry('SHA256SUM', sha256sum)
// TODO add pgpsig stuff
if p.url.len > 0 {
desc += format_entry('URL', p.url)
}
if p.licenses.len > 0 {
desc += format_entry('LICENSE', p.licenses.join_lines())
}
desc += format_entry('ARCH', p.arch)
desc += format_entry('BUILDDATE', p.build_date.str())
desc += format_entry('PACKAGER', p.packager)
if p.replaces.len > 0 {
desc += format_entry('REPLACES', p.replaces.join_lines())
}
if p.conflicts.len > 0 {
desc += format_entry('CONFLICTS', p.conflicts.join_lines())
}
if p.provides.len > 0 {
desc += format_entry('PROVIDES', p.provides.join_lines())
}
if p.depends.len > 0 {
desc += format_entry('DEPENDS', p.depends.join_lines())
}
if p.optdepends.len > 0 {
desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
}
if p.makedepends.len > 0 {
desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
}
if p.checkdepends.len > 0 {
desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
}
return '$desc\n'
}
// to_files returns a files file valid string representation
pub fn (pkg &Pkg) to_files() string {
return '%FILES%\n$pkg.files.join_lines()\n'
}

src/repo/README.md

@ -0,0 +1,43 @@
# repo
This module manages the contents of the various repositories stored within a
Vieter instance.
## Terminology
* Arch-repository (arch-repo): a specific architecture of a given repository.
This is what Pacman actually uses as a repository, and it contains its own
`.db` & `.files` files.
* Repository (repo): a collection of arch-repositories. A single repository can
contain packages of different architectures, with each package being stored
in that specific architecture's arch-repository.
* Repository group (repo-group): a collection of repositories. Each Vieter
instance consists of a single repository group, which manages all underlying
repositories & arch-repositories.
## Arch-repository layout
An arch-repository (aka a regular Pacman repository) consists of a directory
with the following files (`{repo}` should be replaced with the name of the
repository):
* One or more package directories. These directories follow the naming scheme
`${pkgname}-${pkgver}-${pkgrel}`. Each of these directories contains two
files, `desc` & `files`. The `desc` file is a list of the package's metadata,
while `files` lists all the files that the package contains. The latter is
used by `pacman -F`. An example `desc` file is sketched below this list.
* `{repo}.db` & `{repo}.db.tar.gz`: the database file of the repository. This
is just a compressed tarball of all package directories, containing only
their `desc` files. Both files should have the same content (`repo-add`
creates a symlink, but Vieter serves the same file for both routes).
* `{repo}.files` & `{repo}.files.tar.gz`: the same as the `.db` file, but also
containing each package's `files` file in addition to its `desc` file.
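As an illustration, a truncated `desc` file for a hypothetical package might
look as follows (every entry is a `%KEY%` line followed by its value):

%FILENAME%
vieter-0.5.0-1-x86_64.pkg.tar.zst

%NAME%
vieter

%VERSION%
0.5.0-1

%ARCH%
x86_64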
## Filesystem layout
The repository part of Vieter consists of two directories. One is the `repos`
directory inside the configured `data_dir`, while the other is the configured
`pkg_dir`. `repos` contains only the repository group, while `pkg_dir` contains
the actual package archives. `pkg_dir` is the directory that can take up a
significant amount of disk space, while `repos` solely consists of small text
files.
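For example, with `data_dir = "data"` & `pkg_dir = "data/pkgs"`, a repository
group containing a single `vieter` repository with one hypothetical package
might look like this:

data/repos/vieter/x86_64/vieter-0.5.0-1/desc
data/repos/vieter/x86_64/vieter-0.5.0-1/files
data/repos/vieter/x86_64/vieter.db (& vieter.db.tar.gz)
data/repos/vieter/x86_64/vieter.files (& vieter.files.tar.gz)
data/pkgs/vieter/x86_64/vieter-0.5.0-1-x86_64.pkg.tar.zst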

View File

@ -29,7 +29,7 @@ pub:
} }
// new creates a new RepoGroupManager & creates the directories as needed // new creates a new RepoGroupManager & creates the directories as needed
pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager { pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager {
if !os.is_dir(repos_dir) { if !os.is_dir(repos_dir) {
os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') } os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
} }
@ -49,27 +49,27 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana
// pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive // pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive
// file, passes the result to add_pkg_in_repo, and hard links the archive to // file, passes the result to add_pkg_in_repo, and hard links the archive to
// the right subdirectories in r.pkg_dir if it was successfully added. // the right subdirectories in r.pkg_dir if it was successfully added.
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult { pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult {
pkg := package.read_pkg_archive(pkg_path) or { pkg := package.read_pkg_archive(pkg_path) or {
return error('Failed to read package file: $err.msg()') return error('Failed to read package file: $err.msg()')
} }
archs := r.add_pkg_in_repo(repo, pkg)? archs := r.add_pkg_in_repo(repo, pkg)!
// If the add was successful, we move the file to the packages directory // If the add was successful, we move the file to the packages directory
for arch in archs { for arch in archs {
repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch)) repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
dest_path := os.join_path_single(repo_pkg_path, pkg.filename()) dest_path := os.join_path_single(repo_pkg_path, pkg.filename())
os.mkdir_all(repo_pkg_path)? os.mkdir_all(repo_pkg_path)!
// We create hard links so that "any" arch packages aren't stored // We create hard links so that "any" arch packages aren't stored
// multiple times // multiple times
os.link(pkg_path, dest_path)? os.link(pkg_path, dest_path)!
} }
// After linking, we can remove the original file // After linking, we can remove the original file
os.rm(pkg_path)? os.rm(pkg_path)!
return RepoAddResult{ return RepoAddResult{
name: pkg.info.name name: pkg.info.name
@ -85,11 +85,11 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?Re
// r.default_arch. If this arch-repo doesn't exist yet, it is created. If the // r.default_arch. If this arch-repo doesn't exist yet, it is created. If the
// architecture isn't 'any', the package is only added to the specific // architecture isn't 'any', the package is only added to the specific
// architecture. // architecture.
fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string { fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]string {
// A package not of arch 'any' can be handled easily by adding it to the // A package not of arch 'any' can be handled easily by adding it to the
// respective repo // respective repo
if pkg.info.arch != 'any' { if pkg.info.arch != 'any' {
r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)? r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)!
return [pkg.info.arch] return [pkg.info.arch]
} }
@ -104,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
// If this is the first package that's added to the repo, the directory // If this is the first package that's added to the repo, the directory
// won't exist yet // won't exist yet
if os.exists(repo_dir) { if os.exists(repo_dir) {
arch_repos = os.ls(repo_dir)? arch_repos = os.ls(repo_dir)!
} }
// The default_arch should always be updated when a package with arch 'any' // The default_arch should always be updated when a package with arch 'any'
@ -118,7 +118,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
// not know which arch-repositories did succeed in adding the package, if // not know which arch-repositories did succeed in adding the package, if
// any. // any.
for arch in arch_repos { for arch in arch_repos {
r.add_pkg_in_arch_repo(repo, arch, pkg)? r.add_pkg_in_arch_repo(repo, arch, pkg)!
} }
return arch_repos return arch_repos
@ -128,24 +128,24 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
// arch-repo. It records the package's data in the arch-repo's desc & files // arch-repo. It records the package's data in the arch-repo's desc & files
// files, and afterwards updates the db & files archives to reflect these // files, and afterwards updates the db & files archives to reflect these
// changes. // changes.
fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ? { fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! {
pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version') pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
// Remove the previous version of the package, if present // Remove the previous version of the package, if present
r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)? r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)!
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') } os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }
os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()?) or { os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()!) or {
os.rmdir_all(pkg_dir)? os.rmdir_all(pkg_dir)!
return error('Failed to write desc file.') return error('Failed to write desc file.')
} }
os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or { os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or {
os.rmdir_all(pkg_dir)? os.rmdir_all(pkg_dir)!
return error('Failed to write files file.') return error('Failed to write files file.')
} }
r.sync(repo, arch)? r.sync(repo, arch)!
} }

View File

@ -5,7 +5,7 @@ import os
// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It // remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
// returns false if the package wasn't present in the database. It also // returns false if the package wasn't present in the database. It also
// optionally re-syncs the repo archives. // optionally re-syncs the repo archives.
pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool { pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) !bool {
repo_dir := os.join_path(r.repos_dir, repo, arch) repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false // If the repository doesn't exist yet, the result is automatically false
@ -15,7 +15,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
// We iterate over every directory in the repo dir // We iterate over every directory in the repo dir
// TODO filter so we only check directories // TODO filter so we only check directories
for d in os.ls(repo_dir)? { for d in os.ls(repo_dir)! {
// Because a repository only allows a single version of each package, // Because a repository only allows a single version of each package,
// we need only compare whether the name of the package is the same, // we need only compare whether the name of the package is the same,
// not the version. // not the version.
@ -25,22 +25,22 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
// We lock the mutex here to prevent other routines from creating a // We lock the mutex here to prevent other routines from creating a
// new archive while we remove an entry // new archive while we remove an entry
lock r.mutex { lock r.mutex {
os.rmdir_all(os.join_path_single(repo_dir, d))? os.rmdir_all(os.join_path_single(repo_dir, d))!
} }
// Also remove the package archive // Also remove the package archive
repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch) repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name) archives := os.ls(repo_pkg_dir)!.filter(it.split('-')#[..-3].join('-') == name)
for archive_name in archives { for archive_name in archives {
full_path := os.join_path_single(repo_pkg_dir, archive_name) full_path := os.join_path_single(repo_pkg_dir, archive_name)
os.rm(full_path)? os.rm(full_path)!
} }
// Sync the db archives if requested // Sync the db archives if requested
if sync { if sync {
r.sync(repo, arch)? r.sync(repo, arch)!
} }
return true return true
@ -51,7 +51,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
} }
// remove_arch_repo removes an arch-repo & its packages. // remove_arch_repo removes an arch-repo & its packages.
pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool { pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) !bool {
repo_dir := os.join_path(r.repos_dir, repo, arch) repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false // If the repository doesn't exist yet, the result is automatically false
@ -59,16 +59,16 @@ pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool {
return false return false
} }
os.rmdir_all(repo_dir)? os.rmdir_all(repo_dir)!
pkg_dir := os.join_path(r.pkg_dir, repo, arch) pkg_dir := os.join_path(r.pkg_dir, repo, arch)
os.rmdir_all(pkg_dir)? os.rmdir_all(pkg_dir)!
return true return true
} }
// remove_repo removes a repo & its packages. // remove_repo removes a repo & its packages.
pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool { pub fn (r &RepoGroupManager) remove_repo(repo string) !bool {
repo_dir := os.join_path_single(r.repos_dir, repo) repo_dir := os.join_path_single(r.repos_dir, repo)
// If the repository doesn't exist yet, the result is automatically false // If the repository doesn't exist yet, the result is automatically false
@ -76,10 +76,10 @@ pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool {
return false return false
} }
os.rmdir_all(repo_dir)? os.rmdir_all(repo_dir)!
pkg_dir := os.join_path_single(r.pkg_dir, repo) pkg_dir := os.join_path_single(r.pkg_dir, repo)
os.rmdir_all(pkg_dir)? os.rmdir_all(pkg_dir)!
return true return true
} }

View File

@ -32,7 +32,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
} }
// sync regenerates the repository archive files. // sync regenerates the repository archive files.
fn (r &RepoGroupManager) sync(repo string, arch string) ? { fn (r &RepoGroupManager) sync(repo string, arch string) ! {
subrepo_path := os.join_path(r.repos_dir, repo, arch) subrepo_path := os.join_path(r.repos_dir, repo, arch)
lock r.mutex { lock r.mutex {
@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ? {
C.archive_write_open_filename(a_files, &char(files_path.str)) C.archive_write_open_filename(a_files, &char(files_path.str))
// Iterate over each directory // Iterate over each directory
for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path, for d in os.ls(subrepo_path)!.filter(os.is_dir(os.join_path_single(subrepo_path,
it))) { it))) {
// desc // desc
mut inner_path := os.join_path_single(d, 'desc') mut inner_path := os.join_path_single(d, 'desc')

View File

@ -0,0 +1,49 @@
module server
import web
import web.response { new_data_response, new_response }
// v1_poll_job_queue allows agents to poll for new build jobs.
['/api/v1/jobs/poll'; auth; get]
fn (mut app App) v1_poll_job_queue() web.Result {
arch := app.query['arch'] or {
return app.json(.bad_request, new_response('Missing arch query arg.'))
}
max_str := app.query['max'] or {
return app.json(.bad_request, new_response('Missing max query arg.'))
}
max := max_str.int()
mut out := app.job_queue.pop_n(arch, max).map(it.config)
return app.json(.ok, new_data_response(out))
}
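// A hypothetical agent poll, assuming the API key is sent in the X-Api-Key
// header & the server runs at example.com:
//
//	curl -H 'X-Api-Key: secret' \
//		'https://example.com/api/v1/jobs/poll?arch=x86_64&max=5'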
// v1_queue_job allows queueing a new one-time build job for the given target.
['/api/v1/jobs/queue'; auth; post]
fn (mut app App) v1_queue_job() web.Result {
target_id := app.query['target'] or {
return app.json(.bad_request, new_response('Missing target query arg.'))
}.int()
arch := app.query['arch'] or {
return app.json(.bad_request, new_response('Missing arch query arg.'))
}
if arch == '' {
return app.json(.bad_request, new_response('Empty arch query arg.'))
}
force := 'force' in app.query
target := app.db.get_target(target_id) or {
return app.json(.bad_request, new_response('Unknown target id.'))
}
app.job_queue.insert(target: target, arch: arch, single: true, now: true, force: force) or {
return app.status(.internal_server_error)
}
return app.status(.ok)
}
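// A hypothetical one-time build request for target 3 (same assumptions as the
// sketch above; 'force' is a flag-style query arg):
//
//	curl -X POST -H 'X-Api-Key: secret' \
//		'https://example.com/api/v1/jobs/queue?target=3&arch=x86_64&force'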

View File

@ -1,7 +1,6 @@
module server module server
import web import web
import net.http
import net.urllib import net.urllib
import web.response { new_data_response, new_response } import web.response { new_data_response, new_response }
import db import db
@ -15,7 +14,7 @@ import models { BuildLog, BuildLogFilter }
['/api/v1/logs'; auth; get] ['/api/v1/logs'; auth; get]
fn (mut app App) v1_get_logs() web.Result { fn (mut app App) v1_get_logs() web.Result {
filter := models.from_params<BuildLogFilter>(app.query) or { filter := models.from_params<BuildLogFilter>(app.query) or {
return app.json(http.Status.bad_request, new_response('Invalid query parameters.')) return app.json(.bad_request, new_response('Invalid query parameters.'))
} }
logs := app.db.get_build_logs(filter) logs := app.db.get_build_logs(filter)
@ -25,7 +24,7 @@ fn (mut app App) v1_get_logs() web.Result {
// v1_get_single_log returns the build log with the given id. // v1_get_single_log returns the build log with the given id.
['/api/v1/logs/:id'; auth; get] ['/api/v1/logs/:id'; auth; get]
fn (mut app App) v1_get_single_log(id int) web.Result { fn (mut app App) v1_get_single_log(id int) web.Result {
log := app.db.get_build_log(id) or { return app.not_found() } log := app.db.get_build_log(id) or { return app.status(.not_found) }
return app.json(.ok, new_data_response(log)) return app.json(.ok, new_data_response(log))
} }
@ -33,7 +32,7 @@ fn (mut app App) v1_get_single_log(id int) web.Result {
// v1_get_log_content returns the actual build log file for the given id. // v1_get_log_content returns the actual build log file for the given id.
['/api/v1/logs/:id/content'; auth; get] ['/api/v1/logs/:id/content'; auth; get]
fn (mut app App) v1_get_log_content(id int) web.Result { fn (mut app App) v1_get_log_content(id int) web.Result {
log := app.db.get_build_log(id) or { return app.not_found() } log := app.db.get_build_log(id) or { return app.status(.not_found) }
file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss') file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.target_id.str(), log.arch, full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.target_id.str(), log.arch,
file_name) file_name)
@ -43,9 +42,9 @@ fn (mut app App) v1_get_log_content(id int) web.Result {
// parse_query_time unescapes an HTTP query parameter & tries to parse it as a // parse_query_time unescapes an HTTP query parameter & tries to parse it as a
// time.Time struct. // time.Time struct.
fn parse_query_time(query string) ?time.Time { fn parse_query_time(query string) !time.Time {
unescaped := urllib.query_unescape(query)? unescaped := urllib.query_unescape(query)!
t := time.parse(unescaped)? t := time.parse(unescaped)!
return t return t
} }
@ -57,25 +56,25 @@ fn (mut app App) v1_post_log() web.Result {
start_time_int := app.query['startTime'].int() start_time_int := app.query['startTime'].int()
if start_time_int == 0 { if start_time_int == 0 {
return app.json(http.Status.bad_request, new_response('Invalid or missing start time.')) return app.json(.bad_request, new_response('Invalid or missing start time.'))
} }
start_time := time.unix(start_time_int) start_time := time.unix(start_time_int)
end_time_int := app.query['endTime'].int() end_time_int := app.query['endTime'].int()
if end_time_int == 0 { if end_time_int == 0 {
return app.json(http.Status.bad_request, new_response('Invalid or missing end time.')) return app.json(.bad_request, new_response('Invalid or missing end time.'))
} }
end_time := time.unix(end_time_int) end_time := time.unix(end_time_int)
if 'exitCode' !in app.query { if 'exitCode' !in app.query {
return app.json(http.Status.bad_request, new_response('Missing exit code.')) return app.json(.bad_request, new_response('Missing exit code.'))
} }
exit_code := app.query['exitCode'].int() exit_code := app.query['exitCode'].int()
if 'arch' !in app.query { if 'arch' !in app.query {
return app.json(http.Status.bad_request, new_response("Missing parameter 'arch'.")) return app.json(.bad_request, new_response("Missing parameter 'arch'."))
} }
arch := app.query['arch'] arch := app.query['arch']
@ -83,7 +82,7 @@ fn (mut app App) v1_post_log() web.Result {
target_id := app.query['target'].int() target_id := app.query['target'].int()
if !app.db.target_exists(target_id) { if !app.db.target_exists(target_id) {
return app.json(http.Status.bad_request, new_response('Unknown target.')) return app.json(.bad_request, new_response('Unknown target.'))
} }
// Store log in db // Store log in db
@ -105,7 +104,7 @@ fn (mut app App) v1_post_log() web.Result {
os.mkdir_all(repo_logs_dir) or { os.mkdir_all(repo_logs_dir) or {
app.lerror("Couldn't create dir '$repo_logs_dir'.") app.lerror("Couldn't create dir '$repo_logs_dir'.")
return app.json(http.Status.internal_server_error, new_response('An error occured while processing the request.')) return app.status(.internal_server_error)
} }
} }
@ -117,10 +116,10 @@ fn (mut app App) v1_post_log() web.Result {
util.reader_to_file(mut app.reader, length.int(), full_path) or { util.reader_to_file(mut app.reader, length.int(), full_path) or {
app.lerror('An error occurred while receiving logs: $err.msg()') app.lerror('An error occurred while receiving logs: $err.msg()')
return app.json(http.Status.internal_server_error, new_response('Failed to upload logs.')) return app.status(.internal_server_error)
} }
} else { } else {
return app.status(http.Status.length_required) return app.status(.length_required)
} }
return app.json(.ok, new_data_response(log_id)) return app.json(.ok, new_data_response(log_id))

View File

@ -1,7 +1,6 @@
module server module server
import web import web
import net.http
import web.response { new_data_response, new_response } import web.response { new_data_response, new_response }
import db import db
import models { Target, TargetArch, TargetFilter } import models { Target, TargetArch, TargetFilter }
@ -10,19 +9,19 @@ import models { Target, TargetArch, TargetFilter }
['/api/v1/targets'; auth; get] ['/api/v1/targets'; auth; get]
fn (mut app App) v1_get_targets() web.Result { fn (mut app App) v1_get_targets() web.Result {
filter := models.from_params<TargetFilter>(app.query) or { filter := models.from_params<TargetFilter>(app.query) or {
return app.json(http.Status.bad_request, new_response('Invalid query parameters.')) return app.json(.bad_request, new_response('Invalid query parameters.'))
} }
repos := app.db.get_targets(filter) targets := app.db.get_targets(filter)
return app.json(.ok, new_data_response(repos)) return app.json(.ok, new_data_response(targets))
} }
// v1_get_single_target returns the information for a single target. // v1_get_single_target returns the information for a single target.
['/api/v1/targets/:id'; auth; get] ['/api/v1/targets/:id'; auth; get]
fn (mut app App) v1_get_single_target(id int) web.Result { fn (mut app App) v1_get_single_target(id int) web.Result {
repo := app.db.get_target(id) or { return app.not_found() } target := app.db.get_target(id) or { return app.status(.not_found) }
return app.json(.ok, new_data_response(repo)) return app.json(.ok, new_data_response(target))
} }
// v1_post_target creates a new target from the provided query string. // v1_post_target creates a new target from the provided query string.
@ -30,30 +29,36 @@ fn (mut app App) v1_get_single_target(id int) web.Result {
fn (mut app App) v1_post_target() web.Result { fn (mut app App) v1_post_target() web.Result {
mut params := app.query.clone() mut params := app.query.clone()
// If a repo is created without specifying the arch, we assume it's meant // If a target is created without specifying the arch, we assume it's meant
// for the default architecture. // for the default architecture.
if 'arch' !in params { if 'arch' !in params || params['arch'] == '' {
params['arch'] = app.conf.default_arch params['arch'] = app.conf.default_arch
} }
new_repo := models.from_params<Target>(params) or { mut new_target := models.from_params<Target>(params) or {
return app.json(http.Status.bad_request, new_response(err.msg())) return app.json(.bad_request, new_response(err.msg()))
} }
// Ensure someone doesn't submit an invalid kind // Ensure someone doesn't submit an invalid kind
if new_repo.kind !in models.valid_kinds { if new_target.kind !in models.valid_kinds {
return app.json(http.Status.bad_request, new_response('Invalid kind.')) return app.json(.bad_request, new_response('Invalid kind.'))
} }
id := app.db.add_target(new_repo) id := app.db.add_target(new_target)
new_target.id = id
return app.json(http.Status.ok, new_data_response(id)) // Add the target to the job queue
// TODO return better error here if it's the cron schedule that's incorrect
app.job_queue.insert_all(new_target) or { return app.status(.internal_server_error) }
return app.json(.ok, new_data_response(id))
} }
// v1_delete_target removes a given target from the server's list. // v1_delete_target removes a given target from the server's list.
['/api/v1/targets/:id'; auth; delete] ['/api/v1/targets/:id'; auth; delete]
fn (mut app App) v1_delete_target(id int) web.Result { fn (mut app App) v1_delete_target(id int) web.Result {
app.db.delete_target(id) app.db.delete_target(id)
app.job_queue.invalidate(id)
return app.status(.ok) return app.status(.ok)
} }
@ -69,5 +74,10 @@ fn (mut app App) v1_patch_target(id int) web.Result {
app.db.update_target_archs(id, arch_objs) app.db.update_target_archs(id, arch_objs)
} }
return app.status(.ok) target := app.db.get_target(id) or { return app.status(.internal_server_error) }
app.job_queue.invalidate(id)
app.job_queue.insert_all(target) or { return app.status(.internal_server_error) }
return app.json(.ok, new_data_response(target))
} }

View File

@ -1,16 +1,18 @@
module server module server
import cli import cli
import vieter_v.conf as vconf import conf as vconf
struct Config { struct Config {
pub: pub:
log_level string = 'WARN' log_level string = 'WARN'
pkg_dir string pkg_dir string
data_dir string data_dir string
api_key string api_key string
default_arch string default_arch string
port int = 8000 global_schedule string = '0 3'
port int = 8000
base_image string = 'archlinux:base-devel'
} }
// cmd returns the cli submodule that handles starting the server // cmd returns the cli submodule that handles starting the server
@ -18,11 +20,11 @@ pub fn cmd() cli.Command {
return cli.Command{ return cli.Command{
name: 'server' name: 'server'
description: 'Start the Vieter server.' description: 'Start the Vieter server.'
execute: fn (cmd cli.Command) ? { execute: fn (cmd cli.Command) ! {
config_file := cmd.flags.get_string('config-file')? config_file := cmd.flags.get_string('config-file')!
conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)? conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)!
server(conf)? server(conf)!
} }
} }
} }

View File

@ -6,6 +6,8 @@ import log
import repo import repo
import util import util
import db import db
import build { BuildJobQueue }
import cron.expression
const ( const (
log_file_name = 'vieter.log' log_file_name = 'vieter.log'
@ -20,16 +22,39 @@ pub:
conf Config [required; web_global] conf Config [required; web_global]
pub mut: pub mut:
repo repo.RepoGroupManager [required; web_global] repo repo.RepoGroupManager [required; web_global]
db db.VieterDb // Keys are the various architectures for packages
job_queue BuildJobQueue [required; web_global]
db db.VieterDb
}
// init_job_queue populates a fresh job queue with all the targets currently
// stored in the database.
fn (mut app App) init_job_queue() ! {
// Initialize build queues
mut targets := app.db.get_targets(limit: 25)
mut i := u64(0)
for targets.len > 0 {
for target in targets {
app.job_queue.insert_all(target)!
}
i += 25
targets = app.db.get_targets(limit: 25, offset: i)
}
} }
// server starts the web server & starts listening for requests // server starts the web server & starts listening for requests
pub fn server(conf Config) ? { pub fn server(conf Config) ! {
// Prevent using 'any' as the default arch // Prevent using 'any' as the default arch
if conf.default_arch == 'any' { if conf.default_arch == 'any' {
util.exit_with_message(1, "'any' is not allowed as the value for default_arch.") util.exit_with_message(1, "'any' is not allowed as the value for default_arch.")
} }
global_ce := expression.parse_expression(conf.global_schedule) or {
util.exit_with_message(1, 'Invalid global cron expression: $err.msg()')
}
// Configure logger // Configure logger
log_level := log.level_from_tag(conf.log_level) or { log_level := log.level_from_tag(conf.log_level) or {
util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
@ -71,11 +96,17 @@ pub fn server(conf Config) ? {
util.exit_with_message(1, 'Failed to initialize database: $err.msg()') util.exit_with_message(1, 'Failed to initialize database: $err.msg()')
} }
web.run(&App{ mut app := &App{
logger: logger logger: logger
api_key: conf.api_key api_key: conf.api_key
conf: conf conf: conf
repo: repo repo: repo
db: db db: db
}, conf.port) job_queue: build.new_job_queue(global_ce, conf.base_image)
}
app.init_job_queue() or {
util.exit_with_message(1, 'Failed to initialize job queue: $err.msg()')
}
web.run(app, conf.port)
} }

View File

@ -5,7 +5,7 @@ import io
import os import os
// reader_to_writer tries to consume the entire reader & write it to the writer. // reader_to_writer tries to consume the entire reader & write it to the writer.
pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? { pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ! {
mut buf := []u8{len: 10 * 1024} mut buf := []u8{len: 10 * 1024}
for { for {
@ -21,8 +21,8 @@ pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? {
} }
// reader_to_file writes the contents of a BufferedReader to a file // reader_to_file writes the contents of a BufferedReader to a file
pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? { pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! {
mut file := os.create(path)? mut file := os.create(path)!
defer { defer {
file.close() file.close()
} }
@ -69,11 +69,11 @@ pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
// read_until_separator consumes an io.Reader until it encounters some // read_until_separator consumes an io.Reader until it encounters some
// separator array. The data read is stored inside the provided res array. // separator array. The data read is stored inside the provided res array.
pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? { pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ! {
mut buf := []u8{len: sep.len} mut buf := []u8{len: sep.len}
for { for {
c := reader.read(mut buf)? c := reader.read(mut buf)!
res << buf[..c] res << buf[..c]
match_len := match_array_in_array(buf[..c], sep) match_len := match_array_in_array(buf[..c], sep)
@ -84,7 +84,7 @@ pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
if match_len > 0 { if match_len > 0 {
match_left := sep.len - match_len match_left := sep.len - match_len
c2 := reader.read(mut buf[..match_left])? c2 := reader.read(mut buf[..match_left])!
res << buf[..c2] res << buf[..c2]
if buf[..c2] == sep[match_len..] { if buf[..c2] == sep[match_len..] {

View File

@ -23,7 +23,7 @@ pub fn exit_with_message(code int, msg string) {
} }
// hash_file returns the sha256 hash of a given file // hash_file returns the sha256 hash of a given file
pub fn hash_file(path &string) ?string { pub fn hash_file(path &string) !string {
file := os.open(path) or { return error('Failed to open file.') } file := os.open(path) or { return error('Failed to open file.') }
mut sha256sum := sha256.new() mut sha256sum := sha256.new()
@ -39,7 +39,7 @@ pub fn hash_file(path &string) ?string {
// This function never actually fails, but returns an option to follow // This function never actually fails, but returns an option to follow
// the Writer interface. // the Writer interface.
sha256sum.write(buf[..bytes_read])? sha256sum.write(buf[..bytes_read])!
} }
return sha256sum.checksum().hex() return sha256sum.checksum().hex()

View File

@ -3,33 +3,33 @@ module web
import log import log
// log create a log message with the given level // log create a log message with the given level
pub fn (mut ctx Context) log(msg &string, level log.Level) { pub fn (mut ctx Context) log(msg string, level log.Level) {
lock ctx.logger { lock ctx.logger {
ctx.logger.send_output(msg, level) ctx.logger.send_output(msg, level)
} }
} }
// lfatal create a log message with the fatal level // lfatal create a log message with the fatal level
pub fn (mut ctx Context) lfatal(msg &string) { pub fn (mut ctx Context) lfatal(msg string) {
ctx.log(msg, log.Level.fatal) ctx.log(msg, log.Level.fatal)
} }
// lerror create a log message with the error level // lerror create a log message with the error level
pub fn (mut ctx Context) lerror(msg &string) { pub fn (mut ctx Context) lerror(msg string) {
ctx.log(msg, log.Level.error) ctx.log(msg, log.Level.error)
} }
// lwarn create a log message with the warn level // lwarn create a log message with the warn level
pub fn (mut ctx Context) lwarn(msg &string) { pub fn (mut ctx Context) lwarn(msg string) {
ctx.log(msg, log.Level.warn) ctx.log(msg, log.Level.warn)
} }
// linfo create a log message with the info level // linfo create a log message with the info level
pub fn (mut ctx Context) linfo(msg &string) { pub fn (mut ctx Context) linfo(msg string) {
ctx.log(msg, log.Level.info) ctx.log(msg, log.Level.info)
} }
// ldebug create a log message with the debug level // ldebug create a log message with the debug level
pub fn (mut ctx Context) ldebug(msg &string) { pub fn (mut ctx Context) ldebug(msg string) {
ctx.log(msg, log.Level.debug) ctx.log(msg, log.Level.debug)
} }

View File

@ -8,7 +8,7 @@ import net.http
const attrs_to_ignore = ['auth'] const attrs_to_ignore = ['auth']
// Parsing function attributes for methods and path. // Parsing function attributes for methods and path.
fn parse_attrs(name string, attrs []string) ?([]http.Method, string) { fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
if attrs.len == 0 { if attrs.len == 0 {
return [http.Method.get], '/$name' return [http.Method.get], '/$name'
} }
@ -61,7 +61,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
} }
// Extract form data from an HTTP request. // Extract form data from an HTTP request.
fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) { fn parse_form_from_request(request http.Request) !(map[string]string, map[string][]http.FileData) {
mut form := map[string]string{} mut form := map[string]string{}
mut files := map[string][]http.FileData{} mut files := map[string][]http.FileData{}
if request.method in methods_with_form { if request.method in methods_with_form {

View File

@ -24,7 +24,7 @@ pub:
pub mut: pub mut:
// TCP connection to client. // TCP connection to client.
// But beware, do not store it for further use, after request processing web will close connection. // But beware, do not store it for further use, after request processing web will close connection.
conn &net.TcpConn conn &net.TcpConn = unsafe { nil }
// Gives access to a shared logger object // Gives access to a shared logger object
logger shared log.Log logger shared log.Log
// time.ticks() from start of web connection handle. // time.ticks() from start of web connection handle.
@ -67,20 +67,20 @@ struct Route {
pub fn (ctx Context) before_request() {} pub fn (ctx Context) before_request() {}
// send_string writes the given string to the TCP connection socket. // send_string writes the given string to the TCP connection socket.
fn (mut ctx Context) send_string(s string) ? { fn (mut ctx Context) send_string(s string) ! {
ctx.conn.write(s.bytes())? ctx.conn.write(s.bytes())!
} }
// send_reader reads at most `size` bytes from the given reader & writes them // send_reader reads at most `size` bytes from the given reader & writes them
// to the TCP connection socket. Internally, a 10KB buffer is used, to avoid // to the TCP connection socket. Internally, a 10KB buffer is used, to avoid
// having to store all bytes in memory at once. // having to store all bytes in memory at once.
fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? { fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ! {
mut buf := []u8{len: 10_000} mut buf := []u8{len: 10_000}
mut bytes_left := size mut bytes_left := size
// Repeat as long as the stream still has data // Repeat as long as the stream still has data
for bytes_left > 0 { for bytes_left > 0 {
bytes_read := reader.read(mut buf)? bytes_read := reader.read(mut buf)!
bytes_left -= u64(bytes_read) bytes_left -= u64(bytes_read)
mut to_write := bytes_read mut to_write := bytes_read
@ -96,20 +96,20 @@ fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? {
// send_custom_response sends the given http.Response to the client. It can be // send_custom_response sends the given http.Response to the client. It can be
// used to overwrite the Context object & send a completely custom // used to overwrite the Context object & send a completely custom
// http.Response instead. // http.Response instead.
fn (mut ctx Context) send_custom_response(resp &http.Response) ? { fn (mut ctx Context) send_custom_response(resp &http.Response) ! {
ctx.send_string(resp.bytestr())? ctx.send_string(resp.bytestr())!
} }
// send_response_header constructs a valid HTTP response with an empty body & // send_response_header constructs a valid HTTP response with an empty body &
// sends it to the client. // sends it to the client.
pub fn (mut ctx Context) send_response_header() ? { pub fn (mut ctx Context) send_response_header() ! {
mut resp := http.new_response( mut resp := http.new_response(
header: ctx.header.join(headers_close) header: ctx.header.join(headers_close)
) )
resp.header.add(.content_type, ctx.content_type) resp.header.add(.content_type, ctx.content_type)
resp.set_status(ctx.status) resp.set_status(ctx.status)
ctx.send_custom_response(resp)? ctx.send_custom_response(resp)!
} }
// send is a convenience function for sending the HTTP response with an empty // send is a convenience function for sending the HTTP response with an empty
@ -260,13 +260,6 @@ pub fn (mut ctx Context) redirect(url string) Result {
return Result{} return Result{}
} }
// not_found Send an not_found response
pub fn (mut ctx Context) not_found() Result {
ctx.send_custom_response(http_404) or {}
return Result{}
}
interface DbInterface { interface DbInterface {
db voidptr db voidptr
} }

View File

@ -4,11 +4,11 @@ data_dir = "data"
pkg_dir = "data/pkgs" pkg_dir = "data/pkgs"
log_level = "DEBUG" log_level = "DEBUG"
default_arch = "x86_64" default_arch = "x86_64"
arch = "x86_64"
address = "http://localhost:8000" address = "http://localhost:8000"
global_schedule = '* *' # global_schedule = '* *'
api_update_frequency = 2 api_update_frequency = 2
image_rebuild_frequency = 1 image_rebuild_frequency = 1
max_concurrent_builds = 3 max_concurrent_builds = 3