diff --git a/.clang-format b/.clang-format
deleted file mode 100644
index 2e6afb4..0000000
--- a/.clang-format
+++ /dev/null
@@ -1,4 +0,0 @@
-# To stay consistent with the V formatting style, we use tabs
-UseTab: Always
-IndentWidth: 4
-TabWidth: 4
diff --git a/.editorconfig b/.editorconfig
index e9c1e63..e23a3c7 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -5,5 +5,6 @@ root = true
end_of_line = lf
insert_final_newline = true
-[*.{v,c,h}]
+[*.v]
+# vfmt wants it :(
indent_style = tab
diff --git a/.gitignore b/.gitignore
index daeb3d3..4d9f94f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-vieter.c
+*.c
/data/
# Build artifacts
@@ -26,8 +26,4 @@ gdb.txt
# Generated docs
_docs/
-docs/resources/_gen/
/man/
-
-# VLS logs
-vls.log
diff --git a/.gitmodules b/.gitmodules
index 24af818..47029a0 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
[submodule "docs/themes/hugo-book"]
path = docs/themes/hugo-book
url = https://github.com/alex-shpak/hugo-book
-[submodule "src/libvieter"]
- path = src/libvieter
- url = https://git.rustybever.be/vieter-v/libvieter
diff --git a/.woodpecker/arch-rel.yml b/.woodpecker/.arch-rel.yml
similarity index 94%
rename from .woodpecker/arch-rel.yml
rename to .woodpecker/.arch-rel.yml
index 0cdf91d..f5f228e 100644
--- a/.woodpecker/arch-rel.yml
+++ b/.woodpecker/.arch-rel.yml
@@ -9,8 +9,7 @@ skip_clone: true
pipeline:
build:
- image: 'git.rustybever.be/vieter-v/vieter-builder'
- pull: true
+ image: 'menci/archlinuxarm:base-devel'
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
diff --git a/.woodpecker/arch.yml b/.woodpecker/.arch.yml
similarity index 94%
rename from .woodpecker/arch.yml
rename to .woodpecker/.arch.yml
index 7295065..8f1a6ff 100644
--- a/.woodpecker/arch.yml
+++ b/.woodpecker/.arch.yml
@@ -9,8 +9,7 @@ skip_clone: true
pipeline:
build:
- image: 'git.rustybever.be/vieter-v/vieter-builder'
- pull: true
+ image: 'menci/archlinuxarm:base-devel'
commands:
# Add the vieter repository so we can use the compiler
- echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
diff --git a/.woodpecker/build.yml b/.woodpecker/.build.yml
similarity index 84%
rename from .woodpecker/build.yml
rename to .woodpecker/.build.yml
index e288bb2..7cb7d53 100644
--- a/.woodpecker/build.yml
+++ b/.woodpecker/.build.yml
@@ -1,6 +1,3 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
matrix:
PLATFORM:
- 'linux/amd64'
@@ -10,7 +7,7 @@ platform: ${PLATFORM}
pipeline:
install-modules:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
@@ -19,7 +16,7 @@ pipeline:
event: [push, pull_request]
debug:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
commands:
- export VMODULES=$PWD/.vmodules
- make
@@ -29,7 +26,7 @@ pipeline:
exclude: [main]
prod:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
environment:
- LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static
commands:
@@ -47,7 +44,7 @@ pipeline:
event: [push, pull_request]
upload:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
secrets: [ s3_username, s3_password ]
commands:
# https://gist.github.com/JustinTimperio/7c7115f87b775618637d67ac911e595f
@@ -57,7 +54,7 @@ pipeline:
- export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')"
- export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH"
- - export SIGNATURE="$(echo -en $SIG_STRING | openssl dgst -sha1 -hmac $S3_PASSWORD -binary | base64)"
+ - export SIGNATURE="$(echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64)"
- >
curl
--silent
diff --git a/.woodpecker/deploy.yml b/.woodpecker/.deploy.yml
similarity index 100%
rename from .woodpecker/deploy.yml
rename to .woodpecker/.deploy.yml
diff --git a/.woodpecker/docker.yml b/.woodpecker/.docker.yml
similarity index 100%
rename from .woodpecker/docker.yml
rename to .woodpecker/.docker.yml
diff --git a/.woodpecker/docs.yml b/.woodpecker/.docs.yml
similarity index 82%
rename from .woodpecker/docs.yml
rename to .woodpecker/.docs.yml
index c7ecd59..051d852 100644
--- a/.woodpecker/docs.yml
+++ b/.woodpecker/.docs.yml
@@ -1,6 +1,3 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
platform: 'linux/amd64'
branches:
exclude: [ main ]
@@ -14,16 +11,15 @@ pipeline:
- make docs
api-docs:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
pull: true
group: 'generate'
commands:
- make api-docs
slate-docs:
- image: 'slatedocs/slate:v2.13.0'
+ image: 'slatedocs/slate'
group: 'generate'
- # Slate requires a specific directory to run in
commands:
- cd docs/api
- bundle exec middleman build --clean
diff --git a/.woodpecker/gitea.yml b/.woodpecker/.gitea.yml
similarity index 83%
rename from .woodpecker/gitea.yml
rename to .woodpecker/.gitea.yml
index 6079b76..d0825c2 100644
--- a/.woodpecker/gitea.yml
+++ b/.woodpecker/.gitea.yml
@@ -1,6 +1,3 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
platform: 'linux/amd64'
branches: [ 'main' ]
depends_on:
@@ -11,7 +8,7 @@ skip_clone: true
pipeline:
prepare:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
pull: true
secrets: [ s3_username, s3_password ]
commands:
diff --git a/.woodpecker/.lint.yml b/.woodpecker/.lint.yml
new file mode 100644
index 0000000..e70648d
--- /dev/null
+++ b/.woodpecker/.lint.yml
@@ -0,0 +1,13 @@
+# These checks already get performed on the feature branches
+branches:
+ exclude: [ main ]
+platform: 'linux/amd64'
+
+pipeline:
+ lint:
+ image: 'chewingbever/vlang:latest'
+ pull: true
+ commands:
+ - make lint
+ when:
+ event: [ pull_request ]
diff --git a/.woodpecker/man.yml b/.woodpecker/.man.yml
similarity index 63%
rename from .woodpecker/man.yml
rename to .woodpecker/.man.yml
index 23330f3..0b80886 100644
--- a/.woodpecker/man.yml
+++ b/.woodpecker/.man.yml
@@ -1,6 +1,3 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
platform: 'linux/amd64'
branches:
exclude: [ main ]
@@ -8,21 +5,15 @@ branches:
depends_on:
- build
+skip_clone: true
+
pipeline:
- install-modules:
- image: *vlang_image
+ generate:
+ image: 'chewingbever/vlang:latest'
pull: true
commands:
- - export VMODULES=$PWD/.vmodules
- - 'cd src && v install'
-
- generate:
- image: *vlang_image
- commands:
- # - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
- # - chmod +x vieter
- - export VMODULES=$PWD/.vmodules
- - make
+ - curl -o vieter -L "https://s3.rustybever.be/vieter/commits/$CI_COMMIT_SHA/vieter-linux-amd64"
+ - chmod +x vieter
- ./vieter man man
- cd man
diff --git a/.woodpecker/test.yml b/.woodpecker/.test.yml
similarity index 71%
rename from .woodpecker/test.yml
rename to .woodpecker/.test.yml
index ba93957..a19dbd4 100644
--- a/.woodpecker/test.yml
+++ b/.woodpecker/.test.yml
@@ -1,6 +1,3 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
matrix:
PLATFORM:
- 'linux/amd64'
@@ -12,7 +9,7 @@ platform: ${PLATFORM}
pipeline:
install-modules:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
@@ -21,7 +18,7 @@ pipeline:
event: [pull_request]
test:
- image: *vlang_image
+ image: 'chewingbever/vlang:latest'
pull: true
commands:
- export VMODULES=$PWD/.vmodules
diff --git a/.woodpecker/lint.yml b/.woodpecker/lint.yml
deleted file mode 100644
index 39918a9..0000000
--- a/.woodpecker/lint.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-variables:
- - &vlang_image 'git.rustybever.be/vieter/vlang:5d4c9dc9fc11bf8648541c934adb64f27cb94e37-alpine3.17'
-
-# These checks already get performed on the feature branches
-branches:
- exclude: [ main ]
-platform: 'linux/amd64'
-
-pipeline:
- # vfmt seems to get confused if these aren't present
- install-modules:
- image: *vlang_image
- pull: true
- commands:
- - export VMODULES=$PWD/.vmodules
- - 'cd src && v install'
- when:
- event: [pull_request]
-
- lint:
- image: *vlang_image
- pull: true
- commands:
- - export VMODULES=$PWD/.vmodules
- - make lint
- when:
- event: [pull_request]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 871877e..cf2b829 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,78 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/vieter-v/vieter/src/branch/dev)
-## [0.6.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.6.0)
-
-### Added
-
-* Metrics endpoint for Prometheus integration
-* Search in list of targets using API & CLI
-* Allow filtering targets by arch value
-* Configurable global timeout for builds
-
-### Changed
-
-* Rewrote cron expression logic in C
-* Updated codebase to V commit after 0.3.3
-* Agents now use worker threads and no longer spawn a new thread for every
- build
-
-### Fixed
-
-* Package upload now fails if TCP connection is closed before all bytes have
- been received
-
-### Removed
-
-* Deprecated cron daemon
-
-## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0)
-
-### Added
-
-* CLI commands for removing packages, arch-repos & repositories
-
-## [0.5.0-rc.2](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.2)
-
-### Added
-
-* API route for removing logs & accompanying CLI command
-* Daemon for periodically removing old logs
-* CLI flag to filter logs by specific exit codes
-
-### Changed
-
-* Use `--long-option` instead of `-long-option` for CLI
-
-## [0.5.0-rc.1](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0-rc.1)
-
-### Added
-
-* Allow specifying subdirectory inside Git repository
-* Added option to deploy using agent-server architecture instead of cron daemon
-* Allow scheduling builds on the server from the CLI tool instead of building
- them locally
-* Allow force-building packages, meaning the build won't check if the
- repository is already up to date
-
-### Changed
-
-* Migrated codebase to V 0.3.2
-* Cron expression parser now uses bitfields instead of bool arrays
-
-### Fixed
-
-* Arch value for target is now properly set if not provided
-* Allow NULL values for branch in database
-* Endpoint for adding targets now returns the correct id
-* CLI now correctly reports errors when sending requests
-* Fixed possible infinite loop when removing old build images
-* Check whether build image still exists before starting build
-* Don't run makepkg `prepare()` function twice
-* Don't buffer stdout in Docker containers
-
-## [0.4.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.4.0)
-
### Added
* Server port can now be configured
@@ -86,15 +14,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
URL to a PKGBUILD
* Targets with kind 'url' can provide a direct URL to a PKGBUILD instead of
providing a Git repository
-* CLI commands for searching the AUR & directly adding packages
-* HTTP routes for removing packages, arch-repos & repos
-* All endpoints serving files now support HTTP byte range requests
-* Better CLI UX
- * When adding targets, the ID of the created target is returned
- * The `-r` flag only shows raw data of action
- * When adding a target, only ID is shown and not surrounding text
- * Tabled output returns a tab-separated list (easy to script using
- `cut`)
### Changed
@@ -104,11 +23,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Branch name for 'git' targets is now optional; if not provided, the
repository will be cloned with the default branch
* Build containers now explicitly set the PATH variable
-* Refactor of web framework
-* API endpoints now return id of newly created entries
-* Repo POST requests now return information on published package
-* `api` can no longer be used as a repository name
-* CLI client now allows setting values to an empty value
### Removed
diff --git a/Dockerfile b/Dockerfile
index a27ad44..5997adc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM git.rustybever.be/chewing_bever/vlang:0.3.2 AS builder
+FROM chewingbever/vlang:latest AS builder
ARG TARGETPLATFORM
ARG CI_COMMIT_SHA
@@ -23,7 +23,6 @@ RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
"https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
chmod +x vieter ; \
else \
- cd src && v install && cd .. && \
LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -lsqlite3 -static' make prod && \
mv pvieter vieter ; \
fi
diff --git a/Makefile b/Makefile
index 7dda68c..ed44df9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,20 +1,16 @@
# =====CONFIG=====
SRC_DIR := src
-SRCS != find '$(SRC_DIR)' -iname '*.v'
+SOURCES != find '$(SRC_DIR)' -iname '*.v'
V_PATH ?= v
-V := $(V_PATH) -showcc -gc boehm -d use_openssl -skip-unused
+V := $(V_PATH) -showcc -gc boehm
all: vieter
# =====COMPILATION=====
-.PHONY: libvieter
-libvieter:
- make -C '$(SRC_DIR)/libvieter' CFLAGS='-O3'
-
# Regular binary
-vieter: $(SOURCES) libvieter
+vieter: $(SOURCES)
$(V) -g -o vieter $(SRC_DIR)
# Debug build using gcc
@@ -22,7 +18,7 @@ vieter: $(SOURCES) libvieter
# multi-threaded and causes issues when running vieter inside gdb.
.PHONY: debug
debug: dvieter
-dvieter: $(SOURCES) libvieter
+dvieter: $(SOURCES)
$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)
# Run the debug build inside gdb
@@ -33,12 +29,12 @@ gdb: dvieter
# Optimised production build
.PHONY: prod
prod: pvieter
-pvieter: $(SOURCES) libvieter
+pvieter: $(SOURCES)
$(V) -o pvieter -prod $(SRC_DIR)
# Only generate C code
.PHONY: c
-c: $(SOURCES) libvieter
+c: $(SOURCES)
$(V) -o vieter.c $(SRC_DIR)
@@ -71,7 +67,6 @@ man: vieter
# =====OTHER=====
-# Linting
.PHONY: lint
lint:
$(V) fmt -verify $(SRC_DIR)
@@ -79,33 +74,34 @@ lint:
$(V_PATH) missdoc -p $(SRC_DIR)
@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
-
-# Formatting
+# Format the V codebase
.PHONY: fmt
fmt:
$(V) fmt -w $(SRC_DIR)
-
-# Testing
.PHONY: test
-test: libvieter
- $(V) -g test $(SRC_DIR)
+test:
+ $(V) test $(SRC_DIR)
+# Build & patch the V compiler
+.PHONY: v
+v: v/v
+v/v:
+ git clone --single-branch https://git.rustybever.be/vieter-v/v v
+ make -C v
-# Cleaning
.PHONY: clean
clean:
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public'
- make -C '$(SRC_DIR)/libvieter' clean
# =====EXPERIMENTAL=====
.PHONY: autofree
autofree: afvieter
afvieter: $(SOURCES)
- $(V) -showcc -autofree -o afvieter $(SRC_DIR)
+ $(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR)
.PHONY: skip-unused
skip-unused: suvieter
suvieter: $(SOURCES)
- $(V) -skip-unused -o suvieter $(SRC_DIR)
+ $(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR)
diff --git a/PKGBUILD b/PKGBUILD
index e5cde95..6b664d1 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -3,31 +3,21 @@
pkgbase='vieter'
pkgname='vieter'
-pkgver='0.6.0'
+pkgver='0.3.0'
pkgrel=1
-pkgdesc="Lightweight Arch repository server & package build system"
+pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
-makedepends=('git' 'vieter-vlang')
+makedepends=('git' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
-source=(
- "$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}"
- "libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
-)
-md5sums=('SKIP' 'SKIP')
+source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#tag=${pkgver//_/-}")
+md5sums=('SKIP')
prepare() {
- cd "${pkgname}"
+ export VMODULES="$srcdir/.vmodules"
- # Add the libvieter submodule
- git submodule init
- git config submodules.src/libvieter.url "${srcdir}/libvieter"
- git -c protocol.file.allow=always submodule update
-
- export VMODULES="${srcdir}/.vmodules"
-
- cd src && v install
+ cd "$pkgname/src" && v install
}
build() {
diff --git a/PKGBUILD.dev b/PKGBUILD.dev
index 4ea213d..045e576 100644
--- a/PKGBUILD.dev
+++ b/PKGBUILD.dev
@@ -5,43 +5,33 @@ pkgbase='vieter-git'
pkgname='vieter-git'
pkgver=0.2.0.r25.g20112b8
pkgrel=1
-pkgdesc="Lightweight Arch repository server & package build system (development version)"
+pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
depends=('glibc' 'openssl' 'libarchive' 'sqlite')
-makedepends=('git' 'vieter-vlang')
+makedepends=('git' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter-v/vieter'
license=('AGPL3')
-source=(
- "${pkgname}::git+https://git.rustybever.be/vieter-v/vieter#branch=dev"
- "libvieter::git+https://git.rustybever.be/vieter-v/libvieter"
-)
-md5sums=('SKIP' 'SKIP')
+source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#branch=dev")
+md5sums=('SKIP')
provides=('vieter')
conflicts=('vieter')
pkgver() {
- cd "${pkgname}"
+ cd "$pkgname"
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
- cd "${pkgname}"
+ export VMODULES="$srcdir/.vmodules"
- # Add the libvieter submodule
- git submodule init
- git config submodules.src/libvieter.url "${srcdir}/libvieter"
- git -c protocol.file.allow=always submodule update
-
- export VMODULES="${srcdir}/.vmodules"
-
- cd src && v install
+ cd "$pkgname/src" && v install
}
build() {
- export VMODULES="${srcdir}/.vmodules"
+ export VMODULES="$srcdir/.vmodules"
- cd "${pkgname}"
+ cd "$pkgname"
make prod
@@ -52,9 +42,9 @@ build() {
}
package() {
- install -dm755 "${pkgdir}/usr/bin"
- install -Dm755 "${pkgname}/pvieter" "${pkgdir}/usr/bin/vieter"
+ install -dm755 "$pkgdir/usr/bin"
+ install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
- install -dm755 "${pkgdir}/usr/share/man/man1"
- install -Dm644 "${pkgname}/man"/*.1 "${pkgdir}/usr/share/man/man1"
+ install -dm755 "$pkgdir/usr/share/man/man1"
+ install -Dm644 "$pkgname/man"/*.1 "$pkgdir/usr/share/man/man1"
}
diff --git a/README.md b/README.md
index 6b487b6..5911ea2 100644
--- a/README.md
+++ b/README.md
@@ -21,8 +21,7 @@ quicker.
I chose [V](https://vlang.io/) as I've been very intrigued by this language for
a while now. I wanted a fast language that I could code while relaxing, without
having to exert too much mental effort & V seemed like the right choice for
-that. Sadly, this didn't quite turn out the way I expected, but I'm sticking
-with it anyways ;p
+that.
## Features
@@ -38,6 +37,7 @@ with it anyways ;p
Besides a V installer, Vieter also requires the following libraries to work:
+* gc
* libarchive
* openssl
* sqlite3
@@ -48,9 +48,15 @@ update`.
### Compiler
-V is developed using a specific compiler commit that is usually updated
-whenever a new version is released. Information on this can be found in the
-[tools](https://git.rustybever.be/vieter-v/tools) repository.
+Vieter compiles with the standard Vlang compiler. However, I do maintain a
+[mirror](https://git.rustybever.be/vieter-v/v). This is to ensure my CI does
+not break without reason, as I control when & how frequently the mirror is
+updated to reflect the official repository.
+
+If you encounter issues using the latest V compiler, try using my mirror
+instead. `make v` will clone the repository & build the mirror. Afterwards,
+prefixing any make command with `V_PATH=v/v` tells make to use the locally
+compiled mirror.
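+
+For example, assuming the commands are run from the repository root:
+
+```sh
+make v            # clone & build the compiler mirror into ./v
+V_PATH=v/v make   # compile vieter using the locally built mirror
+```
+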
## Contributing
diff --git a/docs/api/source/includes/_jobs.md b/docs/api/source/includes/_jobs.md
deleted file mode 100644
index a25309d..0000000
--- a/docs/api/source/includes/_jobs.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Jobs
-
-
-
-## Manually schedule a job
-
-```shell
-curl \
- -H 'X-Api-Key: secret' \
- https://example.com/api/v1/jobs/queue?target=10&force&arch=x86_64
-```
-
-Manually schedule a job on the server.
-
-### HTTP Request
-
-`POST /api/v1/jobs/queue`
-
-### Query Parameters
-
-Parameter | Description
---------- | -----------
-target | Id of target to schedule build for
-arch | Architecture to build on
-force | Whether it's a forced build (true if present)
-
-## Poll for new jobs
-
-
-
-```shell
-curl \
- -H 'X-Api-Key: secret' \
- https://example.com/api/v1/jobs/poll?arch=x86_64&max=2
-```
-
-> JSON output format
-
-```json
-{
- "message": "",
- "data": [
- {
- "target_id": 1,
- "kind": "git",
- "url": "https://aur.archlinux.org/discord-ptb.git",
- "branch": "master",
- "path": "",
- "repo": "bur",
- "base_image": "archlinux:base-devel",
- "force": true
- }
- ]
-}
-```
-
-Poll the server for new builds.
-
-### HTTP Request
-
-`GET /api/v1/jobs/poll`
-
-### Query Parameters
-
-Parameter | Description
---------- | -----------
-arch | For which architecture to receive jobs
-max | How many jobs to receive at most
diff --git a/docs/api/source/includes/_logs.md b/docs/api/source/includes/_logs.md
index d6134b7..2797e60 100644
--- a/docs/api/source/includes/_logs.md
+++ b/docs/api/source/includes/_logs.md
@@ -112,21 +112,10 @@ id | ID of requested log
## Publish build log
-> JSON output format
-
-```json
-{
- "message": "",
- "data": {
- "id": 15
- }
-}
-```
-
@@ -149,24 +138,3 @@ target | id of target this build is for
### Request body
Plaintext contents of the build log.
-
-## Remove a build log
-
-```shell
-curl \
- -XDELETE \
- -H 'X-Api-Key: secret' \
- https://example.com/api/v1/logs/1
-```
-
-Remove a build log from the server.
-
-### HTTP Request
-
-`DELETE /api/v1/logs/:id`
-
-### URL Parameters
-
-Parameter | Description
---------- | -----------
-id | id of log to remove
diff --git a/docs/api/source/includes/_repository.md b/docs/api/source/includes/_repository.md
index ff17f71..fbbc329 100644
--- a/docs/api/source/includes/_repository.md
+++ b/docs/api/source/includes/_repository.md
@@ -93,87 +93,3 @@ other already present arch-repos.
Parameter | Description
--------- | -----------
repo | Repository to publish package to
-
-## Remove package from arch-repo
-
-
-
-```shell
-curl \
- -H 'X-Api-Key: secret' \
- -XDELETE \
- https://example.com/vieter/x86_64/mike
-```
-
-This endpoint allows you to remove a package from a given arch-repo.
-
-### HTTP Request
-
-`DELETE /:repo/:arch/:pkg`
-
-### URL Parameters
-
-Parameter | Description
---------- | -----------
-repo | Repository to delete package from
-arch | Specific arch-repo to remove package from
-pkg | Name of package to remove (without any version information)
-
-## Remove arch-repo
-
-
-
-```shell
-curl \
- -H 'X-Api-Key: secret' \
- -XDELETE \
- https://example.com/vieter/x86_64
-```
-
-This endpoint allows removing an entire arch-repo.
-
-### HTTP Request
-
-`DELETE /:repo/:arch`
-
-### URL Parameters
-
-Parameter | Description
---------- | -----------
-repo | Repository to delete arch-repo from
-arch | Specific architecture to remove
-
-## Remove repo
-
-
-
-```shell
-curl \
- -H 'X-Api-Key: secret' \
- -XDELETE \
- https://example.com/vieter
-```
-
-This endpoint allows removing an entire repo.
-
-### HTTP Request
-
-`DELETE /:repo`
-
-### URL Parameters
-
-Parameter | Description
---------- | -----------
-repo | Repository to delete
diff --git a/docs/api/source/includes/_targets.md b/docs/api/source/includes/_targets.md
index 1a5f3e0..c7061c8 100644
--- a/docs/api/source/includes/_targets.md
+++ b/docs/api/source/includes/_targets.md
@@ -27,7 +27,6 @@ curl \
"kind": "git",
"url": "https://aur.archlinux.org/discord-ptb.git",
"branch": "master",
- "path" : "",
"repo": "bur",
"schedule": "",
"arch": [
@@ -55,8 +54,6 @@ Parameter | Description
limit | Maximum amount of results to return.
offset | Offset of results.
repo | Limit results to targets that publish to the given repo.
-query | Only return targets that have this substring in their URL, path or branch.
-arch | Only return targets that publish to this arch.
## Get specific target
@@ -76,9 +73,8 @@ curl \
"kind": "git",
"url": "https://aur.archlinux.org/discord-ptb.git",
"branch": "master",
- "path": "",
"repo": "bur",
- "schedule": "0 2",
+ "schedule": "0 3",
"arch": [
{
"id": 1,
@@ -104,17 +100,6 @@ id | id of requested target
## Create a new target
-> JSON output format
-
-```json
-{
- "message": "",
- "data": {
- "id": 15
- }
-}
-```
-
Create a new target with the given data.
### HTTP Request
@@ -128,7 +113,6 @@ Parameter | Description
kind | Kind of target to add; one of 'git', 'url'.
url | URL of the Git repository.
branch | Branch of the Git repository.
-path | Subdirectory inside Git repository to use.
repo | Vieter repository to publish built packages to.
schedule | Cron build schedule (syntax explained [here](https://rustybever.be/docs/vieter/usage/builds/schedule/))
arch | Comma-separated list of architectures to build package on.
@@ -154,20 +138,12 @@ Parameter | Description
kind | Kind of target; one of 'git', 'url'.
url | URL of the Git repository.
branch | Branch of the Git repository.
-path | Subdirectory inside Git repository to use.
repo | Vieter repository to publish built packages to.
schedule | Cron build schedule
arch | Comma-separated list of architectures to build package on.
## Remove a target
-```shell
-curl \
- -XDELETE \
- -H 'X-Api-Key: secret' \
- https://example.com/api/v1/targets/1
-```
-
Remove a target from the server.
### HTTP Request
diff --git a/docs/api/source/index.html.md b/docs/api/source/index.html.md
index f61e44a..4bfddb8 100644
--- a/docs/api/source/index.html.md
+++ b/docs/api/source/index.html.md
@@ -11,7 +11,6 @@ includes:
- repository
- targets
- logs
- - jobs
search: true
diff --git a/docs/content/configuration.md b/docs/content/configuration.md
index 612c505..af941a2 100644
--- a/docs/content/configuration.md
+++ b/docs/content/configuration.md
@@ -17,7 +17,7 @@ If a variable is both present in the config file & as an environment variable,
the value in the environment variable is used.
{{< hint info >}}
-**Note**
+**Note**
All environment variables can also be provided from a file by appending them
with `_FILE`. This for example allows you to provide the API key from a Docker
secrets file.
@@ -32,11 +32,11 @@ configuration variable required for each command.
### `vieter server`
-* `port`: HTTP port to run on
- * Default: `8000`
* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
`WARN`, `INFO` or `DEBUG`.
* Default: `WARN`
+* `log_file`: log file to write logs to.
+ * Default: `vieter.log` (in the current directory)
* `pkg_dir`: where Vieter should store the actual package archives.
* `data_dir`: where Vieter stores the repositories, log file & database.
* `api_key`: the API key to use when authenticating requests.
@@ -44,26 +44,9 @@ configuration variable required for each command.
* Packages with architecture `any` are always added to this architecture.
This prevents the server from being confused when an `any` package is
published as the very first package for a repository.
- * Targets added without an `arch` value use this value instead.
-* `global_schedule`: build schedule for any target that does not have a
- schedule defined. For information about this syntax, see
- [here](/usage/builds/schedule).
- * Default: `0 3` (3AM every night)
-* `base_image`: Docker image to use when building a package. Any Pacman-based
- distro image should work, as long as `/etc/pacman.conf` is used &
- `base-devel` exists in the repositories. Make sure that the image supports
- the architecture of your cron daemon.
- * Default: `archlinux:base-devel` (only works on `x86_64`). If you require
- `aarch64` support, consider using
- [`menci/archlinuxarm:base-devel`](https://hub.docker.com/r/menci/archlinuxarm)
- ([GitHub](https://github.com/Menci/docker-archlinuxarm)). This is the
- image used for the Vieter CI builds.
-* `max_log_age`: maximum age of logs (in days). Logs older than this will get
- cleaned by the log removal daemon. If set to zero, no logs are ever removed.
- The age of logs is determined by the time the build was started.
- * Default: `0`
-* `log_removal_schedule`: cron schedule defining when to clean old logs.
- * Default: `0 0` (every day at midnight)
+ * Git repositories added without an `arch` value use this value instead.
+* `port`: HTTP port to run on
+ * Default: `8000`
### `vieter cron`
@@ -114,25 +97,3 @@ configuration variable required for each command.
build`.
* Default: `archlinux:base-devel`
-### `vieter agent`
-
-* `log_level`: log verbosity level. Value should be one of `FATAL`, `ERROR`,
- `WARN`, `INFO` or `DEBUG`.
- * Default: `WARN`
-* `address`: *public* URL of the Vieter repository server to build for. From
- this server jobs are retrieved. All built packages are published to this
- server.
-* `api_key`: API key of the above server.
-* `data_dir`: directory to store log file in.
-* `max_concurrent_builds`: how many builds to run at the same time.
- * Default: `1`
-* `polling_frequency`: how often (in seconds) to poll the server for new
- builds. Note that the agent might poll more frequently when it's actively
- processing builds.
-* `image_rebuild_frequency`: Vieter periodically builds images that are then
- used as a basis for running build containers. This is to prevent each build
- from downloading an entire repository worth of dependencies. This setting
- defines how frequently (in minutes) to rebuild these images.
- * Default: `1440` (every 24 hours)
-* `arch`: architecture for which this agent should pull down builds (e.g.
- `x86_64`)
diff --git a/docs/content/installation.md b/docs/content/installation.md
index 5b8e2d8..87b9cba 100644
--- a/docs/content/installation.md
+++ b/docs/content/installation.md
@@ -21,17 +21,17 @@ branch. This branch will be the most up to date, but does not give any
guarantees about stability, so beware!
Thanks to the single-binary design of Vieter, this image can be used both for
-the repository server, the cron daemon and the agent.
+the repository server & the cron daemon.
-Below is a minimal compose file to set up both the repository server & a build
-agent:
+Below is an example compose file to set up both the repository server & the
+cron daemon:
```yaml
version: '3'
services:
server:
- image: 'chewingbever/vieter:0.5.0-rc.1'
+ image: 'chewingbever/vieter:dev'
restart: 'always'
environment:
@@ -41,19 +41,18 @@ services:
- 'data:/data'
cron:
- image: 'chewingbever/vieter:0.5.0-rc.1'
+ image: 'chewingbever/vieter:dev'
restart: 'always'
- # Required to connect to the Docker daemon
user: root
- command: 'vieter agent'
+ command: 'vieter cron'
environment:
- 'VIETER_API_KEY=secret'
# MUST be public URL of Vieter repository
- 'VIETER_ADDRESS=https://example.com'
- # Architecture for which the agent builds
- - 'VIETER_ARCH=x86_64'
+ - 'VIETER_DEFAULT_ARCH=x86_64'
- 'VIETER_MAX_CONCURRENT_BUILDS=2'
+ - 'VIETER_GLOBAL_SCHEDULE=0 3'
volumes:
- '/var/run/docker.sock:/var/run/docker.sock'
@@ -64,23 +63,20 @@ volumes:
If you do not require the build system, the repository server can be used
independently as well.
-Of course, Vieter allows a lot more configuration than this. This compose file
-is meant as a starting point for setting up your installation.
-
{{< hint info >}}
**Note**
-Builds are executed on the agent's system using the host's Docker daemon. An
-agent for a specific `arch` will only build packages for that specific
-architecture. Therefore, if you wish to build packages for both `x86_64` &
-`aarch64`, you'll have to deploy two agents, one on each architecture.
-Afterwards, any Git repositories enabled for those two architectures will build
-on both.
+Builds are executed on the cron daemon's system using the host's Docker daemon.
+A cron daemon on a specific architecture will only build packages for that
+specific architecture. Therefore, if you wish to build packages for both
+`x86_64` & `aarch64`, you'll have to deploy two cron daemons, one on each
+architecture. Afterwards, any Git repositories enabled for those two
+architectures will build on both.
{{< /hint >}}
## Binary
On the
-[releases](https://git.rustybever.be/vieter-v/vieter/releases)
+[releases](https://git.rustybever.be/vieter/vieter/releases)
page, you can find statically compiled binaries for all
released versions. This is the same binary as used inside
the Docker images.
@@ -103,12 +99,12 @@ latest official release or `vieter-git` for the latest development release.
### AUR
If you prefer building the packages locally (or on your own Vieter instance),
-there's the [`vieter`](https://aur.archlinux.org/packages/vieter) &
-[`vieter-git`](https://aur.archlinux.org/packages/vieter-git) packages on the
-AUR. These packages build using the `vlang` compiler package, so I can't
+there's the [`vieter`](https://aur.archlinux.org/packages/vieter) &
+[`vieter-git`](https://aur.archlinux.org/packages/vieter-git) packages on the
+AUR. These packages build using the `vlang-git` compiler package, so I can't
guarantee that a compiler update won't temporarily break them.
## Building from source
-The project [README](https://git.rustybever.be/vieter-v/vieter#building)
-contains instructions for building Vieter from source.
+The project [README](https://git.rustybever.be/vieter/vieter#building) contains
+instructions for building Vieter from source.
diff --git a/docs/content/other/_index.md b/docs/content/other/_index.md
new file mode 100644
index 0000000..394456b
--- /dev/null
+++ b/docs/content/other/_index.md
@@ -0,0 +1,3 @@
+---
+weight: 100
+---
diff --git a/docs/content/other/builds-in-depth.md b/docs/content/other/builds-in-depth.md
new file mode 100644
index 0000000..d8df6ec
--- /dev/null
+++ b/docs/content/other/builds-in-depth.md
@@ -0,0 +1,81 @@
+# Builds In-depth
+
+For those interested, this page describes how the build system works
+internally.
+
+## Builder image
+
+Every cron daemon periodically creates a builder image that is then used as a
+base for all builds. This prevents each build container from having to pull
+down a large number of updates whenever it updates its system.
+
+The builder image is created by running the following commands inside a
+container started from the image defined in `base_image`:
+
+```sh
+# Update repos & install required packages
+pacman -Syu --needed --noconfirm base-devel git
+# Add a non-root user to run makepkg
+groupadd -g 1000 builder
+useradd -mg builder builder
+# Make sure they can use sudo without a password
+echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
+# Create the directory for the builds & make it writeable for the
+# build user
+mkdir /build
+chown -R builder:builder /build
+```
+
+This script updates the packages to their latest versions & creates a non-root
+user to use when running `makepkg`.
+
+This script is base64-encoded & passed to the container as an environment
+variable. The container's entrypoint is set to `/bin/sh -c` & its command
+argument to `echo $BUILD_SCRIPT | base64 -d | /bin/sh -e`, with the
+`BUILD_SCRIPT` environment variable containing the base64-encoded script.
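+
+The same mechanism can be reproduced by hand. A minimal sketch (the script
+path, wrapping flag & base image here are illustrative, not taken from the
+codebase):
+
+```sh
+# Encode the script once, without line wrapping
+BUILD_SCRIPT="$(base64 -w0 build-script.sh)"
+# Mirror the container's entrypoint (/bin/sh -c) & its command argument
+docker run --entrypoint /bin/sh -e BUILD_SCRIPT="$BUILD_SCRIPT" \
+    archlinux:base-devel -c 'echo $BUILD_SCRIPT | base64 -d | /bin/sh -e'
+```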
+
+Once the container exits, a new Docker image is created from it. This image is
+then used as the base for any builds.
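+
+Conceptually this is the same as committing the stopped container by hand,
+e.g. (the container ID is hypothetical; the tag is a monotonic timestamp):
+
+```sh
+# Create a new image from the stopped build container
+docker commit 0123456789ab vieter-build:1234567890
+```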
+
+## Running builds
+
+Each build has its own Docker container, using the builder image as its base.
+The same base64-based technique as above is used, just with a different script.
+To make the build logs clearer, each command is preceded by an `echo` command
+that prints the command to stdout.
+
+Given a Git repository at `https://examplerepo.com` with branch `main`, a
+Vieter server at `https://example.com` and `vieter` as the repository to
+publish to, we get the following script:
+
+```sh
+echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
+echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
+echo -e '+ pacman -Syu --needed --noconfirm'
+pacman -Syu --needed --noconfirm
+echo -e '+ su builder'
+su builder
+echo -e '+ git clone --single-branch --depth 1 --branch main https://examplerepo.com repo'
+git clone --single-branch --depth 1 --branch main https://examplerepo.com repo
+echo -e '+ cd repo'
+cd repo
+echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
+makepkg --nobuild --syncdeps --needed --noconfirm
+echo -e '+ source PKGBUILD'
+source PKGBUILD
+echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
+curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
+echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
+[ "$(id -u)" == 0 ] && exit 0
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+```
+
+This script:
+
+1. Adds the target repository as a repository in the build container
+2. Updates mirrors & packages
+3. Clones the Git repository
+4. Runs `makepkg` without building to calculate `pkgver`
+5. Checks whether the package version is already present on the server
+6. If not, runs `makepkg` & publishes any generated package archives to the server
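+
+Step 5 relies on the repository serving each package at a predictable URL: if
+the `HEAD` request succeeds, that exact version is already published & the
+build exits early. Reproduced by hand (package name & version are made up):
+
+```sh
+# Exit code 0 means the package already exists, so the build would be skipped
+curl --head --fail https://example.com/vieter/x86_64/discord-ptb-1.0.0-1
+```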
diff --git a/docs/content/usage/builds/cleanup.md b/docs/content/usage/builds/cleanup.md
deleted file mode 100644
index 724a75f..0000000
--- a/docs/content/usage/builds/cleanup.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-weight: 20
----
-
-# Cleanup
-
-Vieter stores the logs of every single package build. While this is great for
-debugging why builds fail, it also causes an active or long-running Vieter
-instance to accumulate thousands of logs.
-
-To combat this, a log removal daemon can be enabled that periodically removes
-old build logs. By starting your server with the `max_log_age` variable (see
-[Configuration](/configuration#vieter-server)), a daemon will get enabled that
-periodically removes logs older than this setting. By default, this will happen
-every day at midnight, but this behavior can be changed using the
-`log_removal_schedule` variable.
-
-{{< hint info >}}
-**Note**
-The daemon will always run a removal of logs on startup. Therefore, it's
-possible the daemon will be *very* active when first enabling this setting.
-After the initial surge of logs to remove, it'll calm down again.
-{{< /hint >}}
diff --git a/docs/content/usage/builds/schedule.md b/docs/content/usage/builds/schedule.md
index d3802fd..38f76a4 100644
--- a/docs/content/usage/builds/schedule.md
+++ b/docs/content/usage/builds/schedule.md
@@ -1,7 +1,3 @@
----
-weight: 10
----
-
# Cron schedule syntax
The Vieter cron daemon uses a subset of the cron expression syntax to schedule
@@ -41,6 +37,6 @@ Each section can consist of as many of these parts as necessary.
## CLI tool
The Vieter binary contains a command that shows you the next matching times for
-a given expression. This can be useful for understanding the syntax. For more
+a given expression. This can be useful to understand the syntax. For more
information, see
[vieter-schedule(1)](https://rustybever.be/man/vieter/vieter-schedule.1.html).
diff --git a/src/agent/agent.v b/src/agent/agent.v
deleted file mode 100644
index 69b9947..0000000
--- a/src/agent/agent.v
+++ /dev/null
@@ -1,27 +0,0 @@
-module agent
-
-import log
-import os
-import util
-
-const log_file_name = 'vieter.agent.log'
-
-// agent starts an agent service
-pub fn agent(conf Config) ! {
- log_level := log.level_from_tag(conf.log_level) or {
- return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
- }
-
- mut logger := log.Log{
- level: log_level
- }
-
- os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') }
-
- log_file := os.join_path_single(conf.data_dir, agent.log_file_name)
- logger.set_full_logpath(log_file)
- logger.log_to_console_too()
-
- mut d := agent_init(logger, conf)
- d.run()
-}
diff --git a/src/agent/cli.v b/src/agent/cli.v
deleted file mode 100644
index 2dee8d6..0000000
--- a/src/agent/cli.v
+++ /dev/null
@@ -1,31 +0,0 @@
-module agent
-
-import cli
-import conf as vconf
-
-struct Config {
-pub:
- log_level string = 'WARN'
- // Architecture that the agent represents
- arch string
- api_key string
- address string
- data_dir string
- max_concurrent_builds int = 1
- polling_frequency int = 30
- image_rebuild_frequency int = 1440
-}
-
-// cmd returns the cli module that handles the cron daemon.
-pub fn cmd() cli.Command {
- return cli.Command{
- name: 'agent'
- description: 'Start an agent daemon.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
-
- agent(conf_)!
- }
- }
-}
diff --git a/src/agent/daemon.v b/src/agent/daemon.v
deleted file mode 100644
index d49b45e..0000000
--- a/src/agent/daemon.v
+++ /dev/null
@@ -1,197 +0,0 @@
-module agent
-
-import log
-import sync.stdatomic
-import build
-import models { BuildConfig }
-import client
-import time
-import os
-
-const (
- build_empty = 0
- build_running = 1
- build_done = 2
-)
-
-struct AgentDaemon {
- logger shared log.Log
- conf Config
- client client.Client
-mut:
- images ImageManager
- // Atomic variables used to detect when a build has finished; length is
- // conf.max_concurrent_builds. This approach is used as the difference
- // between a recently finished build and an empty build slot is important
- // for knowing whether the agent is currently "active".
- atomics []u64
- // Channel used to send builds to worker threads
- build_channel chan BuildConfig
-}
-
-// agent_init initializes a new agent
-fn agent_init(logger log.Log, conf Config) AgentDaemon {
- mut d := AgentDaemon{
- logger: logger
- client: client.new(conf.address, conf.api_key)
- conf: conf
- images: new_image_manager(conf.image_rebuild_frequency * 60)
- atomics: []u64{len: conf.max_concurrent_builds}
- build_channel: chan BuildConfig{cap: conf.max_concurrent_builds}
- }
-
- return d
-}
-
-// run starts the actual agent daemon. This function will run forever.
-pub fn (mut d AgentDaemon) run() {
- // Spawn worker threads
- for builder_index in 0 .. d.conf.max_concurrent_builds {
- spawn d.builder_thread(d.build_channel, builder_index)
- }
-
-	// This is just so that the very first time the loop is run, the jobs are
- // always polled
- mut last_poll_time := time.now().add_seconds(-d.conf.polling_frequency)
- mut sleep_time := 0 * time.second
- mut finished, mut empty, mut running := 0, 0, 0
-
- for {
- if sleep_time > 0 {
- d.ldebug('Sleeping for ${sleep_time}')
- time.sleep(sleep_time)
- }
-
- finished, empty = d.update_atomics()
- running = d.conf.max_concurrent_builds - finished - empty
-
- // No new finished builds and no free slots, so there's nothing to be
- // done
- if finished + empty == 0 {
- sleep_time = 1 * time.second
- continue
- }
-
- // Builds have finished, so old builder images might have freed up.
- // TODO this might query the docker daemon too frequently.
- if finished > 0 {
- d.images.clean_old_images()
- }
-
- // The agent will always poll for new jobs after at most
- // `polling_frequency` seconds. However, when jobs have finished, the
- // agent will also poll for new jobs. This is because jobs are often
- // clustered together (especially when mostly using the global cron
- // schedule), so there's a much higher chance jobs are available.
- if finished > 0 || time.now() >= last_poll_time.add_seconds(d.conf.polling_frequency) {
- d.ldebug('Polling for new jobs')
-
- new_configs := d.client.poll_jobs(d.conf.arch, finished + empty) or {
- d.lerror('Failed to poll jobs: ${err.msg()}')
-
- // TODO pick a better delay here
- sleep_time = 5 * time.second
- continue
- }
-
- d.ldebug('Received ${new_configs.len} jobs')
-
- last_poll_time = time.now()
-
- for config in new_configs {
- // Make sure a recent build base image is available for
- // building the config
- if !d.images.up_to_date(config.base_image) {
- d.linfo('Building builder image from base image ${config.base_image}')
-
- // TODO handle this better than to just skip the config
- d.images.refresh_image(config.base_image) or {
- d.lerror(err.msg())
- continue
- }
- }
-
- // It's technically still possible that the build image is
- // removed in the very short period between building the
- // builder image and starting a build container with it. If
- // this happens, fate really just didn't want you to do this
- // build.
-
- d.build_channel <- config
- running++
- }
- }
-
- // The agent is not doing anything, so we just wait until the next poll
- // time
- if running == 0 {
- sleep_time = last_poll_time.add_seconds(d.conf.polling_frequency) - time.now()
- } else {
- sleep_time = 1 * time.second
- }
- }
-}
-
-// update_atomics checks for each build whether it's completed, and sets it to
-// empty again if so. The return value is a tuple `(finished, empty)` where
-// `finished` is how many builds were just finished and thus set to empty, and
-// `empty` is how many build slots were already empty. The amount of running
- // builds can then be calculated by subtracting these two values from the
-// total allowed concurrent builds.
-fn (mut d AgentDaemon) update_atomics() (int, int) {
- mut finished := 0
- mut empty := 0
-
- for i in 0 .. d.atomics.len {
- if stdatomic.load_u64(&d.atomics[i]) == agent.build_done {
- stdatomic.store_u64(&d.atomics[i], agent.build_empty)
- finished++
- } else if stdatomic.load_u64(&d.atomics[i]) == agent.build_empty {
- empty++
- }
- }
-
- return finished, empty
-}
-
-// run_build actually starts the build process for a given target.
-fn (mut d AgentDaemon) run_build(build_index int, config BuildConfig) {
- d.linfo('started build: ${config}')
-
- // 0 means success, 1 means failure
- mut status := 0
-
- new_config := BuildConfig{
- ...config
- base_image: d.images.get(config.base_image)
- }
-
- res := build.build_config(d.client.address, d.client.api_key, new_config) or {
- d.ldebug('build_config error: ${err.msg()}')
- status = 1
-
- build.BuildResult{}
- }
-
- if status == 0 {
- d.linfo('Uploading build logs for ${config}')
-
- // TODO use the arch value here
- build_arch := os.uname().machine
- d.client.add_build_log(config.target_id, res.start_time, res.end_time, build_arch,
- res.exit_code, res.logs) or { d.lerror('Failed to upload logs for ${config}') }
- } else {
- d.lwarn('an error occurred during build: ${config}')
- }
-
- stdatomic.store_u64(&d.atomics[build_index], agent.build_done)
-}
-
-// builder_thread is a thread that constantly listens for builds to process
-fn (mut d AgentDaemon) builder_thread(ch chan BuildConfig, builder_index int) {
- for {
- build_config := <-ch or { break }
-
- d.run_build(builder_index, build_config)
- }
-}
diff --git a/src/agent/images.v b/src/agent/images.v
deleted file mode 100644
index 9befc0c..0000000
--- a/src/agent/images.v
+++ /dev/null
@@ -1,119 +0,0 @@
-module agent
-
-import time
-import docker
-import build
-
-// An ImageManager is a utility that creates builder images from given base
-// images, updating these builder images if they've become too old. This
-// structure can manage images from any number of base images, paving the way
-// for configurable base images per target/repository.
-struct ImageManager {
- max_image_age int [required]
-mut:
- // For each base image, one or more builder images can exist at the same
- // time
- images map[string][]string [required]
- // For each base image, we track when its newest image was built
- timestamps map[string]time.Time [required]
-}
-
-// new_image_manager initializes a new image manager.
-fn new_image_manager(max_image_age int) ImageManager {
- return ImageManager{
- max_image_age: max_image_age
- images: map[string][]string{}
- timestamps: map[string]time.Time{}
- }
-}
-
-// get returns the name of the newest image for the given base image. Note that
-// this function should only be called *after* a first call to `refresh_image`.
-pub fn (m &ImageManager) get(base_image string) string {
- return m.images[base_image].last()
-}
-
-// up_to_date returns true if the last known builder image exists and is up to
-// date. If this function returns true, the last builder image may be used to
-// perform a build.
-pub fn (mut m ImageManager) up_to_date(base_image string) bool {
- if base_image !in m.timestamps
- || m.timestamps[base_image].add_seconds(m.max_image_age) <= time.now() {
- return false
- }
-
- // It's possible the image has been removed by some external event, so we
- // check whether it actually exists as well.
- mut dd := docker.new_conn() or { return false }
-
- defer {
- dd.close() or {}
- }
-
- dd.image_inspect(m.images[base_image].last()) or {
- // Image doesn't exist, so we stop tracking it
- if err.code() == 404 {
- m.images[base_image].delete_last()
- m.timestamps.delete(base_image)
- }
-
- // If the inspect fails, it's either because the image doesn't exist or
- // because of some other error. Either way, we can't know *for certain*
- // that the image exists, so we return false.
- return false
- }
-
- return true
-}
-
-// refresh_image builds a new builder image from the given base image. This
-// function should only be called if `up_to_date` returned false.
-fn (mut m ImageManager) refresh_image(base_image string) ! {
- // TODO use better image tags for built images
- new_image := build.create_build_image(base_image) or {
- return error('Failed to build builder image from base image ${base_image}')
- }
-
- m.images[base_image] << new_image
- m.timestamps[base_image] = time.now()
-}
-
-// clean_old_images removes all older builder images that are no longer in use.
-// The function will always leave at least one builder image, namely the newest
-// one.
-fn (mut m ImageManager) clean_old_images() {
- mut dd := docker.new_conn() or { return }
-
- defer {
- dd.close() or {}
- }
-
- mut i := 0
-
- for image in m.images.keys() {
- i = 0
-
- for i < m.images[image].len - 1 {
- // For each builder image, we try to remove it by calling the Docker
- // API. If the function returns an error or false, that means the image
- // wasn't deleted. Therefore, we move the index over. If the function
- // returns true, the array's length has decreased by one so we don't
- // move the index.
- dd.image_remove(m.images[image][i]) or {
- // The image was removed by an external event
- if err.code() == 404 {
- m.images[image].delete(i)
- }
- // The image couldn't be removed, so we need to keep track of
- // it
- else {
- i += 1
- }
-
- continue
- }
-
- m.images[image].delete(i)
- }
- }
-}
diff --git a/src/agent/log.v b/src/agent/log.v
deleted file mode 100644
index fcd8373..0000000
--- a/src/agent/log.v
+++ /dev/null
@@ -1,36 +0,0 @@
-module agent
-
-// lfatal create a log message with the fatal level
-pub fn (mut d AgentDaemon) lfatal(msg string) {
- lock d.logger {
- d.logger.fatal(msg)
- }
-}
-
-// lerror create a log message with the error level
-pub fn (mut d AgentDaemon) lerror(msg string) {
- lock d.logger {
- d.logger.error(msg)
- }
-}
-
-// lwarn create a log message with the warn level
-pub fn (mut d AgentDaemon) lwarn(msg string) {
- lock d.logger {
- d.logger.warn(msg)
- }
-}
-
-// linfo create a log message with the info level
-pub fn (mut d AgentDaemon) linfo(msg string) {
- lock d.logger {
- d.logger.info(msg)
- }
-}
-
-// ldebug create a log message with the debug level
-pub fn (mut d AgentDaemon) ldebug(msg string) {
- lock d.logger {
- d.logger.debug(msg)
- }
-}
diff --git a/src/build/build.v b/src/build/build.v
index b864792..2ad70a6 100644
--- a/src/build/build.v
+++ b/src/build/build.v
@@ -1,12 +1,12 @@
module build
-import docker
+import vieter_v.docker
import encoding.base64
import time
import os
import strings
import util
-import models { BuildConfig, Target }
+import models { Target }
const (
container_build_dir = '/build'
@@ -21,8 +21,8 @@ const (
// system, install some necessary packages & creates a non-root user to run
// makepkg with. The base image should be some Linux distribution that uses
// Pacman as its package manager.
-pub fn create_build_image(base_image string) !string {
- mut dd := docker.new_conn()!
+pub fn create_build_image(base_image string) ?string {
+ mut dd := docker.new_conn()?
defer {
dd.close() or {}
@@ -45,7 +45,7 @@ pub fn create_build_image(base_image string) !string {
c := docker.NewContainer{
image: base_image
- env: ['BUILD_SCRIPT=${cmds_str}']
+ env: ['BUILD_SCRIPT=$cmds_str']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/sh -e']
}
@@ -57,15 +57,15 @@ pub fn create_build_image(base_image string) !string {
image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
// We pull the provided image
- dd.image_pull(image_name, image_tag)!
+ dd.pull_image(image_name, image_tag)?
- id := dd.container_create(c)!.id
- // id := docker.create_container(c)!
- dd.container_start(id)!
+ id := dd.container_create(c)?.id
+ // id := docker.create_container(c)?
+ dd.container_start(id)?
// This loop waits until the container has stopped, so we can remove it after
for {
- data := dd.container_inspect(id)!
+ data := dd.container_inspect(id)?
if !data.state.running {
break
@@ -79,8 +79,8 @@ pub fn create_build_image(base_image string) !string {
// TODO also add the base image's name into the image name to prevent
// conflicts.
tag := time.sys_mono_now().str()
- image := dd.image_from_container(id, 'vieter-build', tag)!
- dd.container_remove(id)!
+ image := dd.create_image_from_container(id, 'vieter-build', tag)?
+ dd.container_remove(id)?
return image.id
}
@@ -93,35 +93,28 @@ pub:
logs string
}
-// build_target builds the given target. Internally it calls `build_config`.
-pub fn build_target(address string, api_key string, base_image_id string, target &Target, force bool, timeout int) !BuildResult {
- config := target.as_build_config(base_image_id, force, timeout)
-
- return build_config(address, api_key, config)
-}
-
-// build_config builds, packages & publishes a given Arch package based on the
+// build_target builds, packages & publishes a given Arch package based on the
// provided target. The base image ID should be of an image previously created
// by create_build_image. It returns the logs of the container.
-pub fn build_config(address string, api_key string, config BuildConfig) !BuildResult {
- mut dd := docker.new_conn()!
+pub fn build_target(address string, api_key string, base_image_id string, target &Target) ?BuildResult {
+ mut dd := docker.new_conn()?
defer {
dd.close() or {}
}
build_arch := os.uname().machine
- build_script := create_build_script(address, config, build_arch)
+ build_script := create_build_script(address, target, build_arch)
// We convert the build script into a base64 string, which then gets passed
// to the container as an env var
base64_script := base64.encode_str(build_script)
c := docker.NewContainer{
- image: '${config.base_image}'
+ image: '$base_image_id'
env: [
- 'BUILD_SCRIPT=${base64_script}',
- 'API_KEY=${api_key}',
+ 'BUILD_SCRIPT=$base64_script',
+ 'API_KEY=$api_key',
// `archlinux:base-devel` does not correctly set the path variable,
// causing certain builds to fail. This fixes it.
'PATH=${build.path_dirs.join(':')}',
@@ -132,33 +125,25 @@ pub fn build_config(address string, api_key string, config BuildConfig) !BuildRe
user: '0:0'
}
- id := dd.container_create(c)!.id
- dd.container_start(id)!
+ id := dd.container_create(c)?.id
+ dd.container_start(id)?
- mut data := dd.container_inspect(id)!
- start_time := time.now()
+ mut data := dd.container_inspect(id)?
// This loop waits until the container has stopped, so we can remove it after
for data.state.running {
- if time.now() - start_time > config.timeout * time.second {
- dd.container_kill(id)!
- dd.container_remove(id)!
-
- return error('Build killed due to timeout (${config.timeout}s)')
- }
-
time.sleep(1 * time.second)
- data = dd.container_inspect(id)!
+ data = dd.container_inspect(id)?
}
- mut logs_stream := dd.container_get_logs(id)!
+ mut logs_stream := dd.container_get_logs(id)?
// Read in the entire stream
mut logs_builder := strings.new_builder(10 * 1024)
- util.reader_to_writer(mut logs_stream, mut logs_builder)!
+ util.reader_to_writer(mut logs_stream, mut logs_builder)?
- dd.container_remove(id)!
+ dd.container_remove(id)?
return BuildResult{
start_time: data.state.start_time
diff --git a/src/build/scripts/git.sh b/src/build/build_script_git.sh
similarity index 75%
rename from src/build/scripts/git.sh
rename to src/build/build_script_git.sh
index 2644243..73e0965 100644
--- a/src/build/scripts/git.sh
+++ b/src/build/build_script_git.sh
@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
diff --git a/src/build/scripts/git_branch.sh b/src/build/build_script_git_branch.sh
similarity index 75%
rename from src/build/scripts/git_branch.sh
rename to src/build/build_script_git_branch.sh
index 9f36bdc..be1ff4f 100644
--- a/src/build/scripts/git_branch.sh
+++ b/src/build/build_script_git_branch.sh
@@ -16,5 +16,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
diff --git a/src/build/scripts/url.sh b/src/build/build_script_url.sh
similarity index 75%
rename from src/build/scripts/url.sh
rename to src/build/build_script_url.sh
index 2d27de7..3bc97e1 100644
--- a/src/build/scripts/url.sh
+++ b/src/build/build_script_url.sh
@@ -18,5 +18,5 @@ echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkg
curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
[ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
+echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
+MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
diff --git a/src/build/queue.v b/src/build/queue.v
deleted file mode 100644
index bc4db9d..0000000
--- a/src/build/queue.v
+++ /dev/null
@@ -1,219 +0,0 @@
-module build
-
-import models { BuildConfig, Target }
-import cron
-import time
-import datatypes { MinHeap }
-import util
-
-struct BuildJob {
-pub mut:
- // Time at which this build job was created/queued
- created time.Time
- // Next timestamp from which point this job is allowed to be executed
- timestamp time.Time
- // Required for calculating next timestamp after having pop'ed a job
- ce &cron.Expression = unsafe { nil }
- // Actual build config sent to the agent
- config BuildConfig
- // Whether this is a one-time job
- single bool
-}
-
-// Allows BuildJob structs to be sorted according to their timestamp in
-// MinHeaps
-fn (r1 BuildJob) < (r2 BuildJob) bool {
- return r1.timestamp < r2.timestamp
-}
-
-// The build job queue is responsible for managing the list of scheduled builds
-// for each architecture. Agents receive jobs from this queue.
-pub struct BuildJobQueue {
-	// Schedule to use for targets without an explicitly defined cron expression
- default_schedule &cron.Expression
- // Base image to use for targets without defined base image
- default_base_image string
- // After how many minutes a build should be forcefully cancelled
- default_build_timeout int
-mut:
- mutex shared util.Dummy
- // For each architecture, a priority queue is tracked
- queues map[string]MinHeap[BuildJob]
- // When a target is removed from the server or edited, its previous build
- // configs will be invalid. This map allows for those to be simply skipped
- // by ignoring any build configs created before this timestamp.
- invalidated map[int]time.Time
-}
-
-// new_job_queue initializes a new job queue
-pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string, default_build_timeout int) BuildJobQueue {
- return BuildJobQueue{
- default_schedule: unsafe { default_schedule }
- default_base_image: default_base_image
- default_build_timeout: default_build_timeout
- invalidated: map[int]time.Time{}
- }
-}
-
-// insert_all executes insert for each architecture of the given Target.
-pub fn (mut q BuildJobQueue) insert_all(target Target) ! {
- for arch in target.arch {
- q.insert(target: target, arch: arch.value)!
- }
-}
-
-[params]
-pub struct InsertConfig {
- target Target [required]
- arch string [required]
- single bool
- force bool
- now bool
-}
-
-// insert a new target's job into the queue for the given architecture. This
-// job will then be endlessly rescheduled after being pop'ed, unless removed
-// explicitly.
-pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! {
- lock q.mutex {
- if input.arch !in q.queues {
- q.queues[input.arch] = MinHeap[BuildJob]{}
- }
-
- mut job := BuildJob{
- created: time.now()
- single: input.single
- config: input.target.as_build_config(q.default_base_image, input.force, q.default_build_timeout)
- }
-
- if !input.now {
- ce := if input.target.schedule != '' {
- cron.parse_expression(input.target.schedule) or {
- return error("Error while parsing cron expression '${input.target.schedule}' (id ${input.target.id}): ${err.msg()}")
- }
- } else {
- q.default_schedule
- }
-
- job.timestamp = ce.next_from_now()
- job.ce = ce
- } else {
- job.timestamp = time.now()
- }
-
- q.queues[input.arch].insert(job)
- }
-}
-
-// reschedule the given job by calculating the next timestamp and re-adding it
-// to its respective queue. This function is called by the pop functions
-// *after* having pop'ed the job.
-fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) {
- new_timestamp := job.ce.next_from_now()
-
- new_job := BuildJob{
- ...job
- created: time.now()
- timestamp: new_timestamp
- }
-
- q.queues[arch].insert(new_job)
-}
-
-// pop_invalid pops all invalid jobs.
-fn (mut q BuildJobQueue) pop_invalid(arch string) {
- for {
- job := q.queues[arch].peek() or { return }
-
- if job.config.target_id in q.invalidated
- && job.created < q.invalidated[job.config.target_id] {
- // This pop *should* never fail according to the source code
- q.queues[arch].pop() or {}
- } else {
- break
- }
- }
-}
-
-// peek shows the first job for the given architecture that's ready to be
-// executed, if present.
-pub fn (mut q BuildJobQueue) peek(arch string) ?BuildJob {
- // Even peek requires a write lock, because pop_invalid can modify the data
- // structure
- lock q.mutex {
- if arch !in q.queues {
- return none
- }
-
- q.pop_invalid(arch)
- job := q.queues[arch].peek() or { return none }
-
- if job.timestamp < time.now() {
- return job
- }
- }
-
- return none
-}
-
-// pop removes the first job for the given architecture that's ready to be
-// executed from the queue and returns it, if present.
-pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob {
- lock q.mutex {
- if arch !in q.queues {
- return none
- }
-
- q.pop_invalid(arch)
- mut job := q.queues[arch].peek() or { return none }
-
- if job.timestamp < time.now() {
- job = q.queues[arch].pop() or { return none }
-
- if !job.single {
- q.reschedule(job, arch)
- }
-
- return job
- }
- }
-
- return none
-}
-
-// pop_n tries to pop at most n available jobs for the given architecture.
-pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob {
- lock q.mutex {
- if arch !in q.queues {
- return []
- }
-
- mut out := []BuildJob{}
-
- for out.len < n {
- q.pop_invalid(arch)
- mut job := q.queues[arch].peek() or { break }
-
- if job.timestamp < time.now() {
- job = q.queues[arch].pop() or { break }
-
- if !job.single {
- q.reschedule(job, arch)
- }
-
- out << job
- } else {
- break
- }
- }
-
- return out
- }
-
- return []
-}
-
-// invalidate a target's old build jobs.
-pub fn (mut q BuildJobQueue) invalidate(target_id int) {
- q.invalidated[target_id] = time.now()
-}
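The deleted queue leaned on two V features worth noting: operator overloading (datatypes.MinHeap orders elements via their `<` method) and lazy invalidation (stale jobs are only dropped once they surface at the front of the heap). A minimal, self-contained sketch of the ordering half, assuming the same datatypes API:

module main

import datatypes { MinHeap }
import time

struct Job {
	timestamp time.Time
}

// Like the deleted BuildJob: overloading `<` is what lets MinHeap order
// these structs by timestamp.
fn (a Job) < (b Job) bool {
	return a.timestamp < b.timestamp
}

fn main() {
	mut q := MinHeap[Job]{}
	q.insert(Job{
		timestamp: time.now().add_seconds(60)
	})
	q.insert(Job{
		timestamp: time.now()
	})
	// peek returns the earliest job without removing it
	job := q.peek() or { return }
	println(job.timestamp)
}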
diff --git a/src/build/scripts/git_path.sh b/src/build/scripts/git_path.sh
deleted file mode 100644
index 65b7fb9..0000000
--- a/src/build/scripts/git_path.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
-echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
-echo -e '+ pacman -Syu --needed --noconfirm'
-pacman -Syu --needed --noconfirm
-echo -e '+ su builder'
-su builder
-echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
-git clone --single-branch --depth 1 'https://examplerepo.com' repo
-echo -e '+ cd '\''repo/example/path'\'''
-cd 'repo/example/path'
-echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
-makepkg --nobuild --syncdeps --needed --noconfirm
-echo -e '+ source PKGBUILD'
-source PKGBUILD
-echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
-curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
-echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
-[ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
diff --git a/src/build/scripts/git_path_spaces.sh b/src/build/scripts/git_path_spaces.sh
deleted file mode 100644
index b632b91..0000000
--- a/src/build/scripts/git_path_spaces.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-echo -e '+ echo -e '\''[vieter]\\nServer = https://example.com/$repo/$arch\\nSigLevel = Optional'\'' >> /etc/pacman.conf'
-echo -e '[vieter]\nServer = https://example.com/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
-echo -e '+ pacman -Syu --needed --noconfirm'
-pacman -Syu --needed --noconfirm
-echo -e '+ su builder'
-su builder
-echo -e '+ git clone --single-branch --depth 1 '\''https://examplerepo.com'\'' repo'
-git clone --single-branch --depth 1 'https://examplerepo.com' repo
-echo -e '+ cd '\''repo/example/path with spaces'\'''
-cd 'repo/example/path with spaces'
-echo -e '+ makepkg --nobuild --syncdeps --needed --noconfirm'
-makepkg --nobuild --syncdeps --needed --noconfirm
-echo -e '+ source PKGBUILD'
-source PKGBUILD
-echo -e '+ curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0'
-curl -s --head --fail https://example.com/vieter/x86_64/$pkgname-$pkgver-$pkgrel && exit 0
-echo -e '+ [ "$(id -u)" == 0 ] && exit 0'
-[ "$(id -u)" == 0 ] && exit 0
-echo -e '+ MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done'
-MAKEFLAGS="-j$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $API_KEY" https://example.com/vieter/publish; done
diff --git a/src/build/shell.v b/src/build/shell.v
index f32cd08..e573d53 100644
--- a/src/build/shell.v
+++ b/src/build/shell.v
@@ -1,6 +1,6 @@
module build
-import models { BuildConfig }
+import models { Target }
// escape_shell_string escapes any characters that could be interpreted
// incorrectly by a shell. The resulting value should be safe to use inside an
@@ -23,13 +23,13 @@ pub fn echo_commands(cmds []string) []string {
}
// create_build_script generates a shell script that builds a given Target.
-fn create_build_script(address string, config BuildConfig, build_arch string) string {
- repo_url := '${address}/${config.repo}'
+fn create_build_script(address string, target &Target, build_arch string) string {
+ repo_url := '$address/$target.repo'
mut commands := [
// This will later be replaced by a proper setting for changing the
// mirrorlist
- "echo -e '[${config.repo}]\\nServer = ${address}/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
+ "echo -e '[$target.repo]\\nServer = $address/\$repo/\$arch\\nSigLevel = Optional' >> /etc/pacman.conf"
// We need to update the package list of the repo we just added above.
// This should however not pull in a lot of packages as long as the
// builder image is rebuilt frequently.
@@ -38,22 +38,22 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
'su builder',
]
- commands << match config.kind {
+ commands << match target.kind {
'git' {
- if config.branch == '' {
+ if target.branch == '' {
[
- "git clone --single-branch --depth 1 '${config.url}' repo",
+ "git clone --single-branch --depth 1 '$target.url' repo",
]
} else {
[
- "git clone --single-branch --depth 1 --branch ${config.branch} '${config.url}' repo",
+ "git clone --single-branch --depth 1 --branch $target.branch '$target.url' repo",
]
}
}
'url' {
[
'mkdir repo',
- "curl -o repo/PKGBUILD -L '${config.url}'",
+ "curl -o repo/PKGBUILD -L '$target.url'",
]
}
else {
@@ -61,32 +61,19 @@ fn create_build_script(address string, config BuildConfig, build_arch string) st
}
}
- commands << if config.path != '' {
- "cd 'repo/${config.path}'"
- } else {
- 'cd repo'
- }
-
commands << [
+ 'cd repo',
'makepkg --nobuild --syncdeps --needed --noconfirm',
'source PKGBUILD',
- ]
-
- if !config.force {
// The build container checks whether the package is already present on
// the server.
- commands << [
- 'curl -s --head --fail ${repo_url}/${build_arch}/\$pkgname-\$pkgver-\$pkgrel && exit 0',
- // If the above curl command succeeds, we don't need to rebuild the
- // package. However, because we're in a su shell, the exit command will
- // drop us back into the root shell. Therefore, we must check whether
- // we're in root so we don't proceed.
- '[ "\$(id -u)" == 0 ] && exit 0',
- ]
- }
-
- commands << [
- 'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed --noextract && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" ${repo_url}/publish; done',
+ 'curl -s --head --fail $repo_url/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+ // If the above curl command succeeds, we don't need to rebuild the
+ // package. However, because we're in a su shell, the exit command will
+ // drop us back into the root shell. Therefore, we must check whether
+ // we're in root so we don't proceed.
+ '[ "\$(id -u)" == 0 ] && exit 0',
+ 'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $repo_url/publish; done',
]
return echo_commands(commands).join('\n')
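The fixture scripts above alternate an `echo -e '+ …'` line with the command itself; that is the visible effect of echo_commands. A sketch of what it presumably does (the real implementation lies outside this hunk):

// Sketch only: judging by the generated scripts, each command is preceded
// by an echoed, shell-escaped copy of itself prefixed with '+ '.
fn echo_commands_sketch(cmds []string) []string {
	mut out := []string{}
	for cmd in cmds {
		out << "echo -e '+ ${escape_shell_string(cmd)}'"
		out << cmd
	}
	return out
}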
diff --git a/src/build/shell_test.v b/src/build/shell_test.v
index e23d964..341df88 100644
--- a/src/build/shell_test.v
+++ b/src/build/shell_test.v
@@ -1,75 +1,43 @@
module build
-import models { BuildConfig }
-
-fn test_create_build_script_git() {
- config := BuildConfig{
- target_id: 1
- kind: 'git'
- url: 'https://examplerepo.com'
- repo: 'vieter'
- base_image: 'not-used:latest'
- }
-
- build_script := create_build_script('https://example.com', config, 'x86_64')
- expected := $embed_file('scripts/git.sh')
-
- assert build_script == expected.to_string().trim_space()
-}
-
-fn test_create_build_script_git_path() {
- mut config := BuildConfig{
- target_id: 1
- kind: 'git'
- url: 'https://examplerepo.com'
- repo: 'vieter'
- path: 'example/path'
- base_image: 'not-used:latest'
- }
-
- mut build_script := create_build_script('https://example.com', config, 'x86_64')
- mut expected := $embed_file('scripts/git_path.sh')
-
- assert build_script == expected.to_string().trim_space()
-
- config = BuildConfig{
- ...config
- path: 'example/path with spaces'
- }
-
- build_script = create_build_script('https://example.com', config, 'x86_64')
- expected = $embed_file('scripts/git_path_spaces.sh')
-
- assert build_script == expected.to_string().trim_space()
-}
+import models { Target }
fn test_create_build_script_git_branch() {
- config := BuildConfig{
- target_id: 1
+ target := Target{
+ id: 1
kind: 'git'
url: 'https://examplerepo.com'
branch: 'main'
repo: 'vieter'
- base_image: 'not-used:latest'
}
+ build_script := create_build_script('https://example.com', target, 'x86_64')
+ expected := $embed_file('build_script_git_branch.sh')
- build_script := create_build_script('https://example.com', config, 'x86_64')
- expected := $embed_file('scripts/git_branch.sh')
+ assert build_script == expected.to_string().trim_space()
+}
+
+fn test_create_build_script_git() {
+ target := Target{
+ id: 1
+ kind: 'git'
+ url: 'https://examplerepo.com'
+ repo: 'vieter'
+ }
+ build_script := create_build_script('https://example.com', target, 'x86_64')
+ expected := $embed_file('build_script_git.sh')
assert build_script == expected.to_string().trim_space()
}
fn test_create_build_script_url() {
- config := BuildConfig{
- target_id: 1
+ target := Target{
+ id: 1
kind: 'url'
url: 'https://examplerepo.com'
repo: 'vieter'
- base_image: 'not-used:latest'
}
-
- build_script := create_build_script('https://example.com', config, 'x86_64')
- expected := $embed_file('scripts/url.sh')
+ build_script := create_build_script('https://example.com', target, 'x86_64')
+ expected := $embed_file('build_script_url.sh')
assert build_script == expected.to_string().trim_space()
}
diff --git a/src/client/client.v b/src/client/client.v
index 7d57e92..2bb1ac2 100644
--- a/src/client/client.v
+++ b/src/client/client.v
@@ -2,7 +2,7 @@ module client
import net.http { Method }
import net.urllib
-import web.response { Response, new_data_response }
+import response { Response }
import json
pub struct Client {
@@ -21,8 +21,8 @@ pub fn new(address string, api_key string) Client {
// send_request_raw sends an HTTP request, returning the http.Response object.
// It encodes the params so that they're safe to pass as HTTP query parameters.
-fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) !http.Response {
- mut full_url := '${c.address}${url}'
+fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) ?http.Response {
+ mut full_url := '$c.address$url'
if params.len > 0 {
mut params_escaped := map[string]string{}
@@ -30,61 +30,41 @@ fn (c &Client) send_request_raw(method Method, url string, params map[string]str
// Escape each query param
for k, v in params {
// An empty parameter should be the same as not providing it at all
- params_escaped[k] = urllib.query_escape(v)
+ if v != '' {
+ params_escaped[k] = urllib.query_escape(v)
+ }
}
- params_str := params_escaped.keys().map('${it}=${params_escaped[it]}').join('&')
+	params_str := params_escaped.keys().map('$it=${params_escaped[it]}').join('&')
- full_url = '${full_url}?${params_str}'
+ full_url = '$full_url?$params_str'
}
- // Looking at the source code, this function doesn't actually fail, so I'm
- // not sure why it returns an optional
- mut req := http.new_request(method, full_url, body) or { return error('') }
- req.add_custom_header('X-Api-Key', c.api_key)!
+ mut req := http.new_request(method, full_url, body)?
+ req.add_custom_header('X-Api-Key', c.api_key)?
- res := req.do()!
+ res := req.do()?
return res
}
// send_request just calls send_request_with_body with an empty body.
-fn (c &Client) send_request[T](method Method, url string, params map[string]string) !Response[T] {
- return c.send_request_with_body[T](method, url, params, '')
+fn (c &Client) send_request(method Method, url string, params map[string]string) ?Response {
+ return c.send_request_with_body(method, url, params, '')
}
// send_request_with_body calls send_request_raw_response & parses its
// output as a Response object.
-fn (c &Client) send_request_with_body[T](method Method, url string, params map[string]string, body string) !Response[T] {
- res := c.send_request_raw(method, url, params, body)!
- status := res.status()
-
- // Non-successful requests are expected to return either an empty body or
- // Response
- if status.is_error() {
- // A non-successful status call will have an empty body
- if res.body == '' {
- return error('Error ${res.status_code} (${status.str()}): (empty response)')
- }
-
- data := json.decode(Response[string], res.body)!
-
- return error('Status ${res.status_code} (${status.str()}): ${data.message}')
- }
-
- // Just return an empty successful response
- if res.body == '' {
- return new_data_response(T{})
- }
-
- data := json.decode(Response[T], res.body)!
+fn (c &Client) send_request_with_body(method Method, url string, params map[string]string, body string) ?Response {
+ res_text := c.send_request_raw_response(method, url, params, body)?
+ data := json.decode(Response, res_text)?
return data
}
// send_request_raw_response returns the raw text response for an HTTP request.
-fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) !string {
- res := c.send_request_raw(method, url, params, body)!
+fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) ?string {
+ res := c.send_request_raw(method, url, params, body)?
return res.body
}
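One detail worth spelling out: without the query_escape call, a filter value containing `&`, `=` or spaces would corrupt the query string. A small illustration, assuming net.urllib escapes the way Go's url.QueryEscape does (the V module is ported from Go):

// hypothetical filter value, not taken from this diff
params := {
	'query': 'hello world&more'
}
// urllib.query_escape('hello world&more') == 'hello+world%26more',
// so send_request_raw would build:
//   https://example.com/api/v1/targets?query=hello+world%26more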
diff --git a/src/client/jobs.v b/src/client/jobs.v
deleted file mode 100644
index ddb9e2d..0000000
--- a/src/client/jobs.v
+++ /dev/null
@@ -1,23 +0,0 @@
-module client
-
-import models { BuildConfig }
-
-// poll_jobs requests a list of new build jobs from the server.
-pub fn (c &Client) poll_jobs(arch string, max int) ![]BuildConfig {
- data := c.send_request[[]BuildConfig](.get, '/api/v1/jobs/poll', {
- 'arch': arch
- 'max': max.str()
- })!
-
- return data.data
-}
-
-// queue_job adds a new one-time build job for the given target to the job
-// queue.
-pub fn (c &Client) queue_job(target_id int, arch string, force bool) ! {
- c.send_request[string](.post, '/api/v1/jobs/queue', {
- 'target': target_id.str()
- 'arch': arch
- 'force': force.str()
- })!
-}
diff --git a/src/client/logs.v b/src/client/logs.v
index ff6b7c5..f242f6e 100644
--- a/src/client/logs.v
+++ b/src/client/logs.v
@@ -1,33 +1,45 @@
module client
import models { BuildLog, BuildLogFilter }
-import web.response { Response }
+import net.http { Method }
+import response { Response }
import time
// get_build_logs returns all build logs.
-pub fn (c &Client) get_build_logs(filter BuildLogFilter) ![]BuildLog {
+pub fn (c &Client) get_build_logs(filter BuildLogFilter) ?Response<[]BuildLog> {
params := models.params_from(filter)
- data := c.send_request[[]BuildLog](.get, '/api/v1/logs', params)!
+ data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
- return data.data
+ return data
+}
+
+// get_build_logs_for_target returns all build logs for a given target.
+pub fn (c &Client) get_build_logs_for_target(target_id int) ?Response<[]BuildLog> {
+ params := {
+ 'repo': target_id.str()
+ }
+
+ data := c.send_request<[]BuildLog>(Method.get, '/api/v1/logs', params)?
+
+ return data
}
// get_build_log returns a specific build log.
-pub fn (c &Client) get_build_log(id int) !BuildLog {
- data := c.send_request[BuildLog](.get, '/api/v1/logs/${id}', {})!
+pub fn (c &Client) get_build_log(id int) ?Response {
+ data := c.send_request(Method.get, '/api/v1/logs/$id', {})?
- return data.data
+ return data
}
// get_build_log_content returns the contents of the build log file.
-pub fn (c &Client) get_build_log_content(id int) !string {
- data := c.send_request_raw_response(.get, '/api/v1/logs/${id}/content', {}, '')!
+pub fn (c &Client) get_build_log_content(id int) ?string {
+ data := c.send_request_raw_response(Method.get, '/api/v1/logs/$id/content', {}, '')?
return data
}
// add_build_log adds a new build log to the server.
-pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) !Response[int] {
+pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) ?Response {
params := {
'target': target_id.str()
'startTime': start_time.unix_time().str()
@@ -36,12 +48,7 @@ pub fn (c &Client) add_build_log(target_id int, start_time time.Time, end_time t
'exitCode': exit_code.str()
}
- data := c.send_request_with_body[int](.post, '/api/v1/logs', params, content)!
+ data := c.send_request_with_body(Method.post, '/api/v1/logs', params, content)?
return data
}
-
-// remove_build_log removes the build log with the given id from the server.
-pub fn (c &Client) remove_build_log(id int) ! {
- c.send_request[string](.delete, '/api/v1/logs/${id}', {})!
-}
diff --git a/src/client/repos.v b/src/client/repos.v
deleted file mode 100644
index dff5d90..0000000
--- a/src/client/repos.v
+++ /dev/null
@@ -1,16 +0,0 @@
-module client
-
-// remove_repo removes an entire repository.
-pub fn (c &Client) remove_repo(repo string) ! {
- c.send_request[string](.delete, '/${repo}', {})!
-}
-
-// remove_arch_repo removes an entire arch-repo.
-pub fn (c &Client) remove_arch_repo(repo string, arch string) ! {
- c.send_request[string](.delete, '/${repo}/${arch}', {})!
-}
-
-// remove_package removes a single package from the given arch-repo.
-pub fn (c &Client) remove_package(repo string, arch string, pkgname string) ! {
- c.send_request[string](.delete, '/${repo}/${arch}/${pkgname}', {})!
-}
diff --git a/src/client/targets.v b/src/client/targets.v
index 3d43d43..82c7878 100644
--- a/src/client/targets.v
+++ b/src/client/targets.v
@@ -1,23 +1,25 @@
module client
import models { Target, TargetFilter }
+import net.http { Method }
+import response { Response }
// get_targets returns a list of targets, given a filter object.
-pub fn (c &Client) get_targets(filter TargetFilter) ![]Target {
+pub fn (c &Client) get_targets(filter TargetFilter) ?[]Target {
params := models.params_from(filter)
- data := c.send_request[[]Target](.get, '/api/v1/targets', params)!
+ data := c.send_request<[]Target>(Method.get, '/api/v1/targets', params)?
return data.data
}
// get_all_targets retrieves *all* targets from the API using the default
// limit.
-pub fn (c &Client) get_all_targets() ![]Target {
+pub fn (c &Client) get_all_targets() ?[]Target {
mut targets := []Target{}
mut offset := u64(0)
for {
- sub_targets := c.get_targets(offset: offset)!
+ sub_targets := c.get_targets(offset: offset)?
if sub_targets.len == 0 {
break
@@ -32,8 +34,8 @@ pub fn (c &Client) get_all_targets() ![]Target {
}
// get_target returns the target for a specific id.
-pub fn (c &Client) get_target(id int) !Target {
- data := c.send_request[Target](.get, '/api/v1/targets/${id}', {})!
+pub fn (c &Client) get_target(id int) ?Target {
+ data := c.send_request(Method.get, '/api/v1/targets/$id', {})?
return data.data
}
@@ -43,29 +45,28 @@ pub struct NewTarget {
url string
branch string
repo string
- path string
arch []string
}
// add_target adds a new target to the server.
-pub fn (c &Client) add_target(t NewTarget) !int {
- params := models.params_from[NewTarget](t)
- data := c.send_request[int](.post, '/api/v1/targets', params)!
+pub fn (c &Client) add_target(t NewTarget) ?Response {
+ params := models.params_from(t)
+ data := c.send_request(Method.post, '/api/v1/targets', params)?
- return data.data
+ return data
}
// remove_target removes the target with the given id from the server.
-pub fn (c &Client) remove_target(id int) !string {
- data := c.send_request[string](.delete, '/api/v1/targets/${id}', {})!
+pub fn (c &Client) remove_target(id int) ?Response {
+ data := c.send_request(Method.delete, '/api/v1/targets/$id', {})?
- return data.data
+ return data
}
// patch_target sends a PATCH request to the given target with the params as
// payload.
-pub fn (c &Client) patch_target(id int, params map[string]string) !string {
- data := c.send_request[string](.patch, '/api/v1/targets/${id}', params)!
+pub fn (c &Client) patch_target(id int, params map[string]string) ?Response {
+ data := c.send_request(Method.patch, '/api/v1/targets/$id', params)?
- return data.data
+ return data
}
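Taken together, a minimal consumer sketch of the client (the address and key are placeholders; the wrapper function exists only so the `?` results have somewhere to propagate):

import client

fn list_targets_example() ? {
	c := client.new('https://example.com', 'secret-key')
	// filter fields become query parameters via models.params_from
	page := c.get_targets(limit: 10, offset: 20)?
	// or walk every page, the way get_all_targets does above
	all := c.get_all_targets()?
	println('fetched $page.len of $all.len targets')
}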
diff --git a/src/console/aur/aur.v b/src/console/aur/aur.v
deleted file mode 100644
index c1c409c..0000000
--- a/src/console/aur/aur.v
+++ /dev/null
@@ -1,62 +0,0 @@
-module aur
-
-import cli
-import console
-import client
-import aur
-import conf as vconf
-
-struct Config {
- address string [required]
- api_key string [required]
-}
-
-// cmd returns the cli module for interacting with the AUR API.
-pub fn cmd() cli.Command {
- return cli.Command{
- name: 'aur'
- description: 'Interact with the AUR.'
- commands: [
- cli.Command{
- name: 'search'
- description: 'Search for packages.'
- required_args: 1
- execute: fn (cmd cli.Command) ! {
- c := aur.new()
- pkgs := c.search(cmd.args[0])!
- data := pkgs.map([it.name, it.description])
-
- println(console.pretty_table(['name', 'description'], data)!)
- }
- },
- cli.Command{
- name: 'add'
- usage: 'repo pkg-name [pkg-name...]'
- description: 'Add the given AUR package(s) to Vieter. Non-existent packages will be silently ignored.'
- required_args: 2
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
-
- c := aur.new()
- pkgs := c.info(cmd.args[1..])!
-
- vc := client.new(conf_.address, conf_.api_key)
-
- for pkg in pkgs {
- vc.add_target(
- kind: 'git'
- url: 'https://aur.archlinux.org/${pkg.package_base}' + '.git'
- repo: cmd.args[0]
- ) or {
- println('Failed to add ${pkg.name}: ${err.msg()}')
- continue
- }
-
- println('Added ${pkg.name}' + '.')
- }
- }
- },
- ]
- }
-}
diff --git a/src/console/console.v b/src/console/console.v
index 5c40de8..7d782ba 100644
--- a/src/console/console.v
+++ b/src/console/console.v
@@ -5,15 +5,10 @@ import strings
import cli
import os
-// tabbed_table returns a simple textual table, with tabs as separators.
-pub fn tabbed_table(data [][]string) string {
- return data.map(it.join('\t')).join('\n')
-}
-
// pretty_table converts a list of string data into a pretty table. Many thanks
// to @hungrybluedev in the Vlang Discord for providing this code!
// https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192
-pub fn pretty_table(header []string, data [][]string) !string {
+pub fn pretty_table(header []string, data [][]string) ?string {
column_count := header.len
mut column_widths := []int{len: column_count, init: header[it].len}
@@ -26,7 +21,7 @@ pub fn pretty_table(header []string, data [][]string) !string {
}
}
- single_line_length := arrays.sum(column_widths)! + (column_count + 1) * 3 - 4
+ single_line_length := arrays.sum(column_widths)? + (column_count + 1) * 3 - 4
horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+'
mut buffer := strings.new_builder(data.len * single_line_length)
@@ -64,12 +59,12 @@ pub fn pretty_table(header []string, data [][]string) !string {
// export_man_pages recursively generates all man pages for the given
// cli.Command & writes them to the given directory.
-pub fn export_man_pages(cmd cli.Command, path string) ! {
+pub fn export_man_pages(cmd cli.Command, path string) ? {
man := cmd.manpage()
os.write_file(os.join_path_single(path, cmd.full_name().replace(' ', '-') + '.1'),
- man)!
+ man)?
for sub_cmd in cmd.commands {
- export_man_pages(sub_cmd, path)!
+ export_man_pages(sub_cmd, path)?
}
}
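pretty_table sizes each column to its widest cell before drawing the borders. A usage sketch (the rendered output is approximate):

data := [
	['1', 'git', 'https://examplerepo.com', 'vieter'],
]
println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)?)
// approximately:
// +----+------+-------------------------+--------+
// | id | kind | url                     | repo   |
// | 1  | git  | https://examplerepo.com | vieter |
// +----+------+-------------------------+--------+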
diff --git a/src/console/logs/logs.v b/src/console/logs/logs.v
index b8e088c..0f023bc 100644
--- a/src/console/logs/logs.v
+++ b/src/console/logs/logs.v
@@ -1,7 +1,7 @@
module logs
import cli
-import conf as vconf
+import vieter_v.conf as vconf
import client
import console
import time
@@ -24,13 +24,11 @@ pub fn cmd() cli.Command {
flags: [
cli.Flag{
name: 'limit'
- abbrev: 'l'
description: 'How many results to return.'
flag: cli.FlagType.int
},
cli.Flag{
name: 'offset'
- abbrev: 'o'
description: 'Minimum index to return.'
flag: cli.FlagType.int
},
@@ -41,18 +39,16 @@ pub fn cmd() cli.Command {
},
cli.Flag{
name: 'today'
- abbrev: 't'
- description: 'Only list logs started today. This flag overwrites any other date-related flag.'
+ description: 'Only list logs started today.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'failed'
- description: 'Only list logs with non-zero exit codes. This flag overwrites the --code flag.'
+ description: 'Only list logs with non-zero exit codes.'
flag: cli.FlagType.bool
},
cli.Flag{
name: 'day'
- abbrev: 'd'
description: 'Only list logs started on this day. (format: YYYY-MM-DD)'
flag: cli.FlagType.string
},
@@ -66,36 +62,31 @@ pub fn cmd() cli.Command {
description: 'Only list logs started after this timestamp. (format: YYYY-MM-DD HH:mm:ss)'
flag: cli.FlagType.string
},
- cli.Flag{
- name: 'code'
- description: 'Only return logs with the given exit code. Prepend with `!` to exclude instead of include. Can be specified multiple times.'
- flag: cli.FlagType.string_array
- },
]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
mut filter := BuildLogFilter{}
- limit := cmd.flags.get_int('limit')!
+ limit := cmd.flags.get_int('limit')?
if limit != 0 {
filter.limit = u64(limit)
}
- offset := cmd.flags.get_int('offset')!
+ offset := cmd.flags.get_int('offset')?
if offset != 0 {
filter.offset = u64(offset)
}
- target_id := cmd.flags.get_int('target')!
+ target_id := cmd.flags.get_int('target')?
if target_id != 0 {
filter.target = target_id
}
tz_offset := time.offset()
- if cmd.flags.get_bool('today')! {
+ if cmd.flags.get_bool('today')? {
today := time.now()
filter.after = time.new_time(time.Time{
@@ -107,12 +98,12 @@ pub fn cmd() cli.Command {
}
// The -today flag overwrites any of the other date flags.
else {
- day_str := cmd.flags.get_string('day')!
- before_str := cmd.flags.get_string('before')!
- after_str := cmd.flags.get_string('after')!
+ day_str := cmd.flags.get_string('day')?
+ before_str := cmd.flags.get_string('before')?
+ after_str := cmd.flags.get_string('after')?
if day_str != '' {
- day := time.parse_rfc3339(day_str)!
+ day := time.parse_rfc3339(day_str)?
day_utc := time.new_time(time.Time{
year: day.year
month: day.month
@@ -127,38 +118,22 @@ pub fn cmd() cli.Command {
filter.before = day_utc.add_days(1)
} else {
if before_str != '' {
- filter.before = time.parse(before_str)!.add_seconds(-tz_offset)
+ filter.before = time.parse(before_str)?.add_seconds(-tz_offset)
}
if after_str != '' {
- filter.after = time.parse(after_str)!.add_seconds(-tz_offset)
+ filter.after = time.parse(after_str)?.add_seconds(-tz_offset)
}
}
}
- if cmd.flags.get_bool('failed')! {
+ if cmd.flags.get_bool('failed')? {
filter.exit_codes = [
'!0',
]
- } else {
- filter.exit_codes = cmd.flags.get_strings('code')!
}
- raw := cmd.flags.get_bool('raw')!
-
- list(conf_, filter, raw)!
- }
- },
- cli.Command{
- name: 'remove'
- required_args: 1
- usage: 'id'
- description: 'Remove a build log that matches the given id.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
-
- remove(conf_, cmd.args[0])!
+ list(conf, filter)?
}
},
cli.Command{
@@ -166,12 +141,12 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Show all info for a specific build log.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
id := cmd.args[0].int()
- info(conf_, id)!
+ info(conf, id)?
}
},
cli.Command{
@@ -179,12 +154,12 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Output the content of a build log to stdout.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
id := cmd.args[0].int()
- content(conf_, id)!
+ content(conf, id)?
}
},
]
@@ -192,44 +167,42 @@ pub fn cmd() cli.Command {
}
// print_log_list prints a list of logs.
-fn print_log_list(logs []BuildLog, raw bool) ! {
+fn print_log_list(logs []BuildLog) ? {
data := logs.map([it.id.str(), it.target_id.str(), it.start_time.local().str(),
it.exit_code.str()])
- if raw {
- println(console.tabbed_table(data))
- } else {
- println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)!)
- }
+ println(console.pretty_table(['id', 'target', 'start time', 'exit code'], data)?)
}
// list prints a list of all build logs.
-fn list(conf_ Config, filter BuildLogFilter, raw bool) ! {
- c := client.new(conf_.address, conf_.api_key)
- logs := c.get_build_logs(filter)!
+fn list(conf Config, filter BuildLogFilter) ? {
+ c := client.new(conf.address, conf.api_key)
+ logs := c.get_build_logs(filter)?.data
- print_log_list(logs, raw)!
+ print_log_list(logs)?
+}
+
+// list_for_target prints a list of all build logs for a given target.
+fn list_for_target(conf Config, target_id int) ? {
+ c := client.new(conf.address, conf.api_key)
+ logs := c.get_build_logs_for_target(target_id)?.data
+
+ print_log_list(logs)?
}
// info print the detailed info for a given build log.
-fn info(conf_ Config, id int) ! {
- c := client.new(conf_.address, conf_.api_key)
- log := c.get_build_log(id)!
+fn info(conf Config, id int) ? {
+ c := client.new(conf.address, conf.api_key)
+ log := c.get_build_log(id)?.data
print(log)
}
// content outputs the contents of the log file for a given build log to
// stdout.
-fn content(conf_ Config, id int) ! {
- c := client.new(conf_.address, conf_.api_key)
- content := c.get_build_log_content(id)!
+fn content(conf Config, id int) ? {
+ c := client.new(conf.address, conf.api_key)
+ content := c.get_build_log_content(id)?
println(content)
}
-
-// remove removes a build log from the server's list.
-fn remove(conf_ Config, id string) ! {
- c := client.new(conf_.address, conf_.api_key)
- c.remove_build_log(id.int())!
-}
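For reference, some hypothetical invocations of the commands defined above (the exact subcommand and flag spellings are assumptions based on the definitions in this file):

vieter logs list -limit 20 -failed
vieter logs list -day 2022-05-01
vieter logs content 42 > build-42.log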
diff --git a/src/console/man/man.v b/src/console/man/man.v
index 22cb5f7..d91a140 100644
--- a/src/console/man/man.v
+++ b/src/console/man/man.v
@@ -11,11 +11,11 @@ pub fn cmd() cli.Command {
description: 'Generate all man pages & save them in the given directory.'
usage: 'dir'
required_args: 1
- execute: fn (cmd cli.Command) ! {
+ execute: fn (cmd cli.Command) ? {
root := cmd.root()
- os.mkdir_all(cmd.args[0])!
+ os.mkdir_all(cmd.args[0])?
- console.export_man_pages(root, cmd.args[0])!
+ console.export_man_pages(root, cmd.args[0])?
}
}
}
diff --git a/src/console/repos/repos.v b/src/console/repos/repos.v
deleted file mode 100644
index 3779d33..0000000
--- a/src/console/repos/repos.v
+++ /dev/null
@@ -1,52 +0,0 @@
-module repos
-
-import cli
-import conf as vconf
-import client
-
-struct Config {
- address string [required]
- api_key string [required]
-}
-
-// cmd returns the cli module that handles modifying the repository contents.
-pub fn cmd() cli.Command {
- return cli.Command{
- name: 'repos'
- description: 'Interact with the repositories & packages stored on the server.'
- commands: [
- cli.Command{
- name: 'remove'
- required_args: 1
- usage: 'repo [arch [pkgname]]'
- description: 'Remove a repo, arch-repo, or package from the server.'
- flags: [
- cli.Flag{
- name: 'force'
- flag: cli.FlagType.bool
- },
- ]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
-
- if cmd.args.len < 3 {
- if !cmd.flags.get_bool('force')! {
-					return error('Removing an arch-repo or repository is a very destructive command. If you really do wish to perform this operation, explicitly add the --force flag.')
- }
- }
-
- client_ := client.new(conf_.address, conf_.api_key)
-
- if cmd.args.len == 1 {
- client_.remove_repo(cmd.args[0])!
- } else if cmd.args.len == 2 {
- client_.remove_arch_repo(cmd.args[0], cmd.args[1])!
- } else {
- client_.remove_package(cmd.args[0], cmd.args[1], cmd.args[2])!
- }
- }
- },
- ]
- }
-}
diff --git a/src/console/schedule/schedule.v b/src/console/schedule/schedule.v
index ceabf24..8fceddd 100644
--- a/src/console/schedule/schedule.v
+++ b/src/console/schedule/schedule.v
@@ -1,7 +1,7 @@
module schedule
import cli
-import cron
+import cron.expression { parse_expression }
import time
// cmd returns the cli submodule for previewing a cron schedule.
@@ -18,11 +18,11 @@ pub fn cmd() cli.Command {
default_value: ['5']
},
]
- execute: fn (cmd cli.Command) ! {
- ce := cron.parse_expression(cmd.args.join(' '))!
- count := cmd.flags.get_int('count')!
+ execute: fn (cmd cli.Command) ? {
+ ce := parse_expression(cmd.args.join(' '))?
+ count := cmd.flags.get_int('count')?
- for t in ce.next_n(time.now(), count) {
+ for t in ce.next_n(time.now(), count)? {
println(t)
}
}
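A hypothetical invocation: previewing when a '0 3' schedule (daily at 03:00 in this cron dialect) would next fire, assuming the flag syntax of V's cli module:

vieter schedule -count 3 '0 3'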
diff --git a/src/console/targets/build.v b/src/console/targets/build.v
index 93464af..6337aa3 100644
--- a/src/console/targets/build.v
+++ b/src/console/targets/build.v
@@ -1,34 +1,34 @@
module targets
import client
-import docker
+import vieter_v.docker
import os
import build
// build locally builds the target with the given id.
-fn build_target(conf Config, target_id int, force bool, timeout int) ! {
+fn build(conf Config, target_id int) ? {
c := client.new(conf.address, conf.api_key)
- target := c.get_target(target_id)!
+ target := c.get_target(target_id)?
build_arch := os.uname().machine
println('Creating base image...')
- image_id := build.create_build_image(conf.base_image)!
+ image_id := build.create_build_image(conf.base_image)?
println('Running build...')
- res := build.build_target(conf.address, conf.api_key, image_id, target, force, timeout)!
+ res := build.build_target(conf.address, conf.api_key, image_id, target)?
println('Removing build image...')
- mut dd := docker.new_conn()!
+ mut dd := docker.new_conn()?
defer {
dd.close() or {}
}
- dd.image_remove(image_id)!
+ dd.remove_image(image_id)?
println('Uploading logs to Vieter...')
c.add_build_log(target.id, res.start_time, res.end_time, build_arch, res.exit_code,
- res.logs)!
+ res.logs)?
}
diff --git a/src/console/targets/targets.v b/src/console/targets/targets.v
index f85c4c0..66d48fb 100644
--- a/src/console/targets/targets.v
+++ b/src/console/targets/targets.v
@@ -1,8 +1,8 @@
module targets
import cli
-import conf as vconf
-import cron
+import vieter_v.conf as vconf
+import cron.expression { parse_expression }
import client { NewTarget }
import console
import models { TargetFilter }
@@ -13,7 +13,7 @@ struct Config {
base_image string = 'archlinux:base-devel'
}
-// cmd returns the cli submodule that handles the targets API interaction
+// cmd returns the cli submodule that handles the repos API interaction
pub fn cmd() cli.Command {
return cli.Command{
name: 'targets'
@@ -25,13 +25,11 @@ pub fn cmd() cli.Command {
flags: [
cli.Flag{
name: 'limit'
- abbrev: 'l'
description: 'How many results to return.'
flag: cli.FlagType.int
},
cli.Flag{
name: 'offset'
- abbrev: 'o'
description: 'Minimum index to return.'
flag: cli.FlagType.int
},
@@ -40,52 +38,29 @@ pub fn cmd() cli.Command {
description: 'Only return targets that publish to this repo.'
flag: cli.FlagType.string
},
- cli.Flag{
- name: 'query'
- abbrev: 'q'
- description: 'Search string to filter targets by.'
- flag: cli.FlagType.string
- },
- cli.Flag{
- name: 'arch'
- description: 'Only list targets that build for this arch.'
- flag: cli.FlagType.string
- },
]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
mut filter := TargetFilter{}
- limit := cmd.flags.get_int('limit')!
+ limit := cmd.flags.get_int('limit')?
if limit != 0 {
filter.limit = u64(limit)
}
- offset := cmd.flags.get_int('offset')!
+ offset := cmd.flags.get_int('offset')?
if offset != 0 {
filter.offset = u64(offset)
}
- repo := cmd.flags.get_string('repo')!
+ repo := cmd.flags.get_string('repo')?
if repo != '' {
filter.repo = repo
}
- query := cmd.flags.get_string('query')!
- if query != '' {
- filter.query = query
- }
-
- arch := cmd.flags.get_string('arch')!
- if arch != '' {
- filter.arch = arch
- }
-
- raw := cmd.flags.get_bool('raw')!
-
- list(conf_, filter, raw)!
+ list(conf, filter)?
}
},
cli.Command{
@@ -105,27 +80,19 @@ pub fn cmd() cli.Command {
description: "Which branch to clone; only applies to kind 'git'."
flag: cli.FlagType.string
},
- cli.Flag{
- name: 'path'
- description: 'Subdirectory inside Git repository to use.'
- flag: cli.FlagType.string
- },
]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
t := NewTarget{
- kind: cmd.flags.get_string('kind')!
+ kind: cmd.flags.get_string('kind')?
url: cmd.args[0]
repo: cmd.args[1]
branch: cmd.flags.get_string('branch') or { '' }
- path: cmd.flags.get_string('path') or { '' }
}
- raw := cmd.flags.get_bool('raw')!
-
- add(conf_, t, raw)!
+ add(conf, t)?
}
},
cli.Command{
@@ -133,11 +100,11 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Remove a target that matches the given id.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
- remove(conf_, cmd.args[0])!
+ remove(conf, cmd.args[0])?
}
},
cli.Command{
@@ -145,11 +112,11 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Show detailed information for the target matching the id.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
- info(conf_, cmd.args[0])!
+ info(conf, cmd.args[0])?
}
},
cli.Command{
@@ -188,15 +155,10 @@ pub fn cmd() cli.Command {
description: 'Kind of target.'
flag: cli.FlagType.string
},
- cli.Flag{
- name: 'path'
- description: 'Subdirectory inside Git repository to use.'
- flag: cli.FlagType.string
- },
]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
found := cmd.flags.get_all_found()
@@ -204,11 +166,11 @@ pub fn cmd() cli.Command {
for f in found {
if f.name != 'config-file' {
- params[f.name] = f.get_string()!
+ params[f.name] = f.get_string()?
}
}
- patch(conf_, cmd.args[0], params)!
+ patch(conf, cmd.args[0], params)?
}
},
cli.Command{
@@ -216,104 +178,76 @@ pub fn cmd() cli.Command {
required_args: 1
usage: 'id'
description: 'Build the target with the given id & publish it.'
- flags: [
- cli.Flag{
- name: 'force'
- description: 'Build the target without checking whether it needs to be renewed.'
- flag: cli.FlagType.bool
- },
- cli.Flag{
- name: 'remote'
- description: 'Schedule the build on the server instead of running it locally.'
- flag: cli.FlagType.bool
- },
- cli.Flag{
- name: 'arch'
- description: 'Architecture to schedule build for. Required when using -remote.'
- flag: cli.FlagType.string
- },
- cli.Flag{
- name: 'timeout'
- description: 'After how many minutes to cancel the build. Only applies to local builds.'
- flag: cli.FlagType.int
- default_value: ['3600']
- },
- ]
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
- remote := cmd.flags.get_bool('remote')!
- force := cmd.flags.get_bool('force')!
- timeout := cmd.flags.get_int('timeout')!
- target_id := cmd.args[0].int()
-
- if remote {
- arch := cmd.flags.get_string('arch')!
-
- if arch == '' {
- return error('When scheduling the build remotely, you have to specify an architecture.')
- }
-
- c := client.new(conf_.address, conf_.api_key)
- c.queue_job(target_id, arch, force)!
- } else {
- build_target(conf_, target_id, force, timeout)!
- }
+ build(conf, cmd.args[0].int())?
}
},
]
}
}
+
// list prints out a list of all repositories.
-fn list(conf_ Config, filter TargetFilter, raw bool) ! {
- c := client.new(conf_.address, conf_.api_key)
- targets := c.get_targets(filter)!
- data := targets.map([it.id.str(), it.kind, it.url, it.repo])
+fn list(conf Config, filter TargetFilter) ? {
+ c := client.new(conf.address, conf.api_key)
+ repos := c.get_targets(filter)?
+ data := repos.map([it.id.str(), it.kind, it.url, it.repo])
- if raw {
- println(console.tabbed_table(data))
- } else {
- println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)!)
+ println(console.pretty_table(['id', 'kind', 'url', 'repo'], data)?)
+}
+
+// add adds a new repository to the server's list.
+fn add(conf Config, t &NewTarget) ? {
+ c := client.new(conf.address, conf.api_key)
+ res := c.add_target(t)?
+
+ println(res.message)
+}
+
+// remove removes a repository from the server's list.
+fn remove(conf Config, id string) ? {
+ id_int := id.int()
+
+ if id_int != 0 {
+ c := client.new(conf.address, conf.api_key)
+ res := c.remove_target(id_int)?
+ println(res.message)
}
}
-// add adds a new target to the server's list.
-fn add(conf_ Config, t &NewTarget, raw bool) ! {
- c := client.new(conf_.address, conf_.api_key)
- target_id := c.add_target(t)!
-
- if raw {
- println(target_id)
- } else {
- println('Target added with id ${target_id}')
- }
-}
-
-// remove removes a target from the server's list.
-fn remove(conf_ Config, id string) ! {
- c := client.new(conf_.address, conf_.api_key)
- c.remove_target(id.int())!
-}
-
-// patch patches a given target with the provided params.
-fn patch(conf_ Config, id string, params map[string]string) ! {
+// patch patches a given repository with the provided params.
+fn patch(conf Config, id string, params map[string]string) ? {
// We check the cron expression first because it's useless to send an
// invalid one to the server.
if 'schedule' in params && params['schedule'] != '' {
- cron.parse_expression(params['schedule']) or {
- return error('Invalid cron expression: ${err.msg()}')
+ parse_expression(params['schedule']) or {
+ return error('Invalid cron expression: $err.msg()')
}
}
- c := client.new(conf_.address, conf_.api_key)
- c.patch_target(id.int(), params)!
+ id_int := id.int()
+ if id_int != 0 {
+ c := client.new(conf.address, conf.api_key)
+ res := c.patch_target(id_int, params)?
+
+ println(res.message)
+ }
}
-// info shows detailed information for a given target.
-fn info(conf_ Config, id string) ! {
- c := client.new(conf_.address, conf_.api_key)
- target := c.get_target(id.int())!
- println(target)
+// info shows detailed information for a given repo.
+fn info(conf Config, id string) ? {
+ id_int := id.int()
+
+ if id_int == 0 {
+ return
+ }
+
+ c := client.new(conf.address, conf.api_key)
+ repo := c.get_target(id_int)?
+ println(repo)
}
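Hypothetical invocations of the subcommands above; note that patch validates the cron expression locally, so an invalid schedule is rejected before the server is ever contacted:

vieter targets add -kind git 'https://examplerepo.com' vieter
vieter targets patch -schedule '0 2' 5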
diff --git a/src/cron/cli.v b/src/cron/cli.v
new file mode 100644
index 0000000..4d95833
--- /dev/null
+++ b/src/cron/cli.v
@@ -0,0 +1,32 @@
+module cron
+
+import cli
+import vieter_v.conf as vconf
+
+struct Config {
+pub:
+ log_level string = 'WARN'
+ api_key string
+ address string
+ data_dir string
+ base_image string = 'archlinux:base-devel'
+ max_concurrent_builds int = 1
+ api_update_frequency int = 15
+ image_rebuild_frequency int = 1440
+ // Replicates the behavior of the original cron system
+ global_schedule string = '0 3'
+}
+
+// cmd returns the cli module that handles the cron daemon.
+pub fn cmd() cli.Command {
+ return cli.Command{
+ name: 'cron'
+ description: 'Start the cron service that periodically runs builds.'
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+ conf := vconf.load(prefix: 'VIETER_', default_path: config_file)?
+
+ cron(conf)?
+ }
+ }
+}
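Since vconf loads with the 'VIETER_' prefix, the cron daemon can be configured entirely from the environment. A sketch (the exact variable name derived from each struct field is an assumption):

VIETER_ADDRESS=https://example.com \
VIETER_API_KEY=secret \
VIETER_DATA_DIR=/var/lib/vieter \
vieter cron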
diff --git a/src/cron/cron.v b/src/cron/cron.v
new file mode 100644
index 0000000..5f128cf
--- /dev/null
+++ b/src/cron/cron.v
@@ -0,0 +1,33 @@
+module cron
+
+import log
+import cron.daemon
+import cron.expression
+import os
+
+const log_file_name = 'vieter.cron.log'
+
+// cron starts the cron daemon & begins periodically scheduling builds.
+pub fn cron(conf Config) ? {
+ // Configure logger
+ log_level := log.level_from_tag(conf.log_level) or {
+ return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
+ }
+
+ mut logger := log.Log{
+ level: log_level
+ }
+
+ log_file := os.join_path_single(conf.data_dir, cron.log_file_name)
+ logger.set_full_logpath(log_file)
+ logger.log_to_console_too()
+
+ ce := expression.parse_expression(conf.global_schedule) or {
+ return error('Error while parsing global cron expression: $err.msg()')
+ }
+
+ mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
+ conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)?
+
+ d.run()
+}
diff --git a/src/cron/daemon/build.v b/src/cron/daemon/build.v
new file mode 100644
index 0000000..beed9fc
--- /dev/null
+++ b/src/cron/daemon/build.v
@@ -0,0 +1,115 @@
+module daemon
+
+import time
+import sync.stdatomic
+import build
+import os
+
+const (
+ build_empty = 0
+ build_running = 1
+ build_done = 2
+)
+
+// clean_finished_builds removes finished builds from the build slots & returns
+// them.
+fn (mut d Daemon) clean_finished_builds() []ScheduledBuild {
+ mut out := []ScheduledBuild{}
+
+ for i in 0 .. d.atomics.len {
+ if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
+ stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
+ out << d.builds[i]
+ }
+ }
+
+ return out
+}
+
+// start_new_builds starts as many builds as possible.
+fn (mut d Daemon) start_new_builds() {
+ now := time.now()
+
+ for d.queue.len() > 0 {
+ elem := d.queue.peek() or {
+ d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
+
+ break
+ }
+
+ if elem.timestamp < now {
+ sb := d.queue.pop() or {
+ d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.")
+
+ break
+ }
+
+ // If this build couldn't be scheduled, no more will be possible.
+ if !d.start_build(sb) {
+ d.queue.insert(sb)
+ break
+ }
+ } else {
+ break
+ }
+ }
+}
+
+// start_build starts a build for the given ScheduledBuild object.
+fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
+ for i in 0 .. d.atomics.len {
+ if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
+ stdatomic.store_u64(&d.atomics[i], daemon.build_running)
+ d.builds[i] = sb
+
+ go d.run_build(i, sb)
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// run_build actually starts the build process for a given target.
+fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
+ d.linfo('started build: $sb.target.url -> $sb.target.repo')
+
+ // 0 means success, 1 means failure
+ mut status := 0
+
+ res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(),
+ &sb.target) or {
+ d.ldebug('build_target error: $err.msg()')
+ status = 1
+
+ build.BuildResult{}
+ }
+
+ if status == 0 {
+ d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...')
+
+ build_arch := os.uname().machine
+ d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch,
+ res.exit_code, res.logs) or {
+ d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo')
+ }
+ } else {
+		d.linfo('an error occurred during build: $sb.target.url -> $sb.target.repo')
+ }
+
+ stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
+}
+
+// current_build_count returns how many builds are currently running.
+fn (mut d Daemon) current_build_count() int {
+ mut res := 0
+
+ for i in 0 .. d.atomics.len {
+ if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
+ res += 1
+ }
+ }
+
+ return res
+}
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
new file mode 100644
index 0000000..934d35a
--- /dev/null
+++ b/src/cron/daemon/daemon.v
@@ -0,0 +1,274 @@
+module daemon
+
+import time
+import log
+import datatypes { MinHeap }
+import cron.expression { CronExpression, parse_expression }
+import math
+import build
+import vieter_v.docker
+import os
+import client
+import models { Target }
+
+const (
+	// How many seconds to wait before retrying an API update after a failure
+	api_update_retry_timeout = 5
+	// How many seconds to wait before retrying to rebuild the base image
+	// after a failure
+	rebuild_base_image_retry_timeout = 30
+)
+
+struct ScheduledBuild {
+pub:
+ target Target
+ timestamp time.Time
+}
+
+// Overloaded operator for comparing ScheduledBuild objects
+fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
+ return r1.timestamp < r2.timestamp
+}
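+
+// Thanks to this overload, the daemon's MinHeap always yields the scheduled
+// build with the earliest timestamp first. A small sketch, with hypothetical
+// timestamps:
+//
+// ```v
+// mut q := MinHeap<ScheduledBuild>{}
+// q.insert(ScheduledBuild{ timestamp: time.unix(200) })
+// q.insert(ScheduledBuild{ timestamp: time.unix(100) })
+// assert q.peek()?.timestamp == time.unix(100)
+// ```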
+
+pub struct Daemon {
+mut:
+ client client.Client
+ base_image string
+ builder_images []string
+ global_schedule CronExpression
+ api_update_frequency int
+ image_rebuild_frequency int
+ // Targets currently loaded from API.
+ targets []Target
+ // At what point to update the list of targets.
+ api_update_timestamp time.Time
+ image_build_timestamp time.Time
+	queue MinHeap<ScheduledBuild>
+ // Which builds are currently running
+ builds []ScheduledBuild
+ // Atomic variables used to detect when a build has finished; length is the
+ // same as builds
+ atomics []u64
+ logger shared log.Log
+}
+
+// init_daemon initializes a new Daemon object. It renews the targets &
+// populates the build queue for the first time.
+pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
+ mut d := Daemon{
+ client: client.new(address, api_key)
+ base_image: base_image
+ global_schedule: global_schedule
+ api_update_frequency: api_update_frequency
+ image_rebuild_frequency: image_rebuild_frequency
+ atomics: []u64{len: max_concurrent_builds}
+ builds: []ScheduledBuild{len: max_concurrent_builds}
+ logger: logger
+ }
+
+ // Initialize the targets & queue
+ d.renew_targets()
+ d.renew_queue()
+ if !d.rebuild_base_image() {
+ return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.')
+ }
+
+ return d
+}
+
+// run starts the actual daemon process. It runs builds when possible &
+// periodically refreshes the list of targets to ensure we stay in sync.
+pub fn (mut d Daemon) run() {
+ for {
+ finished_builds := d.clean_finished_builds()
+
+ // Update the API's contents if needed & renew the queue
+ if time.now() >= d.api_update_timestamp {
+ d.renew_targets()
+ d.renew_queue()
+ }
+ // The finished builds should only be rescheduled if the API contents
+ // haven't been renewed.
+ else {
+ for sb in finished_builds {
+ d.schedule_build(sb.target)
+ }
+ }
+
+ // TODO remove old builder images.
+ // This issue is less trivial than it sounds, because a build could
+ // still be running when the image has to be rebuilt. That would
+ // prevent the image from being removed. Therefore, we will need to
+ // keep track of a list or something & remove an image once we have
+ // made sure it isn't being used anymore.
+ if time.now() >= d.image_build_timestamp {
+ d.rebuild_base_image()
+ // In theory, executing this function here allows an old builder
+ // image to exist for at most image_rebuild_frequency minutes.
+ d.clean_old_base_images()
+ }
+
+ // Schedules new builds when possible
+ d.start_new_builds()
+
+ // If there are builds currently running, the daemon should refresh
+ // every second to clean up any finished builds & start new ones.
+ mut delay := time.Duration(1 * time.second)
+
+ // Sleep either until we have to refresh the targets or when the next
+ // build has to start, with a minimum of 1 second.
+ if d.current_build_count() == 0 {
+ now := time.now()
+ delay = d.api_update_timestamp - now
+
+ if d.queue.len() > 0 {
+ elem := d.queue.peek() or {
+ d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.")
+
+ // This is just a fallback option. In theory, queue.peek()
+ // should *never* return an error or none, because we check
+ // its len beforehand.
+				time.sleep(1 * time.second)
+ continue
+ }
+
+ time_until_next_job := elem.timestamp - now
+
+ delay = math.min(delay, time_until_next_job)
+ }
+ }
+
+		// We sleep for at least one second. This prevents the program from
+		// looping aggressively when a cronjob can be scheduled, but no build
+		// slot is free to run it.
+ delay = math.max(delay, 1 * time.second)
+
+ d.ldebug('Sleeping for ${delay}...')
+
+ time.sleep(delay)
+ }
+}
+
+// schedule_build adds the next occurrence of the given target's build to the
+// queue.
+fn (mut d Daemon) schedule_build(target Target) {
+ ce := if target.schedule != '' {
+ parse_expression(target.schedule) or {
+ // TODO This shouldn't return an error if the expression is empty.
+ d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()")
+
+ d.global_schedule
+ }
+ } else {
+ d.global_schedule
+ }
+
+ // A target that can't be scheduled will just be skipped for now
+ timestamp := ce.next_from_now() or {
+ d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping")
+ return
+ }
+
+ d.queue.insert(ScheduledBuild{
+ target: target
+ timestamp: timestamp
+ })
+}
+
+// renew_targets requests the newest list of targets from the server & replaces
+// the old one.
+fn (mut d Daemon) renew_targets() {
+ d.linfo('Renewing targets...')
+
+ mut new_targets := d.client.get_all_targets() or {
+ d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...')
+ d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)
+
+ return
+ }
+
+ // Filter out any targets that shouldn't run on this architecture
+ cur_arch := os.uname().machine
+ new_targets = new_targets.filter(it.arch.any(it.value == cur_arch))
+
+ d.targets = new_targets
+
+ d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
+}
+
+// renew_queue replaces the old queue with a new one that reflects the newest
+// values in targets.
+fn (mut d Daemon) renew_queue() {
+ d.linfo('Renewing queue...')
+	mut new_queue := MinHeap<ScheduledBuild>{}
+
+ // Move any jobs that should have already started from the old queue onto
+ // the new one
+ now := time.now()
+
+	// For some reason, using
+	// ```v
+	// for d.queue.len() > 0 && d.queue.peek()?.timestamp < now {
+	// ```
+	// here causes the function to prematurely exit, without any errors or
+	// anything, which is very strange.
+ // https://github.com/vlang/v/issues/14042
+ for d.queue.len() > 0 {
+ elem := d.queue.pop() or {
+ d.lerror("queue.pop() returned an error. This shouldn't happen.")
+ continue
+ }
+
+ if elem.timestamp < now {
+ new_queue.insert(elem)
+ } else {
+ break
+ }
+ }
+
+ d.queue = new_queue
+
+ // For each target in targets, parse their cron expression (or use the
+ // default one if not present) & add them to the queue
+ for target in d.targets {
+ d.schedule_build(target)
+ }
+}
+
+// rebuild_base_image recreates the builder image.
+fn (mut d Daemon) rebuild_base_image() bool {
+	d.linfo('Rebuilding builder image...')
+
+ d.builder_images << build.create_build_image(d.base_image) or {
+		d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timeout}s...')
+		d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timeout)
+
+ return false
+ }
+
+ d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
+
+ return true
+}
+
+// clean_old_base_images tries to remove any old but still present builder
+// images.
+fn (mut d Daemon) clean_old_base_images() {
+ mut i := 0
+
+ mut dd := docker.new_conn() or {
+ d.lerror('Failed to connect to Docker socket.')
+ return
+ }
+
+ defer {
+ dd.close() or {}
+ }
+
+	for i < d.builder_images.len - 1 {
+		// For each builder image, we try to remove it by calling the Docker
+		// API. If the call returns an error or false, the image wasn't
+		// deleted, so we move the index over. If it returns true, we remove
+		// the entry from the array, which shifts the next image into the
+		// current index.
+		if dd.remove_image(d.builder_images[i]) or { false } {
+			d.builder_images.delete(i)
+		} else {
+			i += 1
+		}
+	}
+}
diff --git a/src/cron/daemon/log.v b/src/cron/daemon/log.v
new file mode 100644
index 0000000..003898b
--- /dev/null
+++ b/src/cron/daemon/log.v
@@ -0,0 +1,35 @@
+module daemon
+
+import log
+
+// log creates a log message with the given level
+pub fn (mut d Daemon) log(msg &string, level log.Level) {
+ lock d.logger {
+ d.logger.send_output(msg, level)
+ }
+}
+
+// lfatal creates a log message with the fatal level
+pub fn (mut d Daemon) lfatal(msg &string) {
+ d.log(msg, log.Level.fatal)
+}
+
+// lerror creates a log message with the error level
+pub fn (mut d Daemon) lerror(msg &string) {
+ d.log(msg, log.Level.error)
+}
+
+// lwarn creates a log message with the warn level
+pub fn (mut d Daemon) lwarn(msg &string) {
+ d.log(msg, log.Level.warn)
+}
+
+// linfo creates a log message with the info level
+pub fn (mut d Daemon) linfo(msg &string) {
+ d.log(msg, log.Level.info)
+}
+
+// ldebug creates a log message with the debug level
+pub fn (mut d Daemon) ldebug(msg &string) {
+ d.log(msg, log.Level.debug)
+}
diff --git a/src/cron/expression.c.v b/src/cron/expression.c.v
deleted file mode 100644
index e9686d6..0000000
--- a/src/cron/expression.c.v
+++ /dev/null
@@ -1,101 +0,0 @@
-module cron
-
-#flag -I @VMODROOT/libvieter/include
-#flag -L @VMODROOT/libvieter/build
-#flag -lvieter
-#include "vieter_cron.h"
-
-[typedef]
-pub struct C.vieter_cron_expression {
- minutes &u8
- hours &u8
- days &u8
- months &u8
- minute_count u8
- hour_count u8
- day_count u8
- month_count u8
-}
-
-pub type Expression = C.vieter_cron_expression
-
-// == returns whether the two expressions are equal by value.
-fn (ce1 Expression) == (ce2 Expression) bool {
- if ce1.month_count != ce2.month_count || ce1.day_count != ce2.day_count
- || ce1.hour_count != ce2.hour_count || ce1.minute_count != ce2.minute_count {
- return false
- }
-
- for i in 0 .. ce1.month_count {
- unsafe {
- if ce1.months[i] != ce2.months[i] {
- return false
- }
- }
- }
- for i in 0 .. ce1.day_count {
- unsafe {
- if ce1.days[i] != ce2.days[i] {
- return false
- }
- }
- }
- for i in 0 .. ce1.hour_count {
- unsafe {
- if ce1.hours[i] != ce2.hours[i] {
- return false
- }
- }
- }
- for i in 0 .. ce1.minute_count {
- unsafe {
- if ce1.minutes[i] != ce2.minutes[i] {
- return false
- }
- }
- }
-
- return true
-}
-
-[typedef]
-struct C.vieter_cron_simple_time {
- year int
- month int
- day int
- hour int
- minute int
-}
-
-type SimpleTime = C.vieter_cron_simple_time
-
-enum ParseError as u8 {
- ok = 0
- invalid_expression = 1
- invalid_number = 2
- out_of_range = 3
- too_many_parts = 4
- not_enough_parts = 5
-}
-
-// str returns the string representation of a ParseError.
-fn (e ParseError) str() string {
- return match e {
- .ok { '' }
- .invalid_expression { 'Invalid expression' }
- .invalid_number { 'Invalid number' }
- .out_of_range { 'Out of range' }
- .too_many_parts { 'Too many parts' }
- .not_enough_parts { 'Not enough parts' }
- }
-}
-
-fn C.vieter_cron_expr_init() &C.vieter_cron_expression
-
-fn C.vieter_cron_expr_free(ce &C.vieter_cron_expression)
-
-fn C.vieter_cron_expr_next(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression, ref &C.vieter_cron_simple_time)
-
-fn C.vieter_cron_expr_next_from_now(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression)
-
-fn C.vieter_cron_expr_parse(out &C.vieter_cron_expression, s &char) ParseError
diff --git a/src/cron/expression.v b/src/cron/expression.v
deleted file mode 100644
index 62692fa..0000000
--- a/src/cron/expression.v
+++ /dev/null
@@ -1,73 +0,0 @@
-module cron
-
-import time
-
-// free the memory associated with the Expression.
-[unsafe]
-pub fn (ce &Expression) free() {
- C.vieter_cron_expr_free(ce)
-}
-
-// parse_expression parses a string into an Expression.
-pub fn parse_expression(exp string) !&Expression {
- out := C.vieter_cron_expr_init()
- res := C.vieter_cron_expr_parse(out, exp.str)
-
- if res != .ok {
- return error(res.str())
- }
-
- return out
-}
-
-// next calculates the next occurence of the cron schedule, given a reference
-// point.
-pub fn (ce &Expression) next(ref time.Time) time.Time {
- st := SimpleTime{
- year: ref.year
- month: ref.month
- day: ref.day
- hour: ref.hour
- minute: ref.minute
- }
-
- out := SimpleTime{}
- C.vieter_cron_expr_next(&out, ce, &st)
-
- return time.new_time(time.Time{
- year: out.year
- month: out.month
- day: out.day
- hour: out.hour
- minute: out.minute
- })
-}
-
-// next_from_now calculates the next occurence of the cron schedule with the
-// current time as reference.
-pub fn (ce &Expression) next_from_now() time.Time {
- out := SimpleTime{}
- C.vieter_cron_expr_next_from_now(&out, ce)
-
- return time.new_time(time.Time{
- year: out.year
- month: out.month
- day: out.day
- hour: out.hour
- minute: out.minute
- })
-}
-
-// next_n returns the n next occurences of the expression, given a starting
-// time.
-pub fn (ce &Expression) next_n(ref time.Time, n int) []time.Time {
- mut times := []time.Time{cap: n}
-
- times << ce.next(ref)
-
- for i in 1 .. n {
- times << ce.next(times[i - 1])
- }
-
- return times
-}
diff --git a/src/cron/expression/expression.v b/src/cron/expression/expression.v
new file mode 100644
index 0000000..17d2dde
--- /dev/null
+++ b/src/cron/expression/expression.v
@@ -0,0 +1,275 @@
+module expression
+
+import time
+
+pub struct CronExpression {
+ minutes []int
+ hours []int
+ days []int
+ months []int
+}
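+
+// As an illustration, the expression '0 3' (minute 0, hour 3, every day of
+// every month) corresponds to the following object:
+//
+// ```v
+// CronExpression{
+//	minutes: [0]
+//	hours: [3]
+//	days: [1, 2, ..., 31]
+//	months: [1, 2, ..., 12]
+// }
+// ```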
+
+// next calculates the earliest time this cron expression is valid. It will
+// always pick a moment in the future, even if ref matches completely up to the
+// minute. This function consciously does not take leap years into account.
+pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
+	// Because we always want to return a value in the future, the minute
+	// comparison below uses >= instead of >, so ref's own minute can never be
+	// matched exactly. One minute of granularity is enough, because a cron
+	// expression does not allow for accuracy smaller than one minute.
+	sref := ref
+
+ // For all of these values, the rule is the following: if their value is
+ // the length of their respective array in the CronExpression object, that
+ // means we've looped back around. This means that the "bigger" value has
+ // to be incremented by one. For example, if the minutes have looped
+ // around, that means that the hour has to be incremented as well.
+ mut minute_index := 0
+ mut hour_index := 0
+ mut day_index := 0
+ mut month_index := 0
+
+ // This chain is the same logic multiple times, namely that if a "bigger"
+ // value loops around, then the smaller value will always reset as well.
+ // For example, if we're going to a new day, the hour & minute will always
+ // be their smallest value again.
+ for month_index < ce.months.len && sref.month > ce.months[month_index] {
+ month_index++
+ }
+
+ if month_index < ce.months.len && sref.month == ce.months[month_index] {
+ for day_index < ce.days.len && sref.day > ce.days[day_index] {
+ day_index++
+ }
+
+ if day_index < ce.days.len && ce.days[day_index] == sref.day {
+ for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] {
+ hour_index++
+ }
+
+ if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour {
+				// Minute is the only value where we explicitly make sure we
+				// can't match sref's value exactly. This is to ensure we only
+				// return values in the future.
+ for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] {
+ minute_index++
+ }
+ }
+ }
+ }
+
+ // Here, we increment the "bigger" values by one if the smaller ones loop
+ // around. The order is important, as it allows a sort-of waterfall effect
+ // to occur which updates all values if required.
+ if minute_index == ce.minutes.len && hour_index < ce.hours.len {
+ hour_index += 1
+ }
+
+ if hour_index == ce.hours.len && day_index < ce.days.len {
+ day_index += 1
+ }
+
+ if day_index == ce.days.len && month_index < ce.months.len {
+ month_index += 1
+ }
+
+ mut minute := ce.minutes[minute_index % ce.minutes.len]
+ mut hour := ce.hours[hour_index % ce.hours.len]
+ mut day := ce.days[day_index % ce.days.len]
+
+ // Sometimes, we end up with a day that does not exist within the selected
+ // month, e.g. day 30 in February. When this occurs, we reset day back to
+ // the smallest value & loop over to the next month that does have this
+ // day.
+ if day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
+ day = ce.days[0]
+ month_index += 1
+
+		for day > time.month_days[ce.months[month_index % ce.months.len] - 1] {
+ month_index += 1
+
+ // If for whatever reason the day value ends up being something
+ // that can't be scheduled in any month, we have to make sure we
+ // don't create an infinite loop.
+ if month_index == 2 * ce.months.len {
+ return error('No schedulable moment.')
+ }
+ }
+ }
+
+ month := ce.months[month_index % ce.months.len]
+ mut year := sref.year
+
+ // If the month loops over, we need to increment the year.
+ if month_index >= ce.months.len {
+ year++
+ }
+
+ return time.new_time(time.Time{
+ year: year
+ month: month
+ day: day
+ minute: minute
+ hour: hour
+ })
+}
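+
+// For example, with the schedule '0 3' (daily at 03:00), calling next with
+// ref = 2002-01-01 03:00 yields 2002-01-02 03:00; the result always lies
+// strictly in the future (see expression_test.v).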
+
+// next_from_now returns the result of ce.next(ref) where ref is the result of
+// time.now().
+pub fn (ce &CronExpression) next_from_now() ?time.Time {
+ return ce.next(time.now())
+}
+
+// next_n returns the n next occurrences of the expression, given a starting
+// time.
+pub fn (ce &CronExpression) next_n(ref time.Time, n int) ?[]time.Time {
+ mut times := []time.Time{cap: n}
+
+ times << ce.next(ref)?
+
+ for i in 1 .. n {
+ times << ce.next(times[i - 1])?
+ }
+
+ return times
+}
+
+// parse_range parses a given string into a range of sorted integers, if
+// possible, marking each value in the given bit vector.
+fn parse_range(s string, min int, max int, mut bitv []bool) ? {
+ mut start := min
+ mut end := max
+ mut interval := 1
+
+ exps := s.split('/')
+
+ if exps.len > 2 {
+ return error('Invalid expression.')
+ }
+
+ if exps[0] != '*' {
+ dash_parts := exps[0].split('-')
+
+ if dash_parts.len > 2 {
+ return error('Invalid expression.')
+ }
+
+ start = dash_parts[0].int()
+
+		// The builtin parsing functions return zero if the string can't be
+		// parsed into a number, so we have to explicitly check whether the
+		// user actually entered zero or whether the number is invalid.
+ if start == 0 && dash_parts[0] != '0' {
+ return error('Invalid number.')
+ }
+
+ // Check whether the start value is out of range
+ if start < min || start > max {
+ return error('Out of range.')
+ }
+
+ if dash_parts.len == 2 {
+ end = dash_parts[1].int()
+
+ if end == 0 && dash_parts[1] != '0' {
+ return error('Invalid number.')
+ }
+
+ if end < start || end > max {
+ return error('Out of range.')
+ }
+ }
+ }
+
+ if exps.len > 1 {
+ interval = exps[1].int()
+
+ // interval being zero is always invalid, but we want to check why
+ // it's invalid for better error messages.
+ if interval == 0 {
+ if exps[1] != '0' {
+ return error('Invalid number.')
+ } else {
+ return error('Step size zero not allowed.')
+ }
+ }
+
+ if interval > max - min {
+ return error('Step size too large.')
+ }
+ }
+ // Here, s solely consists of a number, so that's the only value we
+ // should return.
+ else if exps[0] != '*' && !exps[0].contains('-') {
+ bitv[start - min] = true
+ return
+ }
+
+ for start <= end {
+ bitv[start - min] = true
+ start += interval
+ }
+}
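+
+// For example, the range '4-8/2' over [0, 9] marks the values 4, 6 & 8 (see
+// expression_parse_test.v):
+//
+// ```v
+// mut bitv := []bool{len: 10, init: false}
+// parse_range('4-8/2', 0, 9, mut bitv)?
+// assert bitv_to_ints(bitv, 0) == [4, 6, 8]
+// ```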
+
+// bitv_to_ints converts a bit vector into an array containing the
+// corresponding values.
+fn bitv_to_ints(bitv []bool, min int) []int {
+ mut out := []int{}
+
+ for i in 0 .. bitv.len {
+ if bitv[i] {
+ out << min + i
+ }
+ }
+
+ return out
+}
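+
+// For example, bitv_to_ints([false, true, true], 5) returns [6, 7].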
+
+// parse_part parses a given part of a cron expression & returns the
+// corresponding array of ints.
+fn parse_part(s string, min int, max int) ?[]int {
+ mut bitv := []bool{len: max - min + 1, init: false}
+
+ for range in s.split(',') {
+ parse_range(range, min, max, mut bitv)?
+ }
+
+ return bitv_to_ints(bitv, min)
+}
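+
+// For example, combining two stepped ranges (see expression_parse_test.v):
+//
+// ```v
+// assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8]
+// ```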
+
+// parse_expression parses an entire cron expression string into a
+// CronExpression object, if possible.
+pub fn parse_expression(exp string) ?CronExpression {
+ // The filter allows for multiple spaces between parts
+ mut parts := exp.split(' ').filter(it != '')
+
+ if parts.len < 2 || parts.len > 4 {
+ return error('Expression must contain between 2 and 4 space-separated parts.')
+ }
+
+ // For ease of use, we allow the user to only specify as many parts as they
+ // need.
+ for parts.len < 4 {
+ parts << '*'
+ }
+
+ mut part_results := [][]int{}
+
+ mins := [0, 0, 1, 1]
+ maxs := [59, 23, 31, 12]
+
+ // This for loop allows us to more clearly propagate the error to the user.
+ for i, min in mins {
+ part_results << parse_part(parts[i], min, maxs[i]) or {
+ return error('An error occurred with part $i: $err.msg()')
+ }
+ }
+
+ return CronExpression{
+ minutes: part_results[0]
+ hours: part_results[1]
+ days: part_results[2]
+ months: part_results[3]
+ }
+}
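+
+// A typical usage sketch: parsing the default global schedule & computing its
+// next occurrence:
+//
+// ```v
+// ce := parse_expression('0 3') or { panic(err) }
+// next_time := ce.next_from_now() or { panic(err) }
+// ```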
diff --git a/src/cron/expression/expression_parse_test.v b/src/cron/expression/expression_parse_test.v
new file mode 100644
index 0000000..4eebc49
--- /dev/null
+++ b/src/cron/expression/expression_parse_test.v
@@ -0,0 +1,98 @@
+module expression
+
+// parse_range_error returns the error message produced by parse_range. An
+// empty result means the function didn't error.
+fn parse_range_error(s string, min int, max int) string {
+ mut bitv := []bool{len: max - min + 1, init: false}
+
+ parse_range(s, min, max, mut bitv) or { return err.msg }
+
+ return ''
+}
+
+// =====parse_range=====
+fn test_range_star_range() ? {
+ mut bitv := []bool{len: 6, init: false}
+ parse_range('*', 0, 5, mut bitv)?
+
+ assert bitv == [true, true, true, true, true, true]
+}
+
+fn test_range_number() ? {
+ mut bitv := []bool{len: 6, init: false}
+ parse_range('4', 0, 5, mut bitv)?
+
+ assert bitv_to_ints(bitv, 0) == [4]
+}
+
+fn test_range_number_too_large() ? {
+ assert parse_range_error('10', 0, 6) == 'Out of range.'
+}
+
+fn test_range_number_too_small() ? {
+ assert parse_range_error('0', 2, 6) == 'Out of range.'
+}
+
+fn test_range_number_invalid() ? {
+ assert parse_range_error('x', 0, 6) == 'Invalid number.'
+}
+
+fn test_range_step_star_1() ? {
+ mut bitv := []bool{len: 21, init: false}
+ parse_range('*/4', 0, 20, mut bitv)?
+
+ assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20]
+}
+
+fn test_range_step_star_2() ? {
+ mut bitv := []bool{len: 8, init: false}
+ parse_range('*/3', 1, 8, mut bitv)?
+
+ assert bitv_to_ints(bitv, 1) == [1, 4, 7]
+}
+
+fn test_range_step_star_too_large() ? {
+ assert parse_range_error('*/21', 0, 20) == 'Step size too large.'
+}
+
+fn test_range_step_zero() ? {
+ assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.'
+}
+
+fn test_range_step_number() ? {
+ mut bitv := []bool{len: 21, init: false}
+ parse_range('5/4', 2, 22, mut bitv)?
+
+ assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21]
+}
+
+fn test_range_step_number_too_large() ? {
+ assert parse_range_error('10/4', 0, 5) == 'Out of range.'
+}
+
+fn test_range_step_number_too_small() ? {
+ assert parse_range_error('2/4', 5, 10) == 'Out of range.'
+}
+
+fn test_range_dash() ? {
+ mut bitv := []bool{len: 10, init: false}
+ parse_range('4-8', 0, 9, mut bitv)?
+
+ assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8]
+}
+
+fn test_range_dash_step() ? {
+ mut bitv := []bool{len: 10, init: false}
+ parse_range('4-8/2', 0, 9, mut bitv)?
+
+ assert bitv_to_ints(bitv, 0) == [4, 6, 8]
+}
+
+// =====parse_part=====
+fn test_part_single() ? {
+ assert parse_part('*', 0, 5)? == [0, 1, 2, 3, 4, 5]
+}
+
+fn test_part_multiple() ? {
+ assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8]
+}
diff --git a/src/cron/expression/expression_test.v b/src/cron/expression/expression_test.v
new file mode 100644
index 0000000..9e25e92
--- /dev/null
+++ b/src/cron/expression/expression_test.v
@@ -0,0 +1,34 @@
+module expression
+
+import time { parse }
+
+fn util_test_time(exp string, t1_str string, t2_str string) ? {
+ ce := parse_expression(exp)?
+ t1 := parse(t1_str)?
+ t2 := parse(t2_str)?
+
+ t3 := ce.next(t1)?
+
+ assert t2.year == t3.year
+ assert t2.month == t3.month
+ assert t2.day == t3.day
+ assert t2.hour == t3.hour
+ assert t2.minute == t3.minute
+}
+
+fn test_next_simple() ? {
+ // Very simple
+ util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')?
+
+ // Overlap to next day
+ util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')?
+ util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')?
+
+ util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')?
+
+ // Overlap to next month
+ util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')?
+
+ // Overlap to next year
+ util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')?
+}
diff --git a/src/cron/expression_test.v b/src/cron/expression_test.v
deleted file mode 100644
index c7065f8..0000000
--- a/src/cron/expression_test.v
+++ /dev/null
@@ -1,35 +0,0 @@
-module cron
-
-import time { parse }
-
-fn util_test_time(exp string, t1_str string, t2_str string) ! {
- ce := parse_expression(exp)!
- t1 := parse(t1_str)!
- t2 := parse(t2_str)!
-
- t3 := ce.next(t1)
-
- assert t2.year == t3.year
- assert t2.month == t3.month
- assert t2.day == t3.day
- assert t2.hour == t3.hour
- assert t2.minute == t3.minute
-}
-
-fn test_next_simple() ! {
- // Very simple
- // util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')!
-
- // Overlap to next day
- mut exp := '0 3 '
- util_test_time(exp, '2002-01-01 03:00:00', '2002-01-02 03:00:00')!
- util_test_time(exp, '2002-01-01 04:00:00', '2002-01-02 03:00:00')!
-
- util_test_time('0 3-7/4,7-19', '2002-01-01 04:00:00', '2002-01-01 07:00:00')!
-
- //// Overlap to next month
- util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')!
-
- //// Overlap to next year
- util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')!
-}
diff --git a/src/cron/parse_test.v b/src/cron/parse_test.v
deleted file mode 100644
index 19575d7..0000000
--- a/src/cron/parse_test.v
+++ /dev/null
@@ -1,42 +0,0 @@
-module cron
-
-fn test_not_allowed() {
- illegal_expressions := [
- '4 *-7',
- '4 *-7/4',
- '4 7/*',
- '0 0 30 2',
- '0 /5',
- '0 ',
- '0',
- ' 0',
- ' 0 ',
- '1 2 3 4~9',
- '1 1-3-5',
- '0 5/2-5',
- '',
- '1 1/2/3',
- '*5 8',
- 'x 8',
- ]
-
- mut res := false
-
- for exp in illegal_expressions {
- res = false
- parse_expression(exp) or { res = true }
- assert res, "'${exp}' should produce an error"
- }
-}
-
-fn test_auto_extend() ! {
- ce1 := parse_expression('5 5')!
- ce2 := parse_expression('5 5 *')!
- ce3 := parse_expression('5 5 * *')!
-
- assert ce1 == ce2 && ce2 == ce3
-}
-
-fn test_four() {
- parse_expression('0 1 2 3 ') or { assert false }
-}
diff --git a/src/dbms/dbms.v b/src/db/db.v
similarity index 72%
rename from src/dbms/dbms.v
rename to src/db/db.v
index e5676ab..9459c05 100644
--- a/src/dbms/dbms.v
+++ b/src/db/db.v
@@ -1,6 +1,6 @@
-module dbms
+module db
-import db.sqlite
+import sqlite
import time
pub struct VieterDb {
@@ -17,21 +17,17 @@ const (
$embed_file('migrations/001-initial/up.sql'),
$embed_file('migrations/002-rename-to-targets/up.sql'),
$embed_file('migrations/003-target-url-type/up.sql'),
- $embed_file('migrations/004-nullable-branch/up.sql'),
- $embed_file('migrations/005-repo-path/up.sql'),
]
migrations_down = [
$embed_file('migrations/001-initial/down.sql'),
$embed_file('migrations/002-rename-to-targets/down.sql'),
$embed_file('migrations/003-target-url-type/down.sql'),
- $embed_file('migrations/004-nullable-branch/down.sql'),
- $embed_file('migrations/005-repo-path/down.sql'),
]
)
// init initializes a database & adds the correct tables.
-pub fn init(db_path string) !VieterDb {
- conn := sqlite.connect(db_path)!
+pub fn init(db_path string) ?VieterDb {
+ conn := sqlite.connect(db_path)?
sql conn {
create table MigrationVersion
@@ -49,13 +45,13 @@ pub fn init(db_path string) !VieterDb {
}
// Apply each migration in order
- for i in cur_version.version .. dbms.migrations_up.len {
- migration := dbms.migrations_up[i].to_string()
+ for i in cur_version.version .. db.migrations_up.len {
+ migration := db.migrations_up[i].to_string()
version_num := i + 1
// vfmt does not like these dots
- println('Applying migration ${version_num}' + '...')
+ println('Applying migration $version_num' + '...')
// The sqlite library seems to not like it when multiple statements are
// passed in a single exec. Therefore, we split them & run them all
@@ -64,7 +60,7 @@ pub fn init(db_path string) !VieterDb {
res := conn.exec_none(part)
if res != sqlite.sqlite_done {
- return error('An error occurred while applying migration ${version_num}: SQLite error code ${res}')
+ return error('An error occurred while applying migration $version_num')
}
}
@@ -80,9 +76,9 @@ pub fn init(db_path string) !VieterDb {
}
}
-// row_into[T] converts an sqlite.Row into a given type T by parsing each field
+// row_into<T> converts an sqlite.Row into a given type T by parsing each field
// from a string according to its type.
-pub fn row_into[T](row sqlite.Row) T {
+pub fn row_into<T>(row sqlite.Row) T {
mut i := 0
mut out := T{}
diff --git a/src/dbms/logs.v b/src/db/logs.v
similarity index 64%
rename from src/dbms/logs.v
rename to src/db/logs.v
index b0786b8..af5f53c 100644
--- a/src/dbms/logs.v
+++ b/src/db/logs.v
@@ -1,4 +1,4 @@
-module dbms
+module db
import models { BuildLog, BuildLogFilter }
import time
@@ -8,20 +8,20 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
mut where_parts := []string{}
if filter.target != 0 {
- where_parts << 'target_id == ${filter.target}'
+ where_parts << 'target_id == $filter.target'
}
if filter.before != time.Time{} {
- where_parts << 'start_time < ${filter.before.unix_time()}'
+ where_parts << 'start_time < $filter.before.unix_time()'
}
if filter.after != time.Time{} {
- where_parts << 'start_time > ${filter.after.unix_time()}'
+ where_parts << 'start_time > $filter.after.unix_time()'
}
// NOTE: possible SQL injection
if filter.arch != '' {
- where_parts << "arch == '${filter.arch}'"
+ where_parts << "arch == '$filter.arch'"
}
mut parts := []string{}
@@ -30,27 +30,27 @@ pub fn (db &VieterDb) get_build_logs(filter BuildLogFilter) []BuildLog {
if exp[0] == `!` {
code := exp[1..].int()
- parts << 'exit_code != ${code}'
+ parts << 'exit_code != $code'
} else {
code := exp.int()
- parts << 'exit_code == ${code}'
+ parts << 'exit_code == $code'
}
}
if parts.len > 0 {
- where_parts << parts.map('(${it})').join(' or ')
+ where_parts << parts.map('($it)').join(' or ')
}
mut where_str := ''
if where_parts.len > 0 {
- where_str = 'where ' + where_parts.map('(${it})').join(' and ')
+ where_str = 'where ' + where_parts.map('($it)').join(' and ')
}
- query := 'select * from BuildLog ${where_str} limit ${filter.limit} offset ${filter.offset}'
+ query := 'select * from BuildLog $where_str limit $filter.limit offset $filter.offset'
rows, _ := db.conn.exec(query)
- res := rows.map(row_into[BuildLog](it))
+	res := rows.map(row_into<BuildLog>(it))
return res
}
@@ -79,16 +79,10 @@ pub fn (db &VieterDb) get_build_log(id int) ?BuildLog {
}
// add_build_log inserts the given BuildLog into the database.
-pub fn (db &VieterDb) add_build_log(log BuildLog) int {
+pub fn (db &VieterDb) add_build_log(log BuildLog) {
sql db.conn {
insert log into BuildLog
}
-
- // Here, this does work because a log doesn't contain any foreign keys,
- // meaning the ORM only has to do a single add
- inserted_id := db.conn.last_id() as int
-
- return inserted_id
}
// delete_build_log deletes the BuildLog with the given ID from the database.
diff --git a/src/dbms/migrations/001-initial/down.sql b/src/db/migrations/001-initial/down.sql
similarity index 100%
rename from src/dbms/migrations/001-initial/down.sql
rename to src/db/migrations/001-initial/down.sql
diff --git a/src/dbms/migrations/001-initial/up.sql b/src/db/migrations/001-initial/up.sql
similarity index 100%
rename from src/dbms/migrations/001-initial/up.sql
rename to src/db/migrations/001-initial/up.sql
diff --git a/src/dbms/migrations/002-rename-to-targets/down.sql b/src/db/migrations/002-rename-to-targets/down.sql
similarity index 100%
rename from src/dbms/migrations/002-rename-to-targets/down.sql
rename to src/db/migrations/002-rename-to-targets/down.sql
diff --git a/src/dbms/migrations/002-rename-to-targets/up.sql b/src/db/migrations/002-rename-to-targets/up.sql
similarity index 100%
rename from src/dbms/migrations/002-rename-to-targets/up.sql
rename to src/db/migrations/002-rename-to-targets/up.sql
diff --git a/src/dbms/migrations/003-target-url-type/down.sql b/src/db/migrations/003-target-url-type/down.sql
similarity index 100%
rename from src/dbms/migrations/003-target-url-type/down.sql
rename to src/db/migrations/003-target-url-type/down.sql
diff --git a/src/dbms/migrations/003-target-url-type/up.sql b/src/db/migrations/003-target-url-type/up.sql
similarity index 100%
rename from src/dbms/migrations/003-target-url-type/up.sql
rename to src/db/migrations/003-target-url-type/up.sql
diff --git a/src/dbms/targets.v b/src/db/targets.v
similarity index 71%
rename from src/dbms/targets.v
rename to src/db/targets.v
index a55220f..9102033 100644
--- a/src/dbms/targets.v
+++ b/src/db/targets.v
@@ -1,6 +1,25 @@
-module dbms
+module db
-import models { Target, TargetArch }
+import models { Target, TargetArch, TargetFilter }
+
+// get_targets returns all targets in the database.
+pub fn (db &VieterDb) get_targets(filter TargetFilter) []Target {
+	// This seems to currently be blocked by a bug in the ORM; I'll have to
+	// ask around.
+ if filter.repo != '' {
+ res := sql db.conn {
+ select from Target where repo == filter.repo order by id limit filter.limit offset filter.offset
+ }
+
+ return res
+ }
+
+ res := sql db.conn {
+ select from Target order by id limit filter.limit offset filter.offset
+ }
+
+ return res
+}
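+
+// For example, db.get_targets(repo: 'vieter') returns at most filter.limit
+// targets whose repo field equals 'vieter'; TargetFilter is a [params]
+// struct, so its fields can be passed as named arguments.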
// get_target tries to return a specific target.
pub fn (db &VieterDb) get_target(target_id int) ?Target {
@@ -19,17 +38,10 @@ pub fn (db &VieterDb) get_target(target_id int) ?Target {
}
// add_target inserts the given target into the database.
-pub fn (db &VieterDb) add_target(target Target) int {
+pub fn (db &VieterDb) add_target(repo Target) {
sql db.conn {
- insert target into Target
+ insert repo into Target
}
-
- // ID of inserted target is the largest id
- inserted_target := sql db.conn {
- select from Target order by id desc limit 1
- }
-
- return inserted_target.id
}
// delete_target deletes the target with the given id from the database.
@@ -49,13 +61,13 @@ pub fn (db &VieterDb) update_target(target_id int, params map[string]string) {
if field.name in params {
// Any fields that are array types require their own update method
$if field.typ is string {
- values << "${field.name} = '${params[field.name]}'"
+ values << "$field.name = '${params[field.name]}'"
}
}
}
values_str := values.join(', ')
// I think this is actual SQL & not the ORM language
- query := 'update Target set ${values_str} where id == ${target_id}'
+ query := 'update Target set $values_str where id == $target_id'
db.conn.exec_none(query)
}
diff --git a/src/dbms/migrations/004-nullable-branch/down.sql b/src/dbms/migrations/004-nullable-branch/down.sql
deleted file mode 100644
index 2515593..0000000
--- a/src/dbms/migrations/004-nullable-branch/down.sql
+++ /dev/null
@@ -1,26 +0,0 @@
--- This down won't really work because it'll throw NOT NULL errors, but I'm
--- just putting it here for future reference (still not sure whether I'm even
- -- gonna use these)
-PRAGMA foreign_keys=off;
-
-BEGIN TRANSACTION;
-
-ALTER TABLE Target RENAME TO _Target_old;
-
-CREATE TABLE Target (
- id INTEGER PRIMARY KEY,
- url TEXT NOT NULL,
- branch TEXT NOT NULL,
- repo TEXT NOT NULL,
- schedule TEXT,
- kind TEXT NOT NULL DEFAULT 'git'
-);
-
-INSERT INTO Target (id, url, branch, repo, schedule, kind)
- SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
-
-DROP TABLE _Target_old;
-
-COMMIT;
-
-PRAGMA foreign_keys=on;
diff --git a/src/dbms/migrations/004-nullable-branch/up.sql b/src/dbms/migrations/004-nullable-branch/up.sql
deleted file mode 100644
index 6333c37..0000000
--- a/src/dbms/migrations/004-nullable-branch/up.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-PRAGMA foreign_keys=off;
-
-BEGIN TRANSACTION;
-
-ALTER TABLE Target RENAME TO _Target_old;
-
-CREATE TABLE Target (
- id INTEGER PRIMARY KEY,
- url TEXT NOT NULL,
- branch TEXT,
- repo TEXT NOT NULL,
- schedule TEXT,
- kind TEXT NOT NULL DEFAULT 'git'
-);
-
-INSERT INTO Target (id, url, branch, repo, schedule, kind)
- SELECT id, url, branch, repo, schedule, kind FROM _Target_old;
-
-DROP TABLE _Target_old;
-
-COMMIT;
-
-PRAGMA foreign_keys=on;
diff --git a/src/dbms/migrations/005-repo-path/down.sql b/src/dbms/migrations/005-repo-path/down.sql
deleted file mode 100644
index 8a6f021..0000000
--- a/src/dbms/migrations/005-repo-path/down.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE Target DROP COLUMN path;
diff --git a/src/dbms/migrations/005-repo-path/up.sql b/src/dbms/migrations/005-repo-path/up.sql
deleted file mode 100644
index f7e5c29..0000000
--- a/src/dbms/migrations/005-repo-path/up.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE Target ADD COLUMN path TEXT;
diff --git a/src/dbms/targets_iter.v b/src/dbms/targets_iter.v
deleted file mode 100644
index ca149b9..0000000
--- a/src/dbms/targets_iter.v
+++ /dev/null
@@ -1,129 +0,0 @@
-module dbms
-
-import models { Target, TargetFilter }
-import db.sqlite
-
-// Iterator providing a filtered view into the list of targets currently stored
-// in the database. It replaces functionality usually performed in the database
-// using SQL queries that can't currently be used due to missing stuff in V's
-// ORM.
-pub struct TargetsIterator {
- conn sqlite.DB
- filter TargetFilter
- window_size int = 32
-mut:
- window []Target
- window_index u64
- // Offset in entire list of unfiltered targets
- offset int
- // Offset in filtered list of targets
- filtered_offset u64
- started bool
- done bool
-}
-
-// targets returns an iterator allowing filtered access to the list of targets.
-pub fn (db &VieterDb) targets(filter TargetFilter) TargetsIterator {
- window_size := 32
-
- return TargetsIterator{
- conn: db.conn
- filter: filter
- window: []Target{cap: window_size}
- window_size: window_size
- }
-}
-
-// advance_window moves the sliding window over the filtered list of targets
-// until it either reaches the end of the list of targets, or has encountered a
-// non-empty window.
-fn (mut ti TargetsIterator) advance_window() {
- for {
- ti.window = sql ti.conn {
- select from Target order by id limit ti.window_size offset ti.offset
- }
- ti.offset += ti.window.len
-
- if ti.window.len == 0 {
- ti.done = true
-
- return
- }
-
- if ti.filter.repo != '' {
- ti.window = ti.window.filter(it.repo == ti.filter.repo)
- }
-
- if ti.filter.arch != '' {
- ti.window = ti.window.filter(it.arch.any(it.value == ti.filter.arch))
- }
-
- if ti.filter.query != '' {
- ti.window = ti.window.filter(it.url.contains(ti.filter.query)
- || it.path.contains(ti.filter.query) || it.branch.contains(ti.filter.query))
- }
-
- // We break out of the loop once we found a non-empty window
- if ti.window.len > 0 {
- break
- }
- }
-}
-
-// next returns the next target, if possible.
-pub fn (mut ti TargetsIterator) next() ?Target {
- if ti.done {
- return none
- }
-
- // The first call to `next` will cause the sliding window to move to where
- // the requested offset starts
- if !ti.started {
- ti.advance_window()
-
- // Skip all matched targets until the requested offset
- for !ti.done && ti.filtered_offset + u64(ti.window.len) <= ti.filter.offset {
- ti.filtered_offset += u64(ti.window.len)
- ti.advance_window()
- }
-
- if ti.done {
- return none
- }
-
- left_inside_window := ti.filter.offset - ti.filtered_offset
- ti.window_index = left_inside_window
- ti.filtered_offset += left_inside_window
-
- ti.started = true
- }
-
- return_value := ti.window[ti.window_index]
-
- ti.window_index++
- ti.filtered_offset++
-
- // Next call will be past the requested offset
- if ti.filter.limit > 0 && ti.filtered_offset == ti.filter.offset + ti.filter.limit {
- ti.done = true
- }
-
- // Ensure the next call has a new valid window
- if ti.window_index == u64(ti.window.len) {
- ti.advance_window()
- ti.window_index = 0
- }
-
- return return_value
-}
-
-// collect consumes the entire iterator & returns the result as an array.
-pub fn (mut ti TargetsIterator) collect() []Target {
- mut out := []Target{}
-
- for t in ti {
- out << t
- }
-
- return out
-}
diff --git a/src/libvieter b/src/libvieter
deleted file mode 160000
index 379a05a..0000000
--- a/src/libvieter
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 379a05a7b6b604c107360e0a679fb3ea5400e02c
diff --git a/src/main.v b/src/main.v
index e3b8a1a..cba410c 100644
--- a/src/main.v
+++ b/src/main.v
@@ -7,21 +7,13 @@ import console.targets
import console.logs
import console.schedule
import console.man
-import console.aur
-import console.repos
-import agent
+import cron
fn main() {
- // Stop buffering output so logs always show up immediately
- unsafe {
- C.setbuf(C.stdout, 0)
- }
-
mut app := cli.Command{
name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.'
- version: '0.6.0'
- posix_mode: true
+ version: '0.3.0'
flags: [
cli.Flag{
flag: cli.FlagType.string
@@ -31,23 +23,14 @@ fn main() {
global: true
default_value: [os.expand_tilde_to_home('~/.vieterrc')]
},
- cli.Flag{
- flag: cli.FlagType.bool
- name: 'raw'
- abbrev: 'r'
- description: 'Only output minimal information (no formatted tables, etc.)'
- global: true
- },
]
commands: [
server.cmd(),
targets.cmd(),
+ cron.cmd(),
logs.cmd(),
schedule.cmd(),
man.cmd(),
- aur.cmd(),
- agent.cmd(),
- repos.cmd(),
]
}
app.setup()
diff --git a/src/models/builds.v b/src/models/builds.v
deleted file mode 100644
index 6923115..0000000
--- a/src/models/builds.v
+++ /dev/null
@@ -1,19 +0,0 @@
-module models
-
-pub struct BuildConfig {
-pub:
- target_id int
- kind string
- url string
- branch string
- path string
- repo string
- base_image string
- force bool
- timeout int
-}
-
-// str return a single-line string representation of a build log
-pub fn (c BuildConfig) str() string {
- return '{ target: ${c.target_id}, kind: ${c.kind}, url: ${c.url}, branch: ${c.branch}, path: ${c.path}, repo: ${c.repo}, base_image: ${c.base_image}, force: ${c.force}, timeout: ${c.timeout} }'
-}
diff --git a/src/models/logs.v b/src/models/logs.v
index cb01d08..12907d8 100644
--- a/src/models/logs.v
+++ b/src/models/logs.v
@@ -1,7 +1,6 @@
module models
import time
-import os
pub struct BuildLog {
pub mut:
@@ -16,26 +15,19 @@ pub mut:
// str returns a string representation.
pub fn (bl &BuildLog) str() string {
mut parts := [
- 'id: ${bl.id}',
- 'target id: ${bl.target_id}',
- 'start time: ${bl.start_time.local()}',
- 'end time: ${bl.end_time.local()}',
+ 'id: $bl.id',
+ 'target id: $bl.target_id',
+ 'start time: $bl.start_time.local()',
+ 'end time: $bl.end_time.local()',
'duration: ${bl.end_time - bl.start_time}',
- 'arch: ${bl.arch}',
- 'exit code: ${bl.exit_code}',
+ 'arch: $bl.arch',
+ 'exit code: $bl.exit_code',
]
str := parts.join('\n')
return str
}
-// path returns the path to the log file, relative to the logs directory
-pub fn (bl &BuildLog) path() string {
- filename := bl.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
-
- return os.join_path(bl.target_id.str(), bl.arch, filename)
-}
-
[params]
pub struct BuildLogFilter {
pub mut:
diff --git a/src/models/models.v b/src/models/models.v
index 1ed0da8..b6103d3 100644
--- a/src/models/models.v
+++ b/src/models/models.v
@@ -2,19 +2,19 @@ module models
import time
-// from_params[T] creates a new instance of T from the given map by parsing all
+// from_params<T> creates a new instance of T from the given map by parsing all
// of its fields from the map.
-pub fn from_params[T](params map[string]string) ?T {
+pub fn from_params<T>(params map[string]string) ?T {
mut o := T{}
- patch_from_params[T](mut o, params)?
+	patch_from_params<T>(mut o, params)?
return o
}
-// patch_from_params[T] updates the given T object with the params defined in
+// patch_from_params<T> updates the given T object with the params defined in
// the map.
-pub fn patch_from_params[T](mut o T, params map[string]string) ? {
+pub fn patch_from_params<T>(mut o T, params map[string]string) ? {
$for field in T.fields {
if field.name in params && params[field.name] != '' {
$if field.typ is string {
@@ -36,8 +36,8 @@ pub fn patch_from_params[T](mut o T, params map[string]string) ? {
}
}
-// params_from[T] converts a given T struct into a map of strings.
-pub fn params_from[T](o &T) map[string]string {
+// params_from<T> converts a given T struct into a map of strings.
+pub fn params_from<T>(o &T) map[string]string {
mut out := map[string]string{}
$for field in T.fields {
diff --git a/src/models/targets.v b/src/models/targets.v
index 14cc8a6..c8aa535 100644
--- a/src/models/targets.v
+++ b/src/models/targets.v
@@ -28,52 +28,31 @@ pub mut:
repo string [nonull]
// Cron schedule describing how frequently to build the repo.
schedule string
- // Subdirectory in the Git repository to cd into
- path string
// On which architectures the package is allowed to be built. In reality,
- // this controls which agents will build this package when scheduled.
+ // this controls which builders will periodically build the image.
arch []TargetArch [fkey: 'target_id']
}
// str returns a string representation.
-pub fn (t &Target) str() string {
+pub fn (gr &Target) str() string {
mut parts := [
- 'id: ${t.id}',
- 'kind: ${t.kind}',
- 'url: ${t.url}',
- 'branch: ${t.branch}',
- 'path: ${t.path}',
- 'repo: ${t.repo}',
- 'schedule: ${t.schedule}',
- 'arch: ${t.arch.map(it.value).join(', ')}',
+ 'id: $gr.id',
+ 'kind: $gr.kind',
+ 'url: $gr.url',
+ 'branch: $gr.branch',
+ 'repo: $gr.repo',
+ 'schedule: $gr.schedule',
+ 'arch: ${gr.arch.map(it.value).join(', ')}',
]
str := parts.join('\n')
return str
}
-// as_build_config converts a Target into a BuildConfig, given some extra
-// needed information.
-pub fn (t &Target) as_build_config(base_image string, force bool, timeout int) BuildConfig {
- return BuildConfig{
- target_id: t.id
- kind: t.kind
- url: t.url
- branch: t.branch
- path: t.path
- repo: t.repo
- base_image: base_image
- force: force
- timeout: timeout
- }
-}
-
[params]
pub struct TargetFilter {
pub mut:
limit u64 = 25
offset u64
repo string
- query string
- arch string
}
diff --git a/src/package/README.md b/src/package/README.md
deleted file mode 100644
index b2bcbd7..0000000
--- a/src/package/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# package
-
-This module handles both parsing the published Arch tarballs & the contents of
-their `.PKGINFO` files, as well as generating the contents of the database
-archives' `desc` & `files` files.
diff --git a/src/package/format.v b/src/package/format.v
deleted file mode 100644
index b126f3a..0000000
--- a/src/package/format.v
+++ /dev/null
@@ -1,103 +0,0 @@
-module package
-
-// format_entry returns a string properly formatted to be added to a desc file.
-[inline]
-fn format_entry(key string, value string) string {
- return '\n%${key}%\n${value}\n'
-}
-
-// full_name returns the properly formatted name for the package, including
-// version & architecture
-pub fn (pkg &Pkg) full_name() string {
- p := pkg.info
- return '${p.name}-${p.version}-${p.arch}'
-}
-
-// filename returns the correct filename of the package file
-pub fn (pkg &Pkg) filename() string {
- ext := match pkg.compression {
- 0 { '.tar' }
- 1 { '.tar.gz' }
- 6 { '.tar.xz' }
- 14 { '.tar.zst' }
- else { panic("Another compression code shouldn't be possible. Faulty code: ${pkg.compression}") }
- }
-
- return '${pkg.full_name()}.pkg${ext}'
-}
-
-// to_desc returns a desc file valid string representation
-pub fn (pkg &Pkg) to_desc() !string {
- p := pkg.info
-
- // filename
- mut desc := '%FILENAME%\n${pkg.filename()}\n'
-
- desc += format_entry('NAME', p.name)
- desc += format_entry('BASE', p.base)
- desc += format_entry('VERSION', p.version)
-
- if p.description.len > 0 {
- desc += format_entry('DESC', p.description)
- }
-
- if p.groups.len > 0 {
- desc += format_entry('GROUPS', p.groups.join_lines())
- }
-
- desc += format_entry('CSIZE', p.csize.str())
- desc += format_entry('ISIZE', p.size.str())
-
- sha256sum := pkg.checksum()!
-
- desc += format_entry('SHA256SUM', sha256sum)
-
- // TODO add pgpsig stuff
-
- if p.url.len > 0 {
- desc += format_entry('URL', p.url)
- }
-
- if p.licenses.len > 0 {
- desc += format_entry('LICENSE', p.licenses.join_lines())
- }
-
- desc += format_entry('ARCH', p.arch)
- desc += format_entry('BUILDDATE', p.build_date.str())
- desc += format_entry('PACKAGER', p.packager)
-
- if p.replaces.len > 0 {
- desc += format_entry('REPLACES', p.replaces.join_lines())
- }
-
- if p.conflicts.len > 0 {
- desc += format_entry('CONFLICTS', p.conflicts.join_lines())
- }
-
- if p.provides.len > 0 {
- desc += format_entry('PROVIDES', p.provides.join_lines())
- }
-
- if p.depends.len > 0 {
- desc += format_entry('DEPENDS', p.depends.join_lines())
- }
-
- if p.optdepends.len > 0 {
- desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
- }
-
- if p.makedepends.len > 0 {
- desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
- }
-
- if p.checkdepends.len > 0 {
- desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
- }
-
- return '${desc}\n'
-}
-
-// to_files returns a files file valid string representation
-pub fn (pkg &Pkg) to_files() string {
- return '%FILES%\n${pkg.files.join_lines()}\n'
-}
diff --git a/src/package/package.v b/src/package/package.v
index 6cf8e3d..9eaf5a2 100644
--- a/src/package/package.v
+++ b/src/package/package.v
@@ -43,12 +43,12 @@ pub mut:
}
// checksum calculates the sha256 hash of the package
-pub fn (p &Pkg) checksum() !string {
+pub fn (p &Pkg) checksum() ?string {
return util.hash_file(p.path)
}
// parse_pkg_info_string parses a PkgInfo object from a string
-fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
+fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
mut pkg_info := PkgInfo{}
// Iterate over the entire string
@@ -101,9 +101,9 @@ fn parse_pkg_info_string(pkg_info_str &string) !PkgInfo {
// read_pkg_archive extracts the file list & .PKGINFO contents from an archive
// NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs.
-pub fn read_pkg_archive(pkg_path string) !Pkg {
+pub fn read_pkg_archive(pkg_path string) ?Pkg {
if !os.is_file(pkg_path) {
- return error("'${pkg_path}' doesn't exist or isn't a file.")
+ return error("'$pkg_path' doesn't exist or isn't a file.")
}
a := C.archive_read_new()
@@ -159,7 +159,7 @@ pub fn read_pkg_archive(pkg_path string) !Pkg {
pkg_text := unsafe { buf.vstring_with_len(size).clone() }
- pkg_info = parse_pkg_info_string(pkg_text)!
+ pkg_info = parse_pkg_info_string(pkg_text)?
} else {
C.archive_read_data_skip(a)
}
@@ -174,3 +174,104 @@ pub fn read_pkg_archive(pkg_path string) !Pkg {
compression: compression_code
}
}
+
+// format_entry returns a string properly formatted to be added to a desc file.
+fn format_entry(key string, value string) string {
+ return '\n%$key%\n$value\n'
+}
+
+// full_name returns the properly formatted name for the package, including
+// version & architecture
+pub fn (pkg &Pkg) full_name() string {
+ p := pkg.info
+ return '$p.name-$p.version-$p.arch'
+}
+
+// filename returns the correct filename of the package file
+pub fn (pkg &Pkg) filename() string {
+ ext := match pkg.compression {
+ 0 { '.tar' }
+ 1 { '.tar.gz' }
+ 6 { '.tar.xz' }
+ 14 { '.tar.zst' }
+ else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
+ }
+
+ return '${pkg.full_name()}.pkg$ext'
+}
+
+// to_desc returns a desc file valid string representation
+pub fn (pkg &Pkg) to_desc() ?string {
+ p := pkg.info
+
+ // filename
+ mut desc := '%FILENAME%\n$pkg.filename()\n'
+
+ desc += format_entry('NAME', p.name)
+ desc += format_entry('BASE', p.base)
+ desc += format_entry('VERSION', p.version)
+
+ if p.description.len > 0 {
+ desc += format_entry('DESC', p.description)
+ }
+
+ if p.groups.len > 0 {
+ desc += format_entry('GROUPS', p.groups.join_lines())
+ }
+
+ desc += format_entry('CSIZE', p.csize.str())
+ desc += format_entry('ISIZE', p.size.str())
+
+ sha256sum := pkg.checksum()?
+
+ desc += format_entry('SHA256SUM', sha256sum)
+
+ // TODO add pgpsig stuff
+
+ if p.url.len > 0 {
+ desc += format_entry('URL', p.url)
+ }
+
+ if p.licenses.len > 0 {
+ desc += format_entry('LICENSE', p.licenses.join_lines())
+ }
+
+ desc += format_entry('ARCH', p.arch)
+ desc += format_entry('BUILDDATE', p.build_date.str())
+ desc += format_entry('PACKAGER', p.packager)
+
+ if p.replaces.len > 0 {
+ desc += format_entry('REPLACES', p.replaces.join_lines())
+ }
+
+ if p.conflicts.len > 0 {
+ desc += format_entry('CONFLICTS', p.conflicts.join_lines())
+ }
+
+ if p.provides.len > 0 {
+ desc += format_entry('PROVIDES', p.provides.join_lines())
+ }
+
+ if p.depends.len > 0 {
+ desc += format_entry('DEPENDS', p.depends.join_lines())
+ }
+
+ if p.optdepends.len > 0 {
+ desc += format_entry('OPTDEPENDS', p.optdepends.join_lines())
+ }
+
+ if p.makedepends.len > 0 {
+ desc += format_entry('MAKEDEPENDS', p.makedepends.join_lines())
+ }
+
+ if p.checkdepends.len > 0 {
+ desc += format_entry('CHECKDEPENDS', p.checkdepends.join_lines())
+ }
+
+ return '$desc\n'
+}
+
+// to_files returns a files file valid string representation
+pub fn (pkg &Pkg) to_files() string {
+ return '%FILES%\n$pkg.files.join_lines()\n'
+}
diff --git a/src/repo/README.md b/src/repo/README.md
deleted file mode 100644
index f06b1d3..0000000
--- a/src/repo/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# repo
-
-This module manages the contents of the various repositories stored within a
-Vieter instance.
-
-## Terminology
-
-* Arch-repository (arch-repo): specific architecture of a given repository. This is what
- Pacman actually uses as a repository, and contains its own `.db` & `.files`
- files.
-* Repository (repo): a collection of arch-repositories. A single repository can
- contain packages of different architectures, with each package being stored
- in that specific architecture' arch-repository.
-* Repository group (repo-group): a collection of repositories. Each Vieter
- instance consists of a single repository group, which manages all underlying
- repositories & arch-repositories.
-
-## Arch-repository layout
-
-An arch-repository (aka a regular Pacman repository) consists of a directory
-with the following files (`{repo}` should be replaced with the name of the
-repository):
-
-* One or more package directories. These directories follow the naming scheme
- `${pkgname}-${pkgver}-${pkgrel}`. Each of these directories contains two
- files, `desc` & `files`. The `desc` file is a list of the package's metadata,
- while `files` contains a list of all files that the package contains. The
- latter is used when using `pacman -F`.
-* `{repo}.db` & `{repo}.db.tar.gz`: the database file of the repository. This
-  is just a compressed tarball of all package directories, containing only their
- `desc` files. Both these files should have the same content (`repo-add`
- creates a symlink, but Vieter just serves the same file for both routes)
-* `{repo}.files` & `{repo}.files.tar.gz`: the same as the `.db` file, but this
- also contains the `files` files, instead of just the `desc` files.
-
-## Filesystem layout
-
-The repository part of Vieter consists of two directories. One is the `repos`
-directory inside the configured `data_dir`, while the other is the configured
-`pkg_dir`. `repos` contains only the repository group, while `pkg_dir` contains
-the actual package archives. `pkg_dir` is the directory that can take up a
-significant amount of disk space, while `repos` solely consists of small text
-files.
diff --git a/src/repo/remove.v b/src/repo/remove.v
deleted file mode 100644
index 6d949c3..0000000
--- a/src/repo/remove.v
+++ /dev/null
@@ -1,85 +0,0 @@
-module repo
-
-import os
-
-// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
-// returns false if the package wasn't present in the database. It also
-// optionally re-syncs the repo archives.
-pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, perform_sync bool) !bool {
- repo_dir := os.join_path(r.repos_dir, repo, arch)
-
- // If the repository doesn't exist yet, the result is automatically false
- if !os.exists(repo_dir) {
- return false
- }
-
- // We iterate over every directory in the repo dir
- // TODO filter so we only check directories
- for d in os.ls(repo_dir)! {
- // Because a repository only allows a single version of each package,
- // we need only compare whether the name of the package is the same,
- // not the version.
- name := d.split('-')#[..-2].join('-')
-
- if name == pkg_name {
- // We lock the mutex here to prevent other routines from creating a
- // new archive while we remove an entry
- lock r.mutex {
- os.rmdir_all(os.join_path_single(repo_dir, d))!
- }
-
- // Also remove the package archive
- repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
-
- archives := os.ls(repo_pkg_dir)!.filter(it.split('-')#[..-3].join('-') == name)
-
- for archive_name in archives {
- full_path := os.join_path_single(repo_pkg_dir, archive_name)
- os.rm(full_path)!
- }
-
- // Sync the db archives if requested
- if perform_sync {
- r.sync(repo, arch)!
- }
-
- return true
- }
- }
-
- return false
-}
-
-// remove_arch_repo removes an arch-repo & its packages.
-pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) !bool {
- repo_dir := os.join_path(r.repos_dir, repo, arch)
-
- // If the repository doesn't exist yet, the result is automatically false
- if !os.exists(repo_dir) {
- return false
- }
-
- os.rmdir_all(repo_dir)!
-
- pkg_dir := os.join_path(r.pkg_dir, repo, arch)
- os.rmdir_all(pkg_dir)!
-
- return true
-}
-
-// remove_repo removes a repo & its packages.
-pub fn (r &RepoGroupManager) remove_repo(repo string) !bool {
- repo_dir := os.join_path_single(r.repos_dir, repo)
-
- // If the repository doesn't exist yet, the result is automatically false
- if !os.exists(repo_dir) {
- return false
- }
-
- os.rmdir_all(repo_dir)!
-
- pkg_dir := os.join_path_single(r.pkg_dir, repo)
- os.rmdir_all(pkg_dir)!
-
- return true
-}
diff --git a/src/repo/add.v b/src/repo/repo.v
similarity index 57%
rename from src/repo/add.v
rename to src/repo/repo.v
index 47b0d7e..c4b85c0 100644
--- a/src/repo/add.v
+++ b/src/repo/repo.v
@@ -23,23 +23,18 @@ pub:
pub struct RepoAddResult {
pub:
- name string
- version string
- archs []string
+ added bool [required]
+ pkg &package.Pkg [required]
}
// new creates a new RepoGroupManager & creates the directories as needed
-pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager {
+pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
if !os.is_dir(repos_dir) {
- os.mkdir_all(repos_dir) or {
- return error('Failed to create repos directory: ${err.msg()}')
- }
+ os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
}
if !os.is_dir(pkg_dir) {
- os.mkdir_all(pkg_dir) or {
- return error('Failed to create package directory: ${err.msg()}')
- }
+ os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') }
}
return RepoGroupManager{
@@ -53,32 +48,31 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupMana
// pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive
// file, passes the result to add_pkg_in_repo, and hard links the archive to
// the right subdirectories in r.pkg_dir if it was successfully added.
-pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult {
+pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
pkg := package.read_pkg_archive(pkg_path) or {
- return error('Failed to read package file: ${err.msg()}')
+ return error('Failed to read package file: $err.msg()')
}
- archs := r.add_pkg_in_repo(repo, pkg)!
+ added := r.add_pkg_in_repo(repo, pkg)?
// If the add was successful, we move the file to the packages directory
- for arch in archs {
+ for arch in added {
repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
dest_path := os.join_path_single(repo_pkg_path, pkg.filename())
- os.mkdir_all(repo_pkg_path)!
+ os.mkdir_all(repo_pkg_path)?
// We create hard links so that "any" arch packages aren't stored
// multiple times
- os.link(pkg_path, dest_path)!
+ os.link(pkg_path, dest_path)?
}
// After linking, we can remove the original file
- os.rm(pkg_path)!
+ os.rm(pkg_path)?
return RepoAddResult{
- name: pkg.info.name
- version: pkg.info.version
- archs: archs
+ added: added.len > 0
+ pkg: &pkg
}
}
@@ -89,13 +83,15 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !Re
// r.default_arch. If this arch-repo doesn't exist yet, it is created. If the
// architecture isn't 'any', the package is only added to the specific
// architecture.
-fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]string {
+fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string {
// A package not of arch 'any' can be handled easily by adding it to the
// respective repo
if pkg.info.arch != 'any' {
- r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)!
-
- return [pkg.info.arch]
+ if r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)? {
+ return [pkg.info.arch]
+ } else {
+ return []
+ }
}
mut arch_repos := []string{}
@@ -108,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]strin
// If this is the first package that's added to the repo, the directory
// won't exist yet
if os.exists(repo_dir) {
- arch_repos = os.ls(repo_dir)!
+ arch_repos = os.ls(repo_dir)?
}
// The default_arch should always be updated when a package with arch 'any'
@@ -117,39 +113,92 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]strin
arch_repos << r.default_arch
}
- // Add the package to each found architecture
- // NOTE: if any of these fail, the function fails. This means the user does
- // not know which arch-repositories did succeed in adding the package, if
- // any.
+ mut added := []string{}
+
+ // We add the package to each repository. If any of the repositories
+ // return true, the result of the function is also true.
for arch in arch_repos {
- r.add_pkg_in_arch_repo(repo, arch, pkg)!
+ if r.add_pkg_in_arch_repo(repo, arch, pkg)? {
+ added << arch
+ }
}
- return arch_repos
+ return added
}
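+
+// Hypothetical example: adding an 'any' package to a repo that already has
+// 'x86_64' & 'aarch64' arch-repos updates both, so the returned list contains
+// both architectures (plus default_arch if it wasn't present yet).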
// add_pkg_in_arch_repo is the function that actually adds a package to a given
// arch-repo. It records the package's data in the arch-repo's desc & files
// files, and afterwards updates the db & files archives to reflect these
-// changes.
-fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! {
- pkg_dir := os.join_path(r.repos_dir, repo, arch, '${pkg.info.name}-${pkg.info.version}')
+// changes. The function returns false if the package was already present in
+// the repo, and true otherwise.
+fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ?bool {
+ pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
// Remove the previous version of the package, if present
- r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)!
+ r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)?
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }
- os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()!) or {
- os.rmdir_all(pkg_dir)!
+ os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()?) or {
+ os.rmdir_all(pkg_dir)?
return error('Failed to write desc file.')
}
os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or {
- os.rmdir_all(pkg_dir)!
+ os.rmdir_all(pkg_dir)?
return error('Failed to write files file.')
}
- r.sync(repo, arch)!
+ r.sync(repo, arch)?
+
+ return true
+}
+
+// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
+// returns false if the package wasn't present in the database. It also
+// optionally re-syncs the repo archives.
+fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool {
+ repo_dir := os.join_path(r.repos_dir, repo, arch)
+
+ // If the repository doesn't exist yet, the result is automatically false
+ if !os.exists(repo_dir) {
+ return false
+ }
+
+ // We iterate over every directory in the repo dir
+ // TODO filter so we only check directories
+ for d in os.ls(repo_dir)? {
+ // Because a repository only allows a single version of each package,
+ // we need only compare whether the name of the package is the same,
+ // not the version.
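+		// (e.g. a hypothetical dir 'vieter-0.3.0-1' yields the name 'vieter')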
+ name := d.split('-')#[..-2].join('-')
+
+ if name == pkg_name {
+ // We lock the mutex here to prevent other routines from creating a
+ // new archive while we remove an entry
+ lock r.mutex {
+ os.rmdir_all(os.join_path_single(repo_dir, d))?
+ }
+
+ // Also remove the package archive
+ repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
+
+ archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name)
+
+ for archive_name in archives {
+ full_path := os.join_path_single(repo_pkg_dir, archive_name)
+ os.rm(full_path)?
+ }
+
+ // Sync the db archives if requested
+ if sync {
+ r.sync(repo, arch)?
+ }
+
+ return true
+ }
+ }
+
+ return false
}
diff --git a/src/repo/sync.v b/src/repo/sync.v
index 9554748..73d21c8 100644
--- a/src/repo/sync.v
+++ b/src/repo/sync.v
@@ -32,7 +32,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
}
// sync regenerates the repository archive files.
-fn (r &RepoGroupManager) sync(repo string, arch string) ! {
+fn (r &RepoGroupManager) sync(repo string, arch string) ? {
subrepo_path := os.join_path(r.repos_dir, repo, arch)
lock r.mutex {
@@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ! {
C.archive_write_open_filename(a_files, &char(files_path.str))
// Iterate over each directory
- for d in os.ls(subrepo_path)!.filter(os.is_dir(os.join_path_single(subrepo_path,
+ for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path,
it))) {
// desc
mut inner_path := os.join_path_single(d, 'desc')
diff --git a/src/response/response.v b/src/response/response.v
new file mode 100644
index 0000000..a06a589
--- /dev/null
+++ b/src/response/response.v
@@ -0,0 +1,34 @@
+module response
+
+pub struct Response<T> {
+pub:
+ message string
+ data T
+}
+
+// new_response constructs a new Response object with the given message
+// & an empty data field.
+pub fn new_response(message string) Response<string> {
+	return Response<string>{
+ message: message
+ data: ''
+ }
+}
+
+// new_data_response constructs a new Response object with the given data
+// & an empty message field.
+pub fn new_data_response<T>(data T) Response<T> {
+	return Response<T>{
+ message: ''
+ data: data
+ }
+}
+
+// new_full_response constructs a new Response object with the given
+// message & data.
+pub fn new_full_response<T>(message string, data T) Response<T> {
+	return Response<T>{
+ message: message
+ data: data
+ }
+}
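+
+// Usage sketch (hypothetical call site): an endpoint wraps its payload before
+// JSON serialization, e.g. app.json(http.Status.ok, new_data_response(logs)),
+// which encodes to '{"message":"","data":[...]}'.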
diff --git a/src/server/api_jobs.v b/src/server/api_jobs.v
deleted file mode 100644
index 62bcb27..0000000
--- a/src/server/api_jobs.v
+++ /dev/null
@@ -1,49 +0,0 @@
-module server
-
-import web
-import web.response { new_data_response, new_response }
-
-// v1_poll_job_queue allows agents to poll for new build jobs.
-['/api/v1/jobs/poll'; auth; get; markused]
-fn (mut app App) v1_poll_job_queue() web.Result {
- arch := app.query['arch'] or {
- return app.json(.bad_request, new_response('Missing arch query arg.'))
- }
-
- max_str := app.query['max'] or {
- return app.json(.bad_request, new_response('Missing max query arg.'))
- }
- max := max_str.int()
-
- mut out := app.job_queue.pop_n(arch, max).map(it.config)
-
- return app.json(.ok, new_data_response(out))
-}
-
-// v1_queue_job allows queueing a new one-time build job for the given target.
-['/api/v1/jobs/queue'; auth; markused; post]
-fn (mut app App) v1_queue_job() web.Result {
- target_id := app.query['target'] or {
- return app.json(.bad_request, new_response('Missing target query arg.'))
- }.int()
-
- arch := app.query['arch'] or {
- return app.json(.bad_request, new_response('Missing arch query arg.'))
- }
-
- if arch == '' {
- app.json(.bad_request, new_response('Empty arch query arg.'))
- }
-
- force := 'force' in app.query
-
- target := app.db.get_target(target_id) or {
- return app.json(.bad_request, new_response('Unknown target id.'))
- }
-
- app.job_queue.insert(target: target, arch: arch, single: true, now: true, force: force) or {
- return app.status(.internal_server_error)
- }
-
- return app.status(.ok)
-}
diff --git a/src/server/api_logs.v b/src/server/api_logs.v
index 00a7e2e..fa3338e 100644
--- a/src/server/api_logs.v
+++ b/src/server/api_logs.v
@@ -1,8 +1,10 @@
module server
import web
+import net.http
import net.urllib
-import web.response { new_data_response, new_response }
+import response { new_data_response, new_response }
+import db
import time
import os
import util
@@ -10,28 +12,40 @@ import models { BuildLog, BuildLogFilter }
// v1_get_logs returns all build logs in the database. A 'target' query param can
// optionally be added to limit the list of build logs to that target.
-['/api/v1/logs'; auth; get; markused]
+['/api/v1/logs'; get]
fn (mut app App) v1_get_logs() web.Result {
- filter := models.from_params[BuildLogFilter](app.query) or {
- return app.json(.bad_request, new_response('Invalid query parameters.'))
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
+	filter := models.from_params<BuildLogFilter>(app.query) or {
+ return app.json(http.Status.bad_request, new_response('Invalid query parameters.'))
}
logs := app.db.get_build_logs(filter)
- return app.json(.ok, new_data_response(logs))
+ return app.json(http.Status.ok, new_data_response(logs))
}
// v1_get_single_log returns the build log with the given id.
-['/api/v1/logs/:id'; auth; get; markused]
+['/api/v1/logs/:id'; get]
fn (mut app App) v1_get_single_log(id int) web.Result {
- log := app.db.get_build_log(id) or { return app.status(.not_found) }
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
- return app.json(.ok, new_data_response(log))
+ log := app.db.get_build_log(id) or { return app.not_found() }
+
+ return app.json(http.Status.ok, new_data_response(log))
}
// v1_get_log_content returns the actual build log file for the given id.
-['/api/v1/logs/:id/content'; auth; get; markused]
+['/api/v1/logs/:id/content'; get]
fn (mut app App) v1_get_log_content(id int) web.Result {
- log := app.db.get_build_log(id) or { return app.status(.not_found) }
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
+ log := app.db.get_build_log(id) or { return app.not_found() }
file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.target_id.str(), log.arch,
file_name)
@@ -41,39 +55,43 @@ fn (mut app App) v1_get_log_content(id int) web.Result {
// parse_query_time unescapes an HTTP query parameter & tries to parse it as a
// time.Time struct.
-fn parse_query_time(query string) !time.Time {
- unescaped := urllib.query_unescape(query)!
- t := time.parse(unescaped)!
+fn parse_query_time(query string) ?time.Time {
+ unescaped := urllib.query_unescape(query)?
+ t := time.parse(unescaped)?
return t
}
// v1_post_log adds a new log to the database.
-['/api/v1/logs'; auth; markused; post]
+['/api/v1/logs'; post]
fn (mut app App) v1_post_log() web.Result {
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
// Parse query params
start_time_int := app.query['startTime'].int()
if start_time_int == 0 {
- return app.json(.bad_request, new_response('Invalid or missing start time.'))
+ return app.json(http.Status.bad_request, new_response('Invalid or missing start time.'))
}
start_time := time.unix(start_time_int)
end_time_int := app.query['endTime'].int()
if end_time_int == 0 {
- return app.json(.bad_request, new_response('Invalid or missing end time.'))
+ return app.json(http.Status.bad_request, new_response('Invalid or missing end time.'))
}
end_time := time.unix(end_time_int)
if 'exitCode' !in app.query {
- return app.json(.bad_request, new_response('Missing exit code.'))
+ return app.json(http.Status.bad_request, new_response('Missing exit code.'))
}
exit_code := app.query['exitCode'].int()
if 'arch' !in app.query {
- return app.json(.bad_request, new_response("Missing parameter 'arch'."))
+ return app.json(http.Status.bad_request, new_response("Missing parameter 'arch'."))
}
arch := app.query['arch']
@@ -81,11 +99,11 @@ fn (mut app App) v1_post_log() web.Result {
target_id := app.query['target'].int()
if !app.db.target_exists(target_id) {
- return app.json(.bad_request, new_response('Unknown target.'))
+ return app.json(http.Status.bad_request, new_response('Unknown target.'))
}
// Store log in db
- mut log := BuildLog{
+ log := BuildLog{
target_id: target_id
start_time: start_time
end_time: end_time
@@ -93,45 +111,32 @@ fn (mut app App) v1_post_log() web.Result {
exit_code: exit_code
}
- // id of newly created log
- log.id = app.db.add_build_log(log)
- log_file_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
+ app.db.add_build_log(log)
+
+ repo_logs_dir := os.join_path(app.conf.data_dir, logs_dir_name, target_id.str(), arch)
	// Create the logs directory if it doesn't exist
- if !os.exists(os.dir(log_file_path)) {
- os.mkdir_all(os.dir(log_file_path)) or {
- app.lerror('Error while creating log file: ${err.msg()}')
+ if !os.exists(repo_logs_dir) {
+ os.mkdir_all(repo_logs_dir) or {
+ app.lerror("Couldn't create dir '$repo_logs_dir'.")
- return app.status(.internal_server_error)
+			return app.json(http.Status.internal_server_error, new_response('An error occurred while processing the request.'))
}
}
+ // Stream log contents to correct file
+ file_name := start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
+ full_path := os.join_path_single(repo_logs_dir, file_name)
+
if length := app.req.header.get(.content_length) {
- util.reader_to_file(mut app.reader, length.int(), log_file_path) or {
-			app.lerror('An error occurred while receiving logs: ${err.msg()}')
+ util.reader_to_file(mut app.reader, length.int(), full_path) or {
+			app.lerror('An error occurred while receiving logs: $err.msg()')
- return app.status(.internal_server_error)
+ return app.json(http.Status.internal_server_error, new_response('Failed to upload logs.'))
}
} else {
- return app.status(.length_required)
+ return app.status(http.Status.length_required)
}
- return app.json(.ok, new_data_response(log.id))
-}
-
-// v1_delete_log allows removing a build log from the system.
-['/api/v1/logs/:id'; auth; delete; markused]
-fn (mut app App) v1_delete_log(id int) web.Result {
- log := app.db.get_build_log(id) or { return app.status(.not_found) }
- full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
-
- os.rm(full_path) or {
- app.lerror('Failed to remove log file ${full_path}: ${err.msg()}')
-
- return app.status(.internal_server_error)
- }
-
- app.db.delete_build_log(id)
-
- return app.status(.ok)
+ return app.json(http.Status.ok, new_response('Logs added successfully.'))
}
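+
+// A hypothetical client call for this endpoint (all values are placeholders):
+//   curl -XPOST -H 'X-Api-Key: secret' --data-binary @build.log \
+//     'https://example.com/api/v1/logs?startTime=1&endTime=2&exitCode=0&arch=x86_64&target=1'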
diff --git a/src/server/api_metrics.v b/src/server/api_metrics.v
deleted file mode 100644
index 5ba0452..0000000
--- a/src/server/api_metrics.v
+++ /dev/null
@@ -1,19 +0,0 @@
-module server
-
-import metrics
-import web
-
-// v1_metrics serves a Prometheus-compatible metrics endpoint.
-['/api/v1/metrics'; get; markused]
-fn (mut app App) v1_metrics() web.Result {
- if !app.conf.collect_metrics {
- return app.status(.not_found)
- }
-
- mut exporter := metrics.new_prometheus_exporter()
- exporter.load('vieter_', app.collector)
-
- // TODO stream to connection instead
- body := exporter.export_to_string() or { return app.status(.internal_server_error) }
- return app.body(.ok, 'text/plain', body)
-}
diff --git a/src/server/api_targets.v b/src/server/api_targets.v
index ed121d9..3867c94 100644
--- a/src/server/api_targets.v
+++ b/src/server/api_targets.v
@@ -1,70 +1,86 @@
module server
import web
-import web.response { new_data_response, new_response }
+import net.http
+import response { new_data_response, new_response }
+import db
import models { Target, TargetArch, TargetFilter }
// v1_get_targets returns the current list of targets.
-['/api/v1/targets'; auth; get; markused]
+['/api/v1/targets'; get]
fn (mut app App) v1_get_targets() web.Result {
- filter := models.from_params[TargetFilter](app.query) or {
- return app.json(.bad_request, new_response('Invalid query parameters.'))
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
- mut iter := app.db.targets(filter)
- return app.json(.ok, new_data_response(iter.collect()))
+	filter := models.from_params<TargetFilter>(app.query) or {
+ return app.json(http.Status.bad_request, new_response('Invalid query parameters.'))
+ }
+ repos := app.db.get_targets(filter)
+
+ return app.json(http.Status.ok, new_data_response(repos))
}
// v1_get_single_target returns the information for a single target.
-['/api/v1/targets/:id'; auth; get; markused]
+['/api/v1/targets/:id'; get]
fn (mut app App) v1_get_single_target(id int) web.Result {
- target := app.db.get_target(id) or { return app.status(.not_found) }
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
- return app.json(.ok, new_data_response(target))
+ repo := app.db.get_target(id) or { return app.not_found() }
+
+ return app.json(http.Status.ok, new_data_response(repo))
}
// v1_post_target creates a new target from the provided query string.
-['/api/v1/targets'; auth; markused; post]
+['/api/v1/targets'; post]
fn (mut app App) v1_post_target() web.Result {
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
mut params := app.query.clone()
- // If a target is created without specifying the arch, we assume it's meant
+ // If a repo is created without specifying the arch, we assume it's meant
// for the default architecture.
- if 'arch' !in params || params['arch'] == '' {
+ if 'arch' !in params {
params['arch'] = app.conf.default_arch
}
- mut new_target := models.from_params[Target](params) or {
- return app.json(.bad_request, new_response(err.msg()))
+	new_repo := models.from_params<Target>(params) or {
+ return app.json(http.Status.bad_request, new_response(err.msg()))
}
// Ensure someone doesn't submit an invalid kind
- if new_target.kind !in models.valid_kinds {
- return app.json(.bad_request, new_response('Invalid kind.'))
+ if new_repo.kind !in models.valid_kinds {
+ return app.json(http.Status.bad_request, new_response('Invalid kind.'))
}
- id := app.db.add_target(new_target)
- new_target.id = id
+ app.db.add_target(new_repo)
- // Add the target to the job queue
- // TODO return better error here if it's the cron schedule that's incorrect
- app.job_queue.insert_all(new_target) or { return app.status(.internal_server_error) }
-
- return app.json(.ok, new_data_response(id))
+ return app.json(http.Status.ok, new_response('Repo added successfully.'))
}
// v1_delete_target removes a given target from the server's list.
-['/api/v1/targets/:id'; auth; delete; markused]
+['/api/v1/targets/:id'; delete]
fn (mut app App) v1_delete_target(id int) web.Result {
- app.db.delete_target(id)
- app.job_queue.invalidate(id)
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
- return app.status(.ok)
+ app.db.delete_target(id)
+
+ return app.json(http.Status.ok, new_response('Repo removed successfully.'))
}
// v1_patch_target updates a target's data with the given query params.
-['/api/v1/targets/:id'; auth; markused; patch]
+['/api/v1/targets/:id'; patch]
fn (mut app App) v1_patch_target(id int) web.Result {
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
app.db.update_target(id, app.query)
if 'arch' in app.query {
@@ -73,10 +89,5 @@ fn (mut app App) v1_patch_target(id int) web.Result {
app.db.update_target_archs(id, arch_objs)
}
- target := app.db.get_target(id) or { return app.status(.internal_server_error) }
-
- app.job_queue.invalidate(id)
- app.job_queue.insert_all(target) or { return app.status(.internal_server_error) }
-
- return app.json(.ok, new_data_response(target))
+ return app.json(http.Status.ok, new_response('Repo updated successfully.'))
}
diff --git a/src/server/auth.v b/src/server/auth.v
new file mode 100644
index 0000000..7c8a676
--- /dev/null
+++ b/src/server/auth.v
@@ -0,0 +1,12 @@
+module server
+
+import net.http
+
+// is_authorized checks whether the provided API key is correct.
+fn (mut app App) is_authorized() bool {
+ x_header := app.req.header.get_custom('X-Api-Key', http.HeaderQueryConfig{ exact: true }) or {
+ return false
+ }
+
+ return x_header.trim_space() == app.conf.api_key
+}
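+
+// A request is authorized when its X-Api-Key header exactly matches the
+// configured api_key, e.g. (hypothetical key & host):
+//   curl -H 'X-Api-Key: secret' https://example.com/api/v1/targets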
diff --git a/src/server/cli.v b/src/server/cli.v
index abb5fe3..6fd09c5 100644
--- a/src/server/cli.v
+++ b/src/server/cli.v
@@ -1,22 +1,16 @@
module server
import cli
-import conf as vconf
+import vieter_v.conf as vconf
struct Config {
pub:
- port int = 8000
- log_level string = 'WARN'
- pkg_dir string
- data_dir string
- api_key string
- default_arch string
- global_schedule string = '0 3'
- base_image string = 'archlinux:base-devel'
- max_log_age int [empty_default]
- log_removal_schedule string = '0 0'
- collect_metrics bool [empty_default]
- default_build_timeout int = 3600
+ log_level string = 'WARN'
+ pkg_dir string
+ data_dir string
+ api_key string
+ default_arch string
+ port int = 8000
}
// cmd returns the cli submodule that handles starting the server
@@ -24,11 +18,11 @@ pub fn cmd() cli.Command {
return cli.Command{
name: 'server'
description: 'Start the Vieter server.'
- execute: fn (cmd cli.Command) ! {
- config_file := cmd.flags.get_string('config-file')!
- conf_ := vconf.load[Config](prefix: 'VIETER_', default_path: config_file)!
+ execute: fn (cmd cli.Command) ? {
+ config_file := cmd.flags.get_string('config-file')?
+			conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)?
- server(conf_)!
+ server(conf)?
}
}
}
diff --git a/src/server/log_removal.v b/src/server/log_removal.v
deleted file mode 100644
index bc51bcf..0000000
--- a/src/server/log_removal.v
+++ /dev/null
@@ -1,53 +0,0 @@
-module server
-
-import time
-import models { BuildLog }
-import os
-import cron
-
-const fallback_log_removal_frequency = 24 * time.hour
-
-// log_removal_daemon removes old build logs every `log_removal_frequency`.
-fn (mut app App) log_removal_daemon(schedule &cron.Expression) {
- for {
- mut too_old_timestamp := time.now().add_days(-app.conf.max_log_age)
-
- app.linfo('Cleaning logs before ${too_old_timestamp}')
-
- mut logs := []BuildLog{}
- mut counter := 0
- mut failed := u64(0)
-
- // Remove old logs
- for {
- // The offset is used to skip logs that failed to remove. Besides
- // this, we don't need to move the offset, because all previously
- // oldest logs will have been removed.
- logs = app.db.get_build_logs(before: too_old_timestamp, offset: failed, limit: 50)
-
- for log in logs {
- log_file_path := os.join_path(app.conf.data_dir, logs_dir_name, log.path())
-
- os.rm(log_file_path) or {
- app.lerror('Failed to remove log file ${log_file_path}: ${err.msg()}')
- failed += 1
-
- continue
- }
- app.db.delete_build_log(log.id)
-
- counter += 1
- }
-
- if logs.len < 50 {
- break
- }
- }
-
- app.linfo('Cleaned ${counter} logs (${failed} failed)')
-
- // Sleep until the next cycle
- next_time := schedule.next_from_now()
- time.sleep(next_time - time.now())
- }
-}
diff --git a/src/server/repo.v b/src/server/repo.v
deleted file mode 100644
index 8f8270d..0000000
--- a/src/server/repo.v
+++ /dev/null
@@ -1,96 +0,0 @@
-module server
-
-import web
-import os
-import repo
-import time
-import rand
-import util
-import web.response { new_data_response, new_response }
-
-// healthcheck just returns a string, but can be used to quickly check if the
-// server is still responsive.
-['/health'; get; markused]
-pub fn (mut app App) healthcheck() web.Result {
- return app.json(.ok, new_response('Healthy.'))
-}
-
-// get_repo_file handles all Pacman-related routes. It returns both the
-// repository's archives, but also package archives or the contents of a
-// package's desc file.
-['/:repo/:arch/:filename'; get; head; markused]
-fn (mut app App) get_repo_file(repo_ string, arch string, filename string) web.Result {
- mut full_path := ''
-
- db_exts := ['.db', '.files', '.db.tar.gz', '.files.tar.gz']
-
- // There's no point in having the ability to serve db archives with wrong
- // filenames
- if db_exts.any(filename == '${repo_}${it}') {
- full_path = os.join_path(app.repo.repos_dir, repo_, arch, filename)
-
- // repo-add does this using symlinks, but we just change the requested
- // path
- if !full_path.ends_with('.tar.gz') {
- full_path += '.tar.gz'
- }
- } else if filename.contains('.pkg') {
- full_path = os.join_path(app.repo.pkg_dir, repo_, arch, filename)
- }
- // Default behavior is to return the desc file for the package, if present.
- // This can then also be used by the build system to properly check whether
- // a package is present in an arch-repo.
- else {
- full_path = os.join_path(app.repo.repos_dir, repo_, arch, filename, 'desc')
- }
-
- return app.file(full_path)
-}
-
-// put_package handles publishing a package to a repository.
-['/:repo/publish'; auth; markused; post]
-fn (mut app App) put_package(repo_ string) web.Result {
- // api is a reserved keyword for api routes & should never be allowed to be
- // a repository.
- if repo_.to_lower() == 'api' {
- return app.json(.bad_request, new_response("'api' is a reserved keyword & cannot be used as a repository name."))
- }
-
- mut pkg_path := ''
-
- if length := app.req.header.get(.content_length) {
- // Generate a random filename for the temp file
- pkg_path = os.join_path_single(app.repo.pkg_dir, rand.uuid_v4())
-
- app.ldebug("Uploading ${length} bytes (${util.pretty_bytes(length.int())}) to '${pkg_path}'.")
-
- // This is used to time how long it takes to upload a file
- mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
-
- util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
- app.lwarn("Failed to upload '${pkg_path}': ${err.msg()}")
-
- return app.status(.internal_server_error)
- }
-
- sw.stop()
- app.ldebug("Upload of '${pkg_path}' completed in ${sw.elapsed().seconds():.3}s.")
- } else {
- app.lwarn('Tried to upload package without specifying a Content-Length.')
-
- // length required
- return app.status(.length_required)
- }
-
- res := app.repo.add_pkg_from_path(repo_, pkg_path) or {
- app.lerror('Error while adding package: ${err.msg()}')
-
- os.rm(pkg_path) or { app.lerror("Failed to remove download '${pkg_path}': ${err.msg()}") }
-
- return app.status(.internal_server_error)
- }
-
- app.linfo("Added '${res.name}-${res.version}' to '${repo_} (${res.archs.join(',')})'.")
-
- return app.json(.ok, new_data_response(res))
-}
diff --git a/src/server/repo_remove.v b/src/server/repo_remove.v
deleted file mode 100644
index 24baeaf..0000000
--- a/src/server/repo_remove.v
+++ /dev/null
@@ -1,63 +0,0 @@
-module server
-
-import web
-
-// delete_package tries to remove the given package.
-['/:repo/:arch/:pkg'; auth; delete; markused]
-fn (mut app App) delete_package(repo string, arch string, pkg string) web.Result {
- res := app.repo.remove_pkg_from_arch_repo(repo, arch, pkg, true) or {
- app.lerror('Error while deleting package: ${err.msg()}')
-
- return app.status(.internal_server_error)
- }
-
- if res {
- app.linfo("Removed package '${pkg}' from '${repo}/${arch}'")
-
- return app.status(.ok)
- } else {
- app.linfo("Tried removing package '${pkg}' from '${repo}/${arch}', but it doesn't exist.")
-
- return app.status(.not_found)
- }
-}
-
-// delete_arch_repo tries to remove the given arch-repo.
-['/:repo/:arch'; auth; delete; markused]
-fn (mut app App) delete_arch_repo(repo string, arch string) web.Result {
- res := app.repo.remove_arch_repo(repo, arch) or {
- app.lerror('Error while deleting arch-repo: ${err.msg()}')
-
- return app.status(.internal_server_error)
- }
-
- if res {
- app.linfo("Removed arch-repo '${repo}/${arch}'")
-
- return app.status(.ok)
- } else {
- app.linfo("Tried removing '${repo}/${arch}', but it doesn't exist.")
-
- return app.status(.not_found)
- }
-}
-
-// delete_repo tries to remove the given repo.
-['/:repo'; auth; delete; markused]
-fn (mut app App) delete_repo(repo string) web.Result {
- res := app.repo.remove_repo(repo) or {
- app.lerror('Error while deleting repo: ${err.msg()}')
-
- return app.status(.internal_server_error)
- }
-
- if res {
- app.linfo("Removed repo '${repo}'")
-
- return app.status(.ok)
- } else {
- app.linfo("Tried removing '${repo}', but it doesn't exist.")
-
- return app.status(.not_found)
- }
-}
diff --git a/src/server/routes.v b/src/server/routes.v
new file mode 100644
index 0000000..fbf37df
--- /dev/null
+++ b/src/server/routes.v
@@ -0,0 +1,112 @@
+module server
+
+import web
+import os
+import repo
+import time
+import rand
+import util
+import net.http
+import response { new_response }
+
+// healthcheck just returns a string, but can be used to quickly check if the
+// server is still responsive.
+['/health'; get]
+pub fn (mut app App) healthcheck() web.Result {
+ return app.json(http.Status.ok, new_response('Healthy.'))
+}
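+
+// e.g. 'curl https://example.com/health' (hypothetical host) responds with
+// '{"message":"Healthy.","data":""}'.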
+
+// get_repo_file handles all Pacman-related routes. It returns both the
+// repository's archives, but also package archives or the contents of a
+// package's desc file.
+['/:repo/:arch/:filename'; get; head]
+fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
+ mut full_path := ''
+
+ db_exts := ['.db', '.files', '.db.tar.gz', '.files.tar.gz']
+
+ // There's no point in having the ability to serve db archives with wrong
+ // filenames
+ if db_exts.any(filename == '$repo$it') {
+ full_path = os.join_path(app.repo.repos_dir, repo, arch, filename)
+
+ // repo-add does this using symlinks, but we just change the requested
+ // path
+ if !full_path.ends_with('.tar.gz') {
+ full_path += '.tar.gz'
+ }
+ } else if filename.contains('.pkg') {
+ full_path = os.join_path(app.repo.pkg_dir, repo, arch, filename)
+ }
+ // Default behavior is to return the desc file for the package, if present.
+ // This can then also be used by the build system to properly check whether
+ // a package is present in an arch-repo.
+ else {
+ full_path = os.join_path(app.repo.repos_dir, repo, arch, filename, 'desc')
+ }
+
+ // Scuffed way to respond to HEAD requests
+ if app.req.method == http.Method.head {
+ if os.exists(full_path) {
+ return app.status(http.Status.ok)
+ }
+
+ return app.not_found()
+ }
+
+ return app.file(full_path)
+}
+
+// put_package handles publishing a package to a repository.
+['/:repo/publish'; post]
+fn (mut app App) put_package(repo string) web.Result {
+ if !app.is_authorized() {
+ return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
+ }
+
+ mut pkg_path := ''
+
+ if length := app.req.header.get(.content_length) {
+ // Generate a random filename for the temp file
+ pkg_path = os.join_path_single(app.repo.pkg_dir, rand.uuid_v4())
+
+ app.ldebug("Uploading $length bytes (${util.pretty_bytes(length.int())}) to '$pkg_path'.")
+
+ // This is used to time how long it takes to upload a file
+ mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
+
+ util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
+ app.lwarn("Failed to upload '$pkg_path'")
+
+ return app.json(http.Status.internal_server_error, new_response('Failed to upload file.'))
+ }
+
+ sw.stop()
+ app.ldebug("Upload of '$pkg_path' completed in ${sw.elapsed().seconds():.3}s.")
+ } else {
+ app.lwarn('Tried to upload package without specifying a Content-Length.')
+
+ // length required
+ return app.status(http.Status.length_required)
+ }
+
+ res := app.repo.add_pkg_from_path(repo, pkg_path) or {
+ app.lerror('Error while adding package: $err.msg()')
+
+ os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }
+
+ return app.json(http.Status.internal_server_error, new_response('Failed to add package.'))
+ }
+
+ if !res.added {
+ os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }
+
+ app.lwarn("Duplicate package '$res.pkg.full_name()' in repo '$repo'.")
+
+ return app.json(http.Status.bad_request, new_response('File already exists.'))
+ }
+
+ app.linfo("Added '$res.pkg.full_name()' to repo '$repo ($res.pkg.info.arch)'.")
+
+ return app.json(http.Status.ok, new_response('Package added successfully.'))
+}
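+
+// A hypothetical publish call (placeholder repo, key & file name):
+//   curl -XPOST -H 'X-Api-Key: secret' \
+//     --data-binary @vieter-0.3.0-1-x86_64.pkg.tar.zst \
+//     https://example.com/somerepo/publish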
diff --git a/src/server/server.v b/src/server/server.v
index e1516fa..1a9df3f 100644
--- a/src/server/server.v
+++ b/src/server/server.v
@@ -5,10 +5,7 @@ import os
import log
import repo
import util
-import dbms
-import build { BuildJobQueue }
-import cron
-import metrics
+import db
const (
log_file_name = 'vieter.log'
@@ -23,34 +20,16 @@ pub:
conf Config [required; web_global]
pub mut:
repo repo.RepoGroupManager [required; web_global]
- // Keys are the various architectures for packages
- job_queue BuildJobQueue [required; web_global]
- db dbms.VieterDb
-}
-
-// init_job_queue populates a fresh job queue with all the targets currently
-// stored in the database.
-fn (mut app App) init_job_queue() ! {
- for target in app.db.targets(limit: 0) {
- app.job_queue.insert_all(target)!
- }
+ db db.VieterDb
}
// server starts the web server & starts listening for requests
-pub fn server(conf Config) ! {
+pub fn server(conf Config) ? {
// Prevent using 'any' as the default arch
if conf.default_arch == 'any' {
util.exit_with_message(1, "'any' is not allowed as the value for default_arch.")
}
- global_ce := cron.parse_expression(conf.global_schedule) or {
- util.exit_with_message(1, 'Invalid global cron expression: ${err.msg()}')
- }
-
- log_removal_ce := cron.parse_expression(conf.log_removal_schedule) or {
- util.exit_with_message(1, 'Invalid log removal cron expression: ${err.msg()}')
- }
-
// Configure logger
log_level := log.level_from_tag(conf.log_level) or {
util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
@@ -82,41 +61,20 @@ pub fn server(conf Config) ! {
repo_dir := os.join_path_single(conf.data_dir, server.repo_dir_name)
// This also creates the directories if needed
- repo_ := repo.new(repo_dir, conf.pkg_dir, conf.default_arch) or {
+ repo := repo.new(repo_dir, conf.pkg_dir, conf.default_arch) or {
logger.error(err.msg())
exit(1)
}
db_file := os.join_path_single(conf.data_dir, server.db_file_name)
- db := dbms.init(db_file) or {
- util.exit_with_message(1, 'Failed to initialize database: ${err.msg()}')
+ db := db.init(db_file) or {
+ util.exit_with_message(1, 'Failed to initialize database: $err.msg()')
}
- mut collector := if conf.collect_metrics {
- &metrics.MetricsCollector(metrics.new_default_collector())
- } else {
- &metrics.MetricsCollector(metrics.new_null_collector())
- }
-
- collector.histogram_buckets_set('http_requests_duration_seconds', [0.001, 0.005, 0.01, 0.05,
- 0.1, 0.5, 1, 5, 10])
-
- mut app := &App{
+ web.run(&App{
logger: logger
- api_key: conf.api_key
conf: conf
- repo: repo_
+ repo: repo
db: db
- collector: collector
- job_queue: build.new_job_queue(global_ce, conf.base_image, conf.default_build_timeout)
- }
- app.init_job_queue() or {
-		util.exit_with_message(1, 'Failed to initialize job queue: ${err.msg()}')
- }
-
- if conf.max_log_age > 0 {
- spawn app.log_removal_daemon(log_removal_ce)
- }
-
- web.run(app, conf.port)
+ }, conf.port)
}
diff --git a/src/util/stream.v b/src/util/stream.v
index ef6e872..06397aa 100644
--- a/src/util/stream.v
+++ b/src/util/stream.v
@@ -5,7 +5,7 @@ import io
import os
// reader_to_writer tries to consume the entire reader & write it to the writer.
-pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ! {
+pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? {
mut buf := []u8{len: 10 * 1024}
for {
@@ -21,8 +21,8 @@ pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ! {
}
// reader_to_file writes the contents of a BufferedReader to a file
-pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! {
- mut file := os.create(path)!
+pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
+ mut file := os.create(path)?
defer {
file.close()
}
@@ -46,16 +46,12 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ! {
to_write = to_write - bytes_written
}
}
-
- if bytes_left > 0 {
- return error('Not all bytes were received.')
- }
}
-// match_array_in_array[T] returns how many elements of a2 overlap with a1. For
+// match_array_in_array returns how many elements of a2 overlap with a1. For
// example, if a1 = "abcd" & a2 = "cd", the result will be 2. If the match is
// not at the end of a1, the result is 0.
-pub fn match_array_in_array[T](a1 []T, a2 []T) int {
+pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
mut i := 0
mut match_len := 0
@@ -73,11 +69,11 @@ pub fn match_array_in_array[T](a1 []T, a2 []T) int {
// read_until_separator consumes an io.Reader until it encounters some
// separator array. The data read is stored inside the provided res array.
-pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ! {
+pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
mut buf := []u8{len: sep.len}
for {
- c := reader.read(mut buf)!
+ c := reader.read(mut buf)?
res << buf[..c]
match_len := match_array_in_array(buf[..c], sep)
@@ -88,7 +84,7 @@ pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ! {
if match_len > 0 {
match_left := sep.len - match_len
- c2 := reader.read(mut buf[..match_left])!
+ c2 := reader.read(mut buf[..match_left])?
res << buf[..c2]
if buf[..c2] == sep[match_len..] {
diff --git a/src/util/util.v b/src/util/util.v
index 213104c..4cd374e 100644
--- a/src/util/util.v
+++ b/src/util/util.v
@@ -23,7 +23,7 @@ pub fn exit_with_message(code int, msg string) {
}
// hash_file returns the sha256 hash of a given file
-pub fn hash_file(path &string) !string {
+pub fn hash_file(path &string) ?string {
file := os.open(path) or { return error('Failed to open file.') }
mut sha256sum := sha256.new()
@@ -39,7 +39,7 @@ pub fn hash_file(path &string) !string {
// This function never actually fails, but returns an option to follow
// the Writer interface.
- sha256sum.write(buf[..bytes_read])!
+ sha256sum.write(buf[..bytes_read])?
}
return sha256sum.checksum().hex()
diff --git a/src/v.mod b/src/v.mod
index 461af6a..5b89062 100644
--- a/src/v.mod
+++ b/src/v.mod
@@ -1,8 +1,6 @@
Module {
dependencies: [
'https://git.rustybever.be/vieter-v/conf',
- 'https://git.rustybever.be/vieter-v/docker',
- 'https://git.rustybever.be/vieter-v/aur',
- 'https://git.rustybever.be/vieter-v/metrics'
+ 'https://git.rustybever.be/vieter-v/docker'
]
}
diff --git a/src/web/consts.v b/src/web/consts.v
deleted file mode 100644
index df8cdb2..0000000
--- a/src/web/consts.v
+++ /dev/null
@@ -1,133 +0,0 @@
-module web
-
-import net.http
-
-// A dummy structure that returns from routes to indicate that you actually sent something to a user
-[noinit]
-pub struct Result {}
-
-pub const (
- methods_with_form = [http.Method.post, .put, .patch]
- headers_close = http.new_custom_header_from_map({
- 'Server': 'Vieter'
- http.CommonHeader.connection.str(): 'close'
- }) or { panic('should never fail') }
-
- http_302 = http.new_response(
- status: .found
- body: '302 Found'
- header: headers_close
- )
- http_400 = http.new_response(
- status: .bad_request
- body: '400 Bad Request'
- header: http.new_header(
- key: .content_type
- value: 'text/plain'
- ).join(headers_close)
- )
- http_401 = http.new_response(
- status: .unauthorized
- body: '401 Unauthorized'
- header: http.new_header(
- key: .content_type
- value: 'text/plain'
- ).join(headers_close)
- )
- http_404 = http.new_response(
- status: .not_found
- body: '404 Not Found'
- header: http.new_header(
- key: .content_type
- value: 'text/plain'
- ).join(headers_close)
- )
- http_500 = http.new_response(
- status: .internal_server_error
- body: '500 Internal Server Error'
- header: http.new_header(
- key: .content_type
- value: 'text/plain'
- ).join(headers_close)
- )
- mime_types = {
- '.aac': 'audio/aac'
- '.abw': 'application/x-abiword'
- '.arc': 'application/x-freearc'
- '.avi': 'video/x-msvideo'
- '.azw': 'application/vnd.amazon.ebook'
- '.bin': 'application/octet-stream'
- '.bmp': 'image/bmp'
- '.bz': 'application/x-bzip'
- '.bz2': 'application/x-bzip2'
- '.cda': 'application/x-cdf'
- '.csh': 'application/x-csh'
- '.css': 'text/css'
- '.csv': 'text/csv'
- '.doc': 'application/msword'
- '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
- '.eot': 'application/vnd.ms-fontobject'
- '.epub': 'application/epub+zip'
- '.gz': 'application/gzip'
- '.gif': 'image/gif'
- '.htm': 'text/html'
- '.html': 'text/html'
- '.ico': 'image/vnd.microsoft.icon'
- '.ics': 'text/calendar'
- '.jar': 'application/java-archive'
- '.jpeg': 'image/jpeg'
- '.jpg': 'image/jpeg'
- '.js': 'text/javascript'
- '.json': 'application/json'
- '.jsonld': 'application/ld+json'
- '.mid': 'audio/midi audio/x-midi'
- '.midi': 'audio/midi audio/x-midi'
- '.mjs': 'text/javascript'
- '.mp3': 'audio/mpeg'
- '.mp4': 'video/mp4'
- '.mpeg': 'video/mpeg'
- '.mpkg': 'application/vnd.apple.installer+xml'
- '.odp': 'application/vnd.oasis.opendocument.presentation'
- '.ods': 'application/vnd.oasis.opendocument.spreadsheet'
- '.odt': 'application/vnd.oasis.opendocument.text'
- '.oga': 'audio/ogg'
- '.ogv': 'video/ogg'
- '.ogx': 'application/ogg'
- '.opus': 'audio/opus'
- '.otf': 'font/otf'
- '.png': 'image/png'
- '.pdf': 'application/pdf'
- '.php': 'application/x-httpd-php'
- '.ppt': 'application/vnd.ms-powerpoint'
- '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
- '.rar': 'application/vnd.rar'
- '.rtf': 'application/rtf'
- '.sh': 'application/x-sh'
- '.svg': 'image/svg+xml'
- '.swf': 'application/x-shockwave-flash'
- '.tar': 'application/x-tar'
- '.tif': 'image/tiff'
- '.tiff': 'image/tiff'
- '.ts': 'video/mp2t'
- '.ttf': 'font/ttf'
- '.txt': 'text/plain'
- '.vsd': 'application/vnd.visio'
- '.wav': 'audio/wav'
- '.weba': 'audio/webm'
- '.webm': 'video/webm'
- '.webp': 'image/webp'
- '.woff': 'font/woff'
- '.woff2': 'font/woff2'
- '.xhtml': 'application/xhtml+xml'
- '.xls': 'application/vnd.ms-excel'
- '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
- '.xml': 'application/xml'
- '.xul': 'application/vnd.mozilla.xul+xml'
- '.zip': 'application/zip'
- '.3gp': 'video/3gpp'
- '.3g2': 'video/3gpp2'
- '.7z': 'application/x-7z-compressed'
- }
- max_http_post_size = 1024 * 1024
- default_port = 8080
-)
diff --git a/src/web/logging.v b/src/web/logging.v
index 7ba649c..fc697ff 100644
--- a/src/web/logging.v
+++ b/src/web/logging.v
@@ -1,36 +1,35 @@
module web
-// lfatal create a log message with the fatal level
-pub fn (mut ctx Context) lfatal(msg string) {
+import log
+
+// log creates a log message with the given level
+pub fn (mut ctx Context) log(msg &string, level log.Level) {
lock ctx.logger {
- ctx.logger.fatal(msg)
+ ctx.logger.send_output(msg, level)
}
}
+// lfatal creates a log message with the fatal level
+pub fn (mut ctx Context) lfatal(msg &string) {
+ ctx.log(msg, log.Level.fatal)
+}
+
// lerror creates a log message with the error level
-pub fn (mut ctx Context) lerror(msg string) {
- lock ctx.logger {
- ctx.logger.error(msg)
- }
+pub fn (mut ctx Context) lerror(msg &string) {
+ ctx.log(msg, log.Level.error)
}
// lwarn creates a log message with the warn level
-pub fn (mut ctx Context) lwarn(msg string) {
- lock ctx.logger {
- ctx.logger.warn(msg)
- }
+pub fn (mut ctx Context) lwarn(msg &string) {
+ ctx.log(msg, log.Level.warn)
}
// linfo creates a log message with the info level
-pub fn (mut ctx Context) linfo(msg string) {
- lock ctx.logger {
- ctx.logger.info(msg)
- }
+pub fn (mut ctx Context) linfo(msg &string) {
+ ctx.log(msg, log.Level.info)
}
// ldebug creates a log message with the debug level
-pub fn (mut ctx Context) ldebug(msg string) {
- lock ctx.logger {
- ctx.logger.debug(msg)
- }
+pub fn (mut ctx Context) ldebug(msg &string) {
+ ctx.log(msg, log.Level.debug)
}
diff --git a/src/web/parse.v b/src/web/parse.v
index 9e26f85..a095f0c 100644
--- a/src/web/parse.v
+++ b/src/web/parse.v
@@ -3,14 +3,10 @@ module web
import net.urllib
import net.http
-// Method attributes that should be ignored when parsing, as they're used
-// elsewhere.
-const attrs_to_ignore = ['auth', 'markused']
-
// Parsing function attributes for methods and path.
-fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
+fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
if attrs.len == 0 {
- return [http.Method.get], '/${name}'
+ return [http.Method.get], '/$name'
}
mut x := attrs.clone()
@@ -36,7 +32,7 @@ fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
}
i++
}
- if x.len > 0 && x.any(!web.attrs_to_ignore.contains(it)) {
+ if x.len > 0 {
return IError(http.UnexpectedExtraAttributeError{
attributes: x
})
@@ -45,7 +41,7 @@ fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
methods = [http.Method.get]
}
if path == '' {
- path = '/${name}'
+ path = '/$name'
}
// Make path lowercase for case-insensitive comparisons
return methods, path.to_lower()
@@ -61,7 +57,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
}
// Extract form data from an HTTP request.
-fn parse_form_from_request(request http.Request) !(map[string]string, map[string][]http.FileData) {
+fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) {
mut form := map[string]string{}
mut files := map[string][]http.FileData{}
if request.method in methods_with_form {
diff --git a/src/web/response/response.v b/src/web/response/response.v
deleted file mode 100644
index c1475ff..0000000
--- a/src/web/response/response.v
+++ /dev/null
@@ -1,34 +0,0 @@
-module response
-
-pub struct Response[T] {
-pub:
- message string
- data T
-}
-
-// new_response constructs a new Response object with the given message
-// & an empty data field.
-pub fn new_response(message string) Response[string] {
- return Response[string]{
- message: message
- data: ''
- }
-}
-
-// new_data_response[T] constructs a new Response object with the given data
-// & an empty message field.
-pub fn new_data_response[T](data T) Response[T] {
- return Response[T]{
- message: ''
- data: data
- }
-}
-
-// new_full_response[T] constructs a new Response object with the given
-// message & data.
-pub fn new_full_response[T](message string, data T) Response[T] {
- return Response[T]{
- message: message
- data: data
- }
-}
diff --git a/src/web/web.v b/src/web/web.v
index 775354a..b053904 100644
--- a/src/web/web.v
+++ b/src/web/web.v
@@ -11,29 +11,147 @@ import net.urllib
import time
import json
import log
-import metrics
+
+// A dummy structure returned from routes to indicate that a response was already sent to the user
+[noinit]
+pub struct Result {}
+
+pub const (
+ methods_with_form = [http.Method.post, .put, .patch]
+ headers_close = http.new_custom_header_from_map({
+ 'Server': 'VWeb'
+ http.CommonHeader.connection.str(): 'close'
+ }) or { panic('should never fail') }
+
+ http_302 = http.new_response(
+ status: .found
+ body: '302 Found'
+ header: headers_close
+ )
+ http_400 = http.new_response(
+ status: .bad_request
+ body: '400 Bad Request'
+ header: http.new_header(
+ key: .content_type
+ value: 'text/plain'
+ ).join(headers_close)
+ )
+ http_404 = http.new_response(
+ status: .not_found
+ body: '404 Not Found'
+ header: http.new_header(
+ key: .content_type
+ value: 'text/plain'
+ ).join(headers_close)
+ )
+ http_500 = http.new_response(
+ status: .internal_server_error
+ body: '500 Internal Server Error'
+ header: http.new_header(
+ key: .content_type
+ value: 'text/plain'
+ ).join(headers_close)
+ )
+ mime_types = {
+ '.aac': 'audio/aac'
+ '.abw': 'application/x-abiword'
+ '.arc': 'application/x-freearc'
+ '.avi': 'video/x-msvideo'
+ '.azw': 'application/vnd.amazon.ebook'
+ '.bin': 'application/octet-stream'
+ '.bmp': 'image/bmp'
+ '.bz': 'application/x-bzip'
+ '.bz2': 'application/x-bzip2'
+ '.cda': 'application/x-cdf'
+ '.csh': 'application/x-csh'
+ '.css': 'text/css'
+ '.csv': 'text/csv'
+ '.doc': 'application/msword'
+ '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
+ '.eot': 'application/vnd.ms-fontobject'
+ '.epub': 'application/epub+zip'
+ '.gz': 'application/gzip'
+ '.gif': 'image/gif'
+ '.htm': 'text/html'
+ '.html': 'text/html'
+ '.ico': 'image/vnd.microsoft.icon'
+ '.ics': 'text/calendar'
+ '.jar': 'application/java-archive'
+ '.jpeg': 'image/jpeg'
+ '.jpg': 'image/jpeg'
+ '.js': 'text/javascript'
+ '.json': 'application/json'
+ '.jsonld': 'application/ld+json'
+ '.mid': 'audio/midi audio/x-midi'
+ '.midi': 'audio/midi audio/x-midi'
+ '.mjs': 'text/javascript'
+ '.mp3': 'audio/mpeg'
+ '.mp4': 'video/mp4'
+ '.mpeg': 'video/mpeg'
+ '.mpkg': 'application/vnd.apple.installer+xml'
+ '.odp': 'application/vnd.oasis.opendocument.presentation'
+ '.ods': 'application/vnd.oasis.opendocument.spreadsheet'
+ '.odt': 'application/vnd.oasis.opendocument.text'
+ '.oga': 'audio/ogg'
+ '.ogv': 'video/ogg'
+ '.ogx': 'application/ogg'
+ '.opus': 'audio/opus'
+ '.otf': 'font/otf'
+ '.png': 'image/png'
+ '.pdf': 'application/pdf'
+ '.php': 'application/x-httpd-php'
+ '.ppt': 'application/vnd.ms-powerpoint'
+ '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
+ '.rar': 'application/vnd.rar'
+ '.rtf': 'application/rtf'
+ '.sh': 'application/x-sh'
+ '.svg': 'image/svg+xml'
+ '.swf': 'application/x-shockwave-flash'
+ '.tar': 'application/x-tar'
+ '.tif': 'image/tiff'
+ '.tiff': 'image/tiff'
+ '.ts': 'video/mp2t'
+ '.ttf': 'font/ttf'
+ '.txt': 'text/plain'
+ '.vsd': 'application/vnd.visio'
+ '.wav': 'audio/wav'
+ '.weba': 'audio/webm'
+ '.webm': 'video/webm'
+ '.webp': 'image/webp'
+ '.woff': 'font/woff'
+ '.woff2': 'font/woff2'
+ '.xhtml': 'application/xhtml+xml'
+ '.xls': 'application/vnd.ms-excel'
+ '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+ '.xml': 'application/xml'
+ '.xul': 'application/vnd.mozilla.xul+xml'
+ '.zip': 'application/zip'
+ '.3gp': 'video/3gpp'
+ '.3g2': 'video/3gpp2'
+ '.7z': 'application/x-7z-compressed'
+ }
+ max_http_post_size = 1024 * 1024
+ default_port = 8080
+)
// The Context struct represents the Context which hold the HTTP request and response.
// It has fields for the query, form, files.
pub struct Context {
+mut:
+ content_type string = 'text/plain'
+ status http.Status = http.Status.ok
pub:
// HTTP Request
req http.Request
- // API key used when authenticating requests
- api_key string
// TODO Response
pub mut:
- // TCP connection to client.
- // But beware, do not store it for further use, after request processing web will close connection.
- conn &net.TcpConn = unsafe { nil }
- // Gives access to a shared logger object
- logger shared log.Log
- // Used to collect metrics on the web server
- collector &metrics.MetricsCollector
+ done bool
// time.ticks() from start of web connection handle.
// You can use it to determine how much time is spent on your request.
page_gen_start i64
- // REQUEST
+	// TCP connection to the client.
+	// Beware: do not store it for later use; the connection is closed once the request has been processed.
+ conn &net.TcpConn
static_files map[string]string
static_mime_types map[string]string
// Map containing query params for the route.
@@ -43,13 +161,14 @@ pub mut:
form map[string]string
// Files from multipart-form.
files map[string][]http.FileData
+
+ header http.Header // response headers
+ // ? It doesn't seem to be used anywhere
+ form_error string
// Allows reading the request body
- reader &io.BufferedReader = unsafe { nil }
- // RESPONSE
- status http.Status = http.Status.ok
- content_type string = 'text/plain'
- // response headers
- header http.Header
+ reader io.BufferedReader
+ // Gives access to a shared logger object
+ logger shared log.Log
}
struct FileData {
@@ -69,101 +188,50 @@ struct Route {
// You can use it, for example, to check a user session cookie or add a header.
pub fn (ctx Context) before_request() {}
-// send_string writes the given string to the TCP connection socket.
-fn (mut ctx Context) send_string(s string) ! {
- ctx.conn.write(s.bytes())!
+// send_string writes the given string to the TCP connection.
+fn send_string(mut conn net.TcpConn, s string) ? {
+ conn.write(s.bytes())?
}
-// send_reader reads at most `size` bytes from the given reader & writes them
-// to the TCP connection socket. Internally, a 10KB buffer is used, to avoid
-// having to store all bytes in memory at once.
-fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ! {
- mut buf := []u8{len: 10_000}
- mut bytes_left := size
-
- // Repeat as long as the stream still has data
- for bytes_left > 0 {
- bytes_read := reader.read(mut buf)!
- bytes_left -= u64(bytes_read)
-
- mut to_write := bytes_read
-
- for to_write > 0 {
- bytes_written := ctx.conn.write(buf[bytes_read - to_write..bytes_read]) or { break }
-
- to_write = to_write - bytes_written
- }
+// send_response_to_client sends a response with the given MIME type and body
+// to the client, unless a response has already been sent.
+[manualfree]
+pub fn (mut ctx Context) send_response_to_client(mimetype string, res string) bool {
+ if ctx.done {
+ return false
}
-}
+ ctx.done = true
-// send_custom_response sends the given http.Response to the client. It can be
-// used to overwrite the Context object & send a completely custom
-// http.Response instead.
-fn (mut ctx Context) send_custom_response(resp &http.Response) ! {
- ctx.send_string(resp.bytestr())!
-}
+ // build header
+ header := http.new_header_from_map({
+ http.CommonHeader.content_type: mimetype
+ http.CommonHeader.content_length: res.len.str()
+ }).join(ctx.header)
-// send_response_header constructs a valid HTTP response with an empty body &
-// sends it to the client.
-pub fn (mut ctx Context) send_response_header() ! {
- mut resp := http.new_response(
- header: ctx.header.join(headers_close)
- )
- resp.header.add(.content_type, ctx.content_type)
+ mut resp := http.Response{
+ header: header.join(web.headers_close)
+ body: res
+ }
+ resp.set_version(.v1_1)
resp.set_status(ctx.status)
-
- ctx.send_custom_response(resp)!
-}
-
-// send is a convenience function for sending the HTTP response with an empty
-// body.
-pub fn (mut ctx Context) send() bool {
- return ctx.send_response('')
-}
-
-// send_response constructs the resulting HTTP response with the given body
-// string & sends it to the client.
-pub fn (mut ctx Context) send_response(res string) bool {
- ctx.send_response_header() or { return false }
- ctx.send_string(res) or { return false }
-
+ send_string(mut ctx.conn, resp.bytestr()) or { return false }
return true
}
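
// For example (illustrative only): because `ctx.done` is set on the first
// call, a second call on the same Context writes nothing and returns false:
//
// ctx.send_response_to_client('text/plain', 'first')  // sent, returns true
// ctx.send_response_to_client('text/plain', 'again')  // no-op, returns false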
-// send_reader_response constructs the resulting HTTP response with the given
-// body & streams the reader's contents to the client.
-pub fn (mut ctx Context) send_reader_response(mut reader io.Reader, size u64) bool {
- ctx.send_response_header() or { return false }
- ctx.send_reader(mut reader, size) or { return false }
-
- return true
-}
-
-// is_authenticated checks whether the request passes a correct API key.
-pub fn (ctx &Context) is_authenticated() bool {
- if provided_key := ctx.req.header.get_custom('X-Api-Key') {
- return provided_key == ctx.api_key
- }
-
- return false
-}
-
-// body sends the given body as an HTTP response.
-pub fn (mut ctx Context) body(status http.Status, content_type string, body string) Result {
+// text responds to a request with some plaintext.
+pub fn (mut ctx Context) text(status http.Status, s string) Result {
ctx.status = status
- ctx.content_type = content_type
- ctx.send_response(body)
+
+ ctx.send_response_to_client('text/plain', s)
return Result{}
}
-// json[T] HTTP_OK with json_s as payload with content-type `application/json`
-pub fn (mut ctx Context) json[T](status http.Status, j T) Result {
+// json responds to a request with the JSON encoding of `j` as payload, using
+// content-type `application/json`.
+pub fn (mut ctx Context) json<T>(status http.Status, j T) Result {
ctx.status = status
- ctx.content_type = 'application/json'
json_s := json.encode(j)
- ctx.send_response(json_s)
+ ctx.send_response_to_client('application/json', json_s)
return Result{}
}
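
// Illustrative usage (the struct is a made-up example):
//
// struct Health {
// 	online bool
// }
//
// ctx.json(http.Status.ok, Health{online: true})
// // responds with body `{"online":true}` and content-type application/json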
@@ -171,121 +239,135 @@ pub fn (mut ctx Context) json[T](status http.Status, j T) Result {
// file responds to a request with the contents of the given file as payload.
// This function manually implements responses because it needs to stream the file contents
pub fn (mut ctx Context) file(f_path string) Result {
- // If the file doesn't exist, just respond with a 404
+ if ctx.done {
+ return Result{}
+ }
+
if !os.is_file(f_path) {
- ctx.status = .not_found
- ctx.send()
-
- return Result{}
+ return ctx.not_found()
}
- ctx.header.add(.accept_ranges, 'bytes')
+ // ext := os.file_ext(f_path)
+ // data := os.read_file(f_path) or {
+ // eprint(err.msg())
+ // ctx.server_error(500)
+ // return Result{}
+ // }
+ // content_type := web.mime_types[ext]
+ // if content_type == '' {
+ // eprintln('no MIME type found for extension $ext')
+ // ctx.server_error(500)
+ // return Result{}
+ // }
+
+	// First, return the headers for the request.
+
+	// The file is opened before the headers are sent, so that a failure to
+	// open it can still be reported as an error response.
file_size := os.file_size(f_path)
- ctx.header.add(http.CommonHeader.content_length, file_size.str())
- // A HEAD request only returns the size of the file.
- if ctx.req.method == .head {
- ctx.send()
-
- return Result{}
- }
-
- mut file := os.open(f_path) or {
+ file := os.open(f_path) or {
eprintln(err.msg())
ctx.server_error(500)
return Result{}
}
- defer {
- file.close()
- }
-
- // Currently, this only supports a single provided range, e.g.
- // bytes=0-1023, and not multiple ranges, e.g. bytes=0-50, 100-150
- if range_str := ctx.req.header.get(.range) {
- mut parts := range_str.split_nth('=', 2)
-
- // We only support the 'bytes' range type
- if parts[0] != 'bytes' {
- ctx.status = .requested_range_not_satisfiable
- ctx.header.delete(.content_length)
- ctx.send()
- return Result{}
- }
-
- parts = parts[1].split_nth('-', 2)
-
- start := parts[0].i64()
- end := if parts[1] == '' { file_size - 1 } else { parts[1].u64() }
-
- // Either the actual number 0 or the result of an invalid integer
- if end == 0 {
- ctx.status = .requested_range_not_satisfiable
- ctx.header.delete(.content_length)
- ctx.send()
- return Result{}
- }
-
- // Move cursor to start of data to read
- file.seek(start, .start) or {
- ctx.server_error(500)
- return Result{}
- }
-
- length := end - u64(start) + 1
-
- ctx.status = .partial_content
- ctx.header.set(.content_length, length.str())
- ctx.send_reader_response(mut file, length)
- } else {
- ctx.send_reader_response(mut file, file_size)
+ // build header
+ header := http.new_header_from_map({
+ // http.CommonHeader.content_type: content_type
+ http.CommonHeader.content_length: file_size.str()
+ }).join(ctx.header)
+
+ mut resp := http.Response{
+ header: header.join(web.headers_close)
+ }
+ resp.set_version(.v1_1)
+ resp.set_status(ctx.status)
+ send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }
+
+ mut buf := []u8{len: 1_000_000}
+ mut bytes_left := file_size
+
+ // Repeat as long as the stream still has data
+ for bytes_left > 0 {
+ // TODO check if just breaking here is safe
+ bytes_read := file.read(mut buf) or { break }
+ bytes_left -= u64(bytes_read)
+
+ mut to_write := bytes_read
+
+ for to_write > 0 {
+ // TODO don't just loop infinitely here
+ bytes_written := ctx.conn.write(buf[bytes_read - to_write..bytes_read]) or { continue }
+
+ to_write = to_write - bytes_written
+ }
}
+ ctx.done = true
return Result{}
}
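
// Illustrative usage from a route handler (route and path are hypothetical):
//
// ['/pkgs/:filename'; get]
// pub fn (mut app App) package(filename string) web.Result {
// 	return app.file(os.join_path('data/pkgs', filename))
// }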
// status responds with an empty textual response, essentially only returning
// the given status code.
pub fn (mut ctx Context) status(status http.Status) Result {
- ctx.status = status
- ctx.send()
-
- return Result{}
+ return ctx.text(status, '')
}
// server_error responds to a request with a server error.
pub fn (mut ctx Context) server_error(ecode int) Result {
- ctx.send_custom_response(http_500) or {}
-
+ $if debug {
+ eprintln('> ctx.server_error ecode: $ecode')
+ }
+ if ctx.done {
+ return Result{}
+ }
+ send_string(mut ctx.conn, web.http_500.bytestr()) or {}
return Result{}
}
// redirect responds to a request with a redirect to the given URL.
pub fn (mut ctx Context) redirect(url string) Result {
- mut resp := http_302
+ if ctx.done {
+ return Result{}
+ }
+ ctx.done = true
+ mut resp := web.http_302
resp.header = resp.header.join(ctx.header)
resp.header.add(.location, url)
-
- ctx.send_custom_response(resp) or {}
-
+ send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }
return Result{}
}
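
// Illustrative usage: ctx.redirect('/login') sends the canned 302 response
// with an added `Location: /login` header.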
+// not_found sends a 404 Not Found response.
+pub fn (mut ctx Context) not_found() Result {
+ return ctx.status(http.Status.not_found)
+}
+
+// add_header adds a header with the given key and value to the response.
+pub fn (mut ctx Context) add_header(key string, val string) {
+ ctx.header.add_custom(key, val) or {}
+}
+
+// get_header returns the value of the request header with the given key, or
+// an empty string if it isn't present.
+pub fn (ctx &Context) get_header(key string) string {
+ return ctx.req.header.get_custom(key) or { '' }
+}
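+
+// Illustrative usage (the header names are arbitrary examples):
+//
+// ctx.add_header('X-Custom', 'value') // added to the response
+// key := ctx.get_header('X-Api-Key') // read from the request; '' if absent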
+
interface DbInterface {
db voidptr
}
// run runs the app
[manualfree]
-pub fn run[T](global_app &T, port int) {
- mut l := net.listen_tcp(.ip6, ':${port}') or { panic('failed to listen ${err.code()} ${err}') }
+pub fn run<T>(global_app &T, port int) {
+ mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code() $err') }
// Parsing methods attributes
mut routes := map[string]Route{}
$for method in T.methods {
http_methods, route_path := parse_attrs(method.name, method.attrs) or {
- eprintln('error parsing method attributes: ${err}')
+ eprintln('error parsing method attributes: $err')
return
}
@@ -294,7 +376,7 @@ pub fn run[T](global_app &T, port int) {
path: route_path
}
}
- println('[Vweb] Running app on http://localhost:${port}')
+ println('[Vweb] Running app on http://localhost:$port')
for {
	// Create a new app object for each connection; copy global data like db connections
mut request_app := &T{}
@@ -311,16 +393,16 @@ pub fn run[T](global_app &T, port int) {
request_app.Context = global_app.Context // copy the context ref that contains static files map etc
mut conn := l.accept() or {
// failures should not panic
- eprintln('accept() failed with error: ${err.msg()}')
+ eprintln('accept() failed with error: $err.msg()')
continue
}
- spawn handle_conn[T](mut conn, mut request_app, routes)
+	go handle_conn<T>(mut conn, mut request_app, routes)
}
}
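
// Illustrative sketch (not part of this change): a minimal app served by the
// reflection above. The attribute supplies the route; `:name` becomes an
// argument filled in by route_matches below.
//
// struct App {
// 	web.Context
// }
//
// ['/hello/:name'; get]
// pub fn (mut app App) hello(name string) web.Result {
// 	return app.text(http.Status.ok, 'hello $name')
// }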
// handle_conn handles a connection
[manualfree]
-fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
+fn handle_conn<T>(mut conn net.TcpConn, mut app T, routes map[string]Route) {
conn.set_read_timeout(30 * time.second)
conn.set_write_timeout(30 * time.second)
@@ -331,23 +413,6 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
app.logger.flush()
}
- // Record how long request took to process
- path := urllib.parse(app.req.url) or { urllib.URL{} }.path
- labels := [
- ['method', app.req.method.str()]!,
- ['path', path]!,
- // Not all methods properly set this value yet I think
- ['status', app.status.int().str()]!,
- ]
- app.collector.counter_increment(name: 'http_requests_total', labels: labels)
- // Prometheus prefers metrics containing base units, as defined here
- // https://prometheus.io/docs/practices/naming/
- app.collector.histogram_record(f64(time.ticks() - app.page_gen_start) / 1000,
-
- name: 'http_requests_duration_seconds'
- labels: labels
- )
-
unsafe {
free(app)
}
@@ -363,8 +428,8 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// Request parse
head := http.parse_request_head(mut reader) or {
// Prevents errors from being thrown when BufferedReader is empty
- if '${err}' != 'none' {
- eprintln('error parsing request head: ${err}')
+ if '$err' != 'none' {
+ eprintln('error parsing request head: $err')
}
return
}
@@ -372,7 +437,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// The healthcheck spams the logs, which isn't very useful
if head.url != '/health' {
lock app.logger {
- app.logger.debug('${head.method} ${head.url} ${head.version}')
+ app.logger.debug('$head.method $head.url $head.version')
}
}
@@ -386,7 +451,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// URL Parse
url := urllib.parse(head.url) or {
- eprintln('error parsing path: ${err}')
+ eprintln('error parsing path: $err')
return
}
@@ -413,8 +478,6 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
static_mime_types: app.static_mime_types
reader: reader
logger: app.logger
- collector: app.collector
- api_key: app.api_key
}
// Calling middleware...
@@ -424,7 +487,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
$for method in T.methods {
$if method.return_type is Result {
route := routes[method.name] or {
- eprintln('parsed attributes for the `${method.name}` are not found, skipping...')
+ eprintln('parsed attributes for the `$method.name` are not found, skipping...')
Route{}
}
@@ -433,30 +496,34 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
// Used for route matching
route_words := route.path.split('/').filter(it != '')
- // Route immediate matches & index files first
+ // Route immediate matches first
// For example URL `/register` matches route `/:user`, but `fn register()`
// should be called first.
- if (!route.path.contains('/:') && url_words == route_words)
- || (url_words.len == 0 && route_words == ['index'] && method.name == 'index') {
- // Check whether the request is authorised
- if 'auth' in method.attrs && !app.is_authenticated() {
- conn.write(http_401.bytes()) or {}
- return
- }
-
+ if !route.path.contains('/:') && url_words == route_words {
// We found a match
+ if head.method == .post && method.args.len > 0 {
+ // TODO implement POST requests
+ // Populate method args with form values
+ // mut args := []string{cap: method.args.len}
+ // for param in method.args {
+ // args << form[param.name]
+ // }
+ // app.$method(args)
+ } else {
+ app.$method()
+ }
+ return
+ }
+
+ if url_words.len == 0 && route_words == ['index'] && method.name == 'index' {
app.$method()
return
- } else if params := route_matches(url_words, route_words) {
- // Check whether the request is authorised
- if 'auth' in method.attrs && !app.is_authenticated() {
- conn.write(http_401.bytes()) or {}
- return
- }
+ }
+ if params := route_matches(url_words, route_words) {
method_args := params.clone()
if method_args.len != method.args.len {
- eprintln('warning: uneven parameters count (${method.args.len}) in `${method.name}`, compared to the web route `${method.attrs}` (${method_args.len})')
+ eprintln('warning: uneven parameters count ($method.args.len) in `$method.name`, compared to the web route `$method.attrs` ($method_args.len)')
}
app.$method(method_args)
return
@@ -465,7 +532,7 @@ fn handle_conn[T](mut conn net.TcpConn, mut app T, routes map[string]Route) {
}
}
// Route not found
- conn.write(http_404.bytes()) or {}
+ conn.write(web.http_404.bytes()) or {}
}
// route_matches returns whether a route matches
@@ -511,6 +578,28 @@ fn route_matches(url_words []string, route_words []string) ?[]string {
return params
}
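
// For example (illustrative): with route `/logs/:id` and URL `/logs/15`,
// route_matches returns ['15']; a non-matching URL yields none.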
+// ip returns the IP address of the current client.
+pub fn (ctx &Context) ip() string {
+ mut ip := ctx.req.header.get(.x_forwarded_for) or { '' }
+ if ip == '' {
+ ip = ctx.req.header.get_custom('X-Real-Ip') or { '' }
+ }
+
+ if ip.contains(',') {
+ ip = ip.all_before(',')
+ }
+ if ip == '' {
+ ip = ctx.conn.peer_ip() or { '' }
+ }
+ return ip
+}
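+
+// For example (illustrative): with the request header
+// `X-Forwarded-For: 203.0.113.7, 10.0.0.1`, ip() returns '203.0.113.7'. If no
+// proxy headers are set, it falls back to the connection's peer address.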
+
+// error sets the form error to the given string.
+pub fn (mut ctx Context) error(s string) {
+ println('web error: $s')
+ ctx.form_error = s
+}
+
// filter Do not delete.
// It is used by `vlib/v/gen/c/str_intp.v:130` for string interpolation inside web templates.
// TODO: move it to template render
diff --git a/vieter.toml b/vieter.toml
index 34b4f4e..d3922a4 100644
--- a/vieter.toml
+++ b/vieter.toml
@@ -4,14 +4,11 @@ data_dir = "data"
pkg_dir = "data/pkgs"
log_level = "DEBUG"
default_arch = "x86_64"
-arch = "x86_64"
address = "http://localhost:8000"
-# global_schedule = '* *'
+global_schedule = '* *'
api_update_frequency = 2
image_rebuild_frequency = 1
max_concurrent_builds = 3
-# max_log_age = 64
-log_removal_schedule = '* * *'
-collect_metrics = true
+