Merge pull request 'Release 0.3.0-alpha.2' (#185) from release-0.3.0-alpha.2 into main

Reviewed-on: vieter/vieter#185
main 0.3.0-alpha.2
Jef Roosens 2022-05-18 07:56:03 +02:00
commit 7627b28bcf
48 changed files with 1398 additions and 427 deletions


@ -0,0 +1,39 @@
matrix:
  PLATFORM:
    - linux/amd64
    - linux/arm64

platform: ${PLATFORM}
branches: [main]
skip_clone: true

pipeline:
  build:
    image: 'menci/archlinuxarm:base-devel'
    commands:
      # Add the vieter repository so we can use the compiler
      - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
      # Update packages
      - pacman -Syu --noconfirm
      # Create non-root user to perform build & switch to their home
      - groupadd -g 1000 builder
      - useradd -mg builder builder
      - chown -R builder:builder "$PWD"
      - "echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers"
      - su builder
      # Due to a bug with the V compiler, we can't just use the PKGBUILD from
      # inside the repo
      - curl -OL "https://git.rustybever.be/vieter/vieter/raw/tag/$CI_COMMIT_TAG/PKGBUILD"
      - makepkg -s --noconfirm --needed
    when:
      event: tag

  publish:
    image: 'curlimages/curl'
    commands:
      # Publish the package
      - 'for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $VIETER_API_KEY" https://arch.r8r.be/vieter/publish; done'
    secrets:
      - vieter_api_key
    when:
      event: tag


@ -23,7 +23,7 @@ pipeline:
       - su builder
       # Due to a bug with the V compiler, we can't just use the PKGBUILD from
       # inside the repo
-      - curl -OL https://git.rustybever.be/vieter/vieter/raw/branch/dev/PKGBUILD
+      - curl -o PKGBUILD -L https://git.rustybever.be/vieter/vieter/raw/branch/dev/PKGBUILD.dev
       - makepkg -s --noconfirm --needed
     when:
       event: push


@ -11,7 +11,9 @@ pipeline:
       - 'docker_password'
     settings:
       repo: 'chewingbever/vieter'
-      tag: 'dev'
+      tags:
+        - 'dev'
+        - ${CI_COMMIT_SHA}
       platforms: [ 'linux/arm64/v8', 'linux/amd64' ]
       build_args_from_env:
         - 'CI_COMMIT_SHA'


@ -5,6 +5,33 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [Unreleased](https://git.rustybever.be/vieter/vieter/src/branch/dev)
+
+## [0.3.0-alpha.2](https://git.rustybever.be/vieter/vieter/src/tag/0.3.0-alpha.2)
+
+### Added
+
+* Web API for adding & querying build logs
+* CLI commands to access build logs API
+* Cron build logs are uploaded to above API
+* Proper ASCII table output in CLI
+* `vieter repos build id` command to run builds locally
+
+### Removed
+
+* `vieter build` command
+    * This command was used alongside cron for periodic builds, but this has
+      been replaced by `vieter cron`
+
+### Changed
+
+* `vieter build` command now only builds a single repository & uploads the
+  build logs
+* Official Arch packages are now split between `vieter` & `vieter-git`
+    * `vieter` is the latest release
+    * `vieter-git` is the latest commit on the dev branch
+* Full refactor of Docker socket code
+
 ## [0.3.0-alpha.1](https://git.rustybever.be/vieter/vieter/src/tag/0.3.0-alpha.1)

 ### Changed


@ -36,15 +36,8 @@ ENV PATH=/bin \
 COPY --from=builder /app/dumb-init /app/vieter /bin/

-HEALTHCHECK --interval=30s \
-    --timeout=3s \
-    --start-period=5s \
-    CMD /bin/wget --spider http://localhost:8000/health || exit 1
-
 RUN mkdir /data && \
-    chown -R www-data:www-data /data && \
-    mkdir -p '/var/spool/cron/crontabs' && \
-    echo '0 3 * * * /bin/vieter build' | crontab -
+    chown -R www-data:www-data /data

 WORKDIR /data


@ -1,23 +1,18 @@
+# vim: ft=bash
 # Maintainer: Jef Roosens

 pkgbase='vieter'
 pkgname='vieter'
-pkgver=0.2.0.r25.g20112b8
+pkgver='0.3.0_alpha.2'
 pkgrel=1

-depends=('glibc' 'openssl' 'libarchive' 'gc' 'sqlite')
-makedepends=('git' 'gcc' 'vieter-v')
+depends=('glibc' 'openssl' 'libarchive' 'sqlite')
+makedepends=('git' 'vieter-v')
 arch=('x86_64' 'aarch64')
 url='https://git.rustybever.be/vieter/vieter'
 license=('AGPL3')

-source=($pkgname::git+https://git.rustybever.be/vieter/vieter#branch=dev)
+source=("$pkgname::git+https://git.rustybever.be/vieter/vieter#tag=${pkgver//_/-}")
 md5sums=('SKIP')

-pkgver() {
-  cd "$pkgname"
-
-  git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
-}
-
 build() {
   cd "$pkgname"

@ -28,5 +23,5 @@ package() {
   pkgdesc="Vieter is a lightweight implementation of an Arch repository server."

   install -dm755 "$pkgdir/usr/bin"
-  install -Dm755 "$pkgbase/pvieter" "$pkgdir/usr/bin/vieter"
+  install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
 }

PKGBUILD.dev 100644

@ -0,0 +1,35 @@
# vim: ft=bash
# Maintainer: Jef Roosens

pkgbase='vieter-git'
pkgname='vieter-git'
pkgver=0.2.0.r25.g20112b8
pkgrel=1

depends=('glibc' 'openssl' 'libarchive' 'sqlite')
makedepends=('git' 'vieter-v')
arch=('x86_64' 'aarch64')
url='https://git.rustybever.be/vieter/vieter'
license=('AGPL3')

source=("$pkgname::git+https://git.rustybever.be/vieter/vieter#branch=dev")
md5sums=('SKIP')
provides=('vieter')
conflicts=('vieter')

pkgver() {
  cd "$pkgname"

  git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}

build() {
  cd "$pkgname"

  make prod
}

package() {
  pkgdesc="Vieter is a lightweight implementation of an Arch repository server."

  install -dm755 "$pkgdir/usr/bin"
  install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter"
}


@ -55,3 +55,13 @@ clone my compiler in the `v` directory & build it. Afterwards, you can use this
 compiler with make by prepending all make commands with `V_PATH=v/v`. If you do
 encounter this issue, please let me know so I can update my mirror & the
 codebase to fix it!
+
+## Contributing
+
+If you wish to contribute to the project, please take note of the following:
+
+* Rebase instead of merging whenever possible, e.g. when updating your branch
+  with the dev branch.
+* Please follow the
+  [Conventional Commits](https://www.conventionalcommits.org/) style for your
+  commit messages.


@ -56,6 +56,11 @@ Vieter only supports uploading archives compressed using either gzip, zstd or
 xz at the moment.
 {{< /hint >}}

+### `GET /health`
+
+This endpoint is only intended for use by healthchecks. It returns a JSON
+response with the message "Healthy.".
+
 ## API

 All API routes require the API key to be provided using the `X-Api-Key` header.
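For illustration, a minimal V sketch that probes the new endpoint with the standard HTTP client; the address & port here are assumptions (taken from the Dockerfile healthcheck line in this diff), not something the docs prescribe:

import net.http

fn main() {
	// Probe the health endpoint; no API key is needed for it.
	res := http.get('http://localhost:8000/health') or { exit(1) }

	// The body should be a JSON response containing the message "Healthy."
	println(res.text)
}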


@ -15,7 +15,7 @@ repositories. After the image has been created, each repository returned by
 previously created image as a base. Each container goes through the following steps:

 1. The repository is cloned
-2. `makepkg --nobuild --nodeps` is run to update the `pkgver` variable inside
+2. `makepkg --nobuild --syncdeps --needed --noconfirm` is run to update the `pkgver` variable inside
    the `PKGBUILD` file
 3. A HEAD request is sent to the Vieter server to check whether the specific
    version of the package is already present. If it is, the container exits.
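The version check in step 3 boils down to a single HEAD request. A minimal sketch of the idea follows; the URL & package file name are made-up values (the real ones come from sourcing the PKGBUILD), and depending on the V version `http.fetch` may need to be called slightly differently:

import net.http

fn main() {
	// Hypothetical package URL on a Vieter instance.
	url := 'https://example.org/vieter/x86_64/vieter-0.3.0_alpha.2-1'
	res := http.fetch(http.FetchConfig{ url: url, method: .head }) or { exit(1) }

	// A 200 means this exact version is already published, so the build can
	// be skipped.
	println(res.status_code == 200)
}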


@ -3,13 +3,15 @@ module build
 import docker
 import encoding.base64
 import time
-import git
 import os
 import db
+import strings
+import util

-const container_build_dir = '/build'
-
-const build_image_repo = 'vieter-build'
+const (
+	container_build_dir = '/build'
+	build_image_repo    = 'vieter-build'
+)

 // create_build_image creates a builder image given some base image which can
 // then be used to build & package Arch images. It mostly just updates the

@ -17,6 +19,12 @@ const build_image_repo = 'vieter-build'
 // makepkg with. The base image should be some Linux distribution that uses
 // Pacman as its package manager.
 pub fn create_build_image(base_image string) ?string {
+	mut dd := docker.new_conn()?
+
+	defer {
+		dd.close() or {}
+	}
+
 	commands := [
 		// Update repos & install required packages
 		'pacman -Syu --needed --noconfirm base-devel git'

@ -46,14 +54,15 @@ pub fn create_build_image(base_image string) ?string {
 	image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }

 	// We pull the provided image
-	docker.pull_image(image_name, image_tag) ?
+	dd.pull_image(image_name, image_tag)?

-	id := docker.create_container(c) ?
-	docker.start_container(id) ?
+	id := dd.create_container(c)?.id
+	// id := docker.create_container(c)?
+	dd.start_container(id)?

 	// This loop waits until the container has stopped, so we can remove it after
 	for {
-		data := docker.inspect_container(id) ?
+		data := dd.inspect_container(id)?

 		if !data.state.running {
 			break

@ -67,27 +76,41 @@ pub fn create_build_image(base_image string) ?string {
 	// TODO also add the base image's name into the image name to prevent
 	// conflicts.
 	tag := time.sys_mono_now().str()
-	image := docker.create_image_from_container(id, 'vieter-build', tag) ?
-	docker.remove_container(id) ?
+	image := dd.create_image_from_container(id, 'vieter-build', tag)?
+	dd.remove_container(id)?

 	return image.id
 }

+pub struct BuildResult {
+pub:
+	start_time time.Time
+	end_time   time.Time
+	exit_code  int
+	logs       string
+}
+
 // build_repo builds, packages & publishes a given Arch package based on the
 // provided GitRepo. The base image ID should be of an image previously created
-// by create_build_image.
-pub fn build_repo(address string, api_key string, base_image_id string, repo &db.GitRepo) ? {
+// by create_build_image. It returns the logs of the container.
+pub fn build_repo(address string, api_key string, base_image_id string, repo &db.GitRepo) ?BuildResult {
+	mut dd := docker.new_conn()?
+
+	defer {
+		dd.close() or {}
+	}
+
 	build_arch := os.uname().machine

 	// TODO what to do with PKGBUILDs that build multiple packages?
 	commands := [
 		'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
 		'cd repo',
-		'makepkg --nobuild --nodeps',
+		'makepkg --nobuild --syncdeps --needed --noconfirm',
 		'source PKGBUILD',
 		// The build container checks whether the package is already
 		// present on the server
-		'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+		'curl -s --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
 		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
 	]

@ -104,46 +127,30 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &db
 		user: 'builder:builder'
 	}

-	id := docker.create_container(c) ?
-	docker.start_container(id) ?
+	id := dd.create_container(c)?.id
+	dd.start_container(id)?

+	mut data := dd.inspect_container(id)?
+
 	// This loop waits until the container has stopped, so we can remove it after
-	for {
-		data := docker.inspect_container(id) ?
-
-		if !data.state.running {
-			break
-		}
-
+	for data.state.running {
 		time.sleep(1 * time.second)
+
+		data = dd.inspect_container(id)?
 	}

-	docker.remove_container(id) ?
-}
-
-// build builds every Git repo in the server's list.
-fn build(conf Config) ? {
-	build_arch := os.uname().machine
-
-	// We get the repos map from the Vieter instance
-	repos := git.get_repos(conf.address, conf.api_key) ?
-
-	// We filter out any repos that aren't allowed to be built on this
-	// architecture
-	filtered_repos := repos.filter(it.arch.map(it.value).contains(build_arch))
-
-	// No point in doing work if there's no repos present
-	if filtered_repos.len == 0 {
-		return
-	}
-
-	// First, we create a base image which has updated repos n stuff
-	image_id := create_build_image(conf.base_image) ?
-
-	for repo in filtered_repos {
-		build_repo(conf.address, conf.api_key, image_id, repo) ?
-	}
-
-	// Finally, we remove the builder image
-	docker.remove_image(image_id) ?
+	mut logs_stream := dd.get_container_logs(id)?
+
+	// Read in the entire stream
+	mut logs_builder := strings.new_builder(10 * 1024)
+	util.reader_to_writer(mut logs_stream, mut logs_builder)?
+
+	dd.remove_container(id)?
+
+	return BuildResult{
+		start_time: data.state.start_time
+		end_time: data.state.end_time
+		exit_code: data.state.exit_code
+		logs: logs_builder.str()
+	}
 }


@ -1,25 +0,0 @@
module build

import cli
import env

pub struct Config {
pub:
	api_key    string
	address    string
	base_image string = 'archlinux:base-devel'
}

// cmd returns the cli submodule that handles the build process
pub fn cmd() cli.Command {
	return cli.Command{
		name: 'build'
		description: 'Run the build process.'
		execute: fn (cmd cli.Command) ? {
			config_file := cmd.flags.get_string('config-file') ?
			conf := env.load<Config>(config_file) ?

			build(conf) ?
		}
	}
}


@ -0,0 +1,67 @@
module client

import net.http { Method }
import net.urllib
import response { Response }
import json

pub struct Client {
pub:
	address string
	api_key string
}

// new creates a new Client instance.
pub fn new(address string, api_key string) Client {
	return Client{
		address: address
		api_key: api_key
	}
}

// send_request_raw sends an HTTP request, returning the http.Response object.
// It encodes the params so that they're safe to pass as HTTP query parameters.
fn (c &Client) send_request_raw(method Method, url string, params map[string]string, body string) ?http.Response {
	mut full_url := '$c.address$url'

	if params.len > 0 {
		mut params_escaped := map[string]string{}

		// Escape each query param
		for k, v in params {
			params_escaped[k] = urllib.query_escape(v)
		}

		params_str := params_escaped.keys().map('$it=${params_escaped[it]}').join('&')

		full_url = '$full_url?$params_str'
	}

	mut req := http.new_request(method, full_url, body)?
	req.add_custom_header('X-Api-Key', c.api_key)?

	res := req.do()?

	return res
}

// send_request<T> just calls send_request_with_body<T> with an empty body.
fn (c &Client) send_request<T>(method Method, url string, params map[string]string) ?Response<T> {
	return c.send_request_with_body<T>(method, url, params, '')
}

// send_request_with_body<T> calls send_request_raw_response & parses its
// output as a Response<T> object.
fn (c &Client) send_request_with_body<T>(method Method, url string, params map[string]string, body string) ?Response<T> {
	res_text := c.send_request_raw_response(method, url, params, body)?
	data := json.decode(Response<T>, res_text)?

	return data
}

// send_request_raw_response returns the raw text response for an HTTP request.
fn (c &Client) send_request_raw_response(method Method, url string, params map[string]string, body string) ?string {
	res := c.send_request_raw(method, url, params, body)?

	return res.text
}

src/client/git.v 100644

@ -0,0 +1,51 @@
module client

import db { GitRepo }
import net.http { Method }
import response { Response }

// get_git_repos returns the current list of repos.
pub fn (c &Client) get_git_repos() ?[]GitRepo {
	data := c.send_request<[]GitRepo>(Method.get, '/api/repos', {})?

	return data.data
}

// get_git_repo returns the repo for a specific ID.
pub fn (c &Client) get_git_repo(id int) ?GitRepo {
	data := c.send_request<GitRepo>(Method.get, '/api/repos/$id', {})?

	return data.data
}

// add_git_repo adds a new repo to the server.
pub fn (c &Client) add_git_repo(url string, branch string, repo string, arch []string) ?Response<string> {
	mut params := {
		'url':    url
		'branch': branch
		'repo':   repo
	}

	if arch.len > 0 {
		params['arch'] = arch.join(',')
	}

	data := c.send_request<string>(Method.post, '/api/repos', params)?

	return data
}

// remove_git_repo removes the repo with the given ID from the server.
pub fn (c &Client) remove_git_repo(id int) ?Response<string> {
	data := c.send_request<string>(Method.delete, '/api/repos/$id', {})?

	return data
}

// patch_git_repo sends a PATCH request to the given repo with the params as
// payload.
pub fn (c &Client) patch_git_repo(id int, params map[string]string) ?Response<string> {
	data := c.send_request<string>(Method.patch, '/api/repos/$id', params)?

	return data
}

src/client/logs.v 100644

@ -0,0 +1,53 @@
module client

import db { BuildLog }
import net.http { Method }
import response { Response }
import time

// get_build_logs returns all build logs.
pub fn (c &Client) get_build_logs() ?Response<[]BuildLog> {
	data := c.send_request<[]BuildLog>(Method.get, '/api/logs', {})?

	return data
}

// get_build_logs_for_repo returns all build logs for a given repo.
pub fn (c &Client) get_build_logs_for_repo(repo_id int) ?Response<[]BuildLog> {
	params := {
		'repo': repo_id.str()
	}

	data := c.send_request<[]BuildLog>(Method.get, '/api/logs', params)?

	return data
}

// get_build_log returns a specific build log.
pub fn (c &Client) get_build_log(id int) ?Response<BuildLog> {
	data := c.send_request<BuildLog>(Method.get, '/api/logs/$id', {})?

	return data
}

// get_build_log_content returns the contents of the build log file.
pub fn (c &Client) get_build_log_content(id int) ?string {
	data := c.send_request_raw_response(Method.get, '/api/logs/$id/content', {}, '')?

	return data
}

// add_build_log adds a new build log to the server.
pub fn (c &Client) add_build_log(repo_id int, start_time time.Time, end_time time.Time, arch string, exit_code int, content string) ?Response<string> {
	params := {
		'repo':      repo_id.str()
		'startTime': start_time.str()
		'endTime':   end_time.str()
		'arch':      arch
		'exitCode':  exit_code.str()
	}

	data := c.send_request_with_body<string>(Method.post, '/api/logs', params, content)?

	return data
}


@ -0,0 +1,56 @@
module console

import arrays
import strings

// pretty_table converts a list of string data into a pretty table. Many thanks
// to @hungrybluedev in the Vlang Discord for providing this code!
// https://ptb.discord.com/channels/592103645835821068/592106336838352923/970278787143045192
pub fn pretty_table(header []string, data [][]string) ?string {
	column_count := header.len

	mut column_widths := []int{len: column_count, init: header[it].len}

	for values in data {
		for col, value in values {
			if column_widths[col] < value.len {
				column_widths[col] = value.len
			}
		}
	}

	single_line_length := arrays.sum(column_widths)? + (column_count + 1) * 3 - 4

	horizontal_line := '+' + strings.repeat(`-`, single_line_length) + '+'
	mut buffer := strings.new_builder(data.len * single_line_length)

	buffer.writeln(horizontal_line)

	buffer.write_string('| ')

	for col, head in header {
		if col != 0 {
			buffer.write_string(' | ')
		}

		buffer.write_string(head)
		buffer.write_string(strings.repeat(` `, column_widths[col] - head.len))
	}

	buffer.writeln(' |')
	buffer.writeln(horizontal_line)

	for values in data {
		buffer.write_string('| ')

		for col, value in values {
			if col != 0 {
				buffer.write_string(' | ')
			}

			buffer.write_string(value)
			buffer.write_string(strings.repeat(` `, column_widths[col] - value.len))
		}

		buffer.writeln(' |')
	}

	buffer.writeln(horizontal_line)

	return buffer.str()
}
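A small usage sketch, with values invented for illustration; this mirrors how the repos & logs commands call it further down:

import console

fn main() {
	header := ['id', 'url']
	data := [
		['1', 'https://git.example.org/foo'],
		['2', 'https://git.example.org/bar'],
	]

	// Prints an ASCII table with one row per entry in data.
	println(console.pretty_table(header, data) or { exit(1) })
}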


@ -0,0 +1,34 @@
module git

import client
import docker
import os
import build

// build builds the repo with the given ID & uploads the resulting logs to the
// Vieter instance.
fn build(conf Config, repo_id int) ? {
	c := client.new(conf.address, conf.api_key)
	repo := c.get_git_repo(repo_id)?

	build_arch := os.uname().machine

	println('Creating base image...')
	image_id := build.create_build_image(conf.base_image)?

	println('Running build...')
	res := build.build_repo(conf.address, conf.api_key, image_id, repo)?

	println('Removing build image...')

	mut dd := docker.new_conn()?

	defer {
		dd.close() or {}
	}

	dd.remove_image(image_id)?

	println('Uploading logs to Vieter...')
	c.add_build_log(repo.id, res.start_time, res.end_time, build_arch, res.exit_code,
		res.logs)?
}


@ -3,10 +3,13 @@ module git
 import cli
 import env
 import cron.expression { parse_expression }
+import client
+import console

 struct Config {
 	address string [required]
 	api_key string [required]
+	base_image string = 'archlinux:base-devel'
 }

 // cmd returns the cli submodule that handles the repos API interaction

@ -19,10 +22,10 @@ pub fn cmd() cli.Command {
 			name: 'list'
 			description: 'List the current repos.'
 			execute: fn (cmd cli.Command) ? {
-				config_file := cmd.flags.get_string('config-file') ?
-				conf := env.load<Config>(config_file) ?
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?

-				list(conf) ?
+				list(conf)?
 			}
 		},
 		cli.Command{

@ -31,10 +34,10 @@ pub fn cmd() cli.Command {
 			usage: 'url branch repo'
 			description: 'Add a new repository.'
 			execute: fn (cmd cli.Command) ? {
-				config_file := cmd.flags.get_string('config-file') ?
-				conf := env.load<Config>(config_file) ?
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?

-				add(conf, cmd.args[0], cmd.args[1], cmd.args[2]) ?
+				add(conf, cmd.args[0], cmd.args[1], cmd.args[2])?
 			}
 		},
 		cli.Command{

@ -43,10 +46,10 @@ pub fn cmd() cli.Command {
 			usage: 'id'
 			description: 'Remove a repository that matches the given ID prefix.'
 			execute: fn (cmd cli.Command) ? {
-				config_file := cmd.flags.get_string('config-file') ?
-				conf := env.load<Config>(config_file) ?
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?

-				remove(conf, cmd.args[0]) ?
+				remove(conf, cmd.args[0])?
 			}
 		},
 		cli.Command{

@ -55,10 +58,10 @@ pub fn cmd() cli.Command {
 			usage: 'id'
 			description: 'Show detailed information for the repo matching the ID prefix.'
 			execute: fn (cmd cli.Command) ? {
-				config_file := cmd.flags.get_string('config-file') ?
-				conf := env.load<Config>(config_file) ?
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?

-				info(conf, cmd.args[0]) ?
+				info(conf, cmd.args[0])?
 			}
 		},
 		cli.Command{

@ -94,8 +97,8 @@ pub fn cmd() cli.Command {
 				},
 			]
 			execute: fn (cmd cli.Command) ? {
-				config_file := cmd.flags.get_string('config-file') ?
-				conf := env.load<Config>(config_file) ?
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?

 				found := cmd.flags.get_all_found()

@ -103,11 +106,23 @@ pub fn cmd() cli.Command {
 				for f in found {
 					if f.name != 'config-file' {
-						params[f.name] = f.get_string() ?
+						params[f.name] = f.get_string()?
 					}
 				}

-				patch(conf, cmd.args[0], params) ?
+				patch(conf, cmd.args[0], params)?
+			}
+		},
+		cli.Command{
+			name: 'build'
+			required_args: 1
+			usage: 'id'
+			description: 'Build the repo with the given id & publish it.'
+			execute: fn (cmd cli.Command) ? {
+				config_file := cmd.flags.get_string('config-file')?
+				conf := env.load<Config>(config_file)?
+
+				build(conf, cmd.args[0].int())?
 			}
 		},
 	]

@ -119,16 +134,17 @@ pub fn cmd() cli.Command {

 // list prints out a list of all repositories.
 fn list(conf Config) ? {
-	repos := get_repos(conf.address, conf.api_key) ?
+	c := client.new(conf.address, conf.api_key)
+	repos := c.get_git_repos()?
+	data := repos.map([it.id.str(), it.url, it.branch, it.repo])

-	for repo in repos {
-		println('$repo.id\t$repo.url\t$repo.branch\t$repo.repo')
-	}
+	println(console.pretty_table(['id', 'url', 'branch', 'repo'], data)?)
 }

 // add adds a new repository to the server's list.
 fn add(conf Config, url string, branch string, repo string) ? {
-	res := add_repo(conf.address, conf.api_key, url, branch, repo, []) ?
+	c := client.new(conf.address, conf.api_key)
+	res := c.add_git_repo(url, branch, repo, [])?

 	println(res.message)
 }

@ -139,7 +155,8 @@ fn remove(conf Config, id string) ? {
 	id_int := id.int()

 	if id_int != 0 {
-		res := remove_repo(conf.address, conf.api_key, id_int) ?
+		c := client.new(conf.address, conf.api_key)
+		res := c.remove_git_repo(id_int)?

 		println(res.message)
 	}
 }

@ -156,7 +173,8 @@ fn patch(conf Config, id string, params map[string]string) ? {
 	id_int := id.int()

 	if id_int != 0 {
-		res := patch_repo(conf.address, conf.api_key, id_int, params) ?
+		c := client.new(conf.address, conf.api_key)
+		res := c.patch_git_repo(id_int, params)?

 		println(res.message)
 	}

@ -170,6 +188,7 @@ fn info(conf Config, id string) ? {
 		return
 	}

-	repo := get_repo(conf.address, conf.api_key, id_int) ?
+	c := client.new(conf.address, conf.api_key)
+	repo := c.get_git_repo(id_int)?

 	println(repo)
 }


@ -0,0 +1,108 @@
module logs

import cli
import env
import client
import db
import console

struct Config {
	address string [required]
	api_key string [required]
}

// cmd returns the cli module that handles the build logs API.
pub fn cmd() cli.Command {
	return cli.Command{
		name: 'logs'
		description: 'Interact with the build logs API.'
		commands: [
			cli.Command{
				name: 'list'
				description: 'List the build logs. If a repo ID is provided, only list the build logs for that repo.'
				flags: [
					cli.Flag{
						name: 'repo'
						description: 'ID of the Git repo to restrict list to.'
						flag: cli.FlagType.int
					},
				]
				execute: fn (cmd cli.Command) ? {
					config_file := cmd.flags.get_string('config-file')?
					conf := env.load<Config>(config_file)?

					repo_id := cmd.flags.get_int('repo')?

					if repo_id == 0 { list(conf)? } else { list_for_repo(conf, repo_id)? }
				}
			},
			cli.Command{
				name: 'info'
				required_args: 1
				usage: 'id'
				description: 'Show all info for a specific build log.'
				execute: fn (cmd cli.Command) ? {
					config_file := cmd.flags.get_string('config-file')?
					conf := env.load<Config>(config_file)?

					id := cmd.args[0].int()
					info(conf, id)?
				}
			},
			cli.Command{
				name: 'content'
				required_args: 1
				usage: 'id'
				description: 'Output the content of a build log to stdout.'
				execute: fn (cmd cli.Command) ? {
					config_file := cmd.flags.get_string('config-file')?
					conf := env.load<Config>(config_file)?

					id := cmd.args[0].int()
					content(conf, id)?
				}
			},
		]
	}
}

// print_log_list prints a list of logs.
fn print_log_list(logs []db.BuildLog) ? {
	data := logs.map([it.id.str(), it.repo_id.str(), it.start_time.str(),
		it.exit_code.str()])

	println(console.pretty_table(['id', 'repo', 'start time', 'exit code'], data)?)
}

// list prints a list of all build logs.
fn list(conf Config) ? {
	c := client.new(conf.address, conf.api_key)
	logs := c.get_build_logs()?.data

	print_log_list(logs)?
}

// list_for_repo prints a list of all build logs for a given repo.
fn list_for_repo(conf Config, repo_id int) ? {
	c := client.new(conf.address, conf.api_key)
	logs := c.get_build_logs_for_repo(repo_id)?.data

	print_log_list(logs)?
}

// info prints the detailed info for a given build log.
fn info(conf Config, id int) ? {
	c := client.new(conf.address, conf.api_key)
	log := c.get_build_log(id)?.data

	print(log)
}

// content outputs the contents of the log file for a given build log to
// stdout.
fn content(conf Config, id int) ? {
	c := client.new(conf.address, conf.api_key)
	content := c.get_build_log_content(id)?

	println(content)
}


@ -23,10 +23,10 @@ pub fn cmd() cli.Command {
 		name: 'cron'
 		description: 'Start the cron service that periodically runs builds.'
 		execute: fn (cmd cli.Command) ? {
-			config_file := cmd.flags.get_string('config-file') ?
-			conf := env.load<Config>(config_file) ?
+			config_file := cmd.flags.get_string('config-file')?
+			conf := env.load<Config>(config_file)?

-			cron(conf) ?
+			cron(conf)?
 		}
 	}
 }


@ -27,7 +27,7 @@ pub fn cron(conf Config) ? {
 	}

 	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
-		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?
+		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)?

 	d.run()
 }


@ -3,6 +3,7 @@ module daemon
 import time
 import sync.stdatomic
 import build
+import os

 const (
 	build_empty = 0

@ -77,13 +78,20 @@ fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
 	// 0 means success, 1 means failure
 	mut status := 0

-	build.build_repo(d.address, d.api_key, d.builder_images.last(), &sb.repo) or {
+	res := build.build_repo(d.client.address, d.client.api_key, d.builder_images.last(),
+		&sb.repo) or {
 		d.ldebug('build_repo error: $err.msg()')
 		status = 1
+
+		build.BuildResult{}
 	}

 	if status == 0 {
-		d.linfo('finished build: $sb.repo.url $sb.repo.branch')
+		d.linfo('finished build: $sb.repo.url $sb.repo.branch; uploading logs...')
+
+		build_arch := os.uname().machine
+		d.client.add_build_log(sb.repo.id, res.start_time, res.end_time, build_arch, res.exit_code,
+			res.logs) or { d.lerror('Failed to upload logs for $sb.repo.url $sb.repo.arch') }
 	} else {
 		d.linfo('failed build: $sb.repo.url $sb.repo.branch')
 	}


@ -1,6 +1,5 @@
 module daemon

-import git
 import time
 import log
 import datatypes { MinHeap }

@ -10,6 +9,7 @@ import build
 import docker
 import db
 import os
+import client

 const (
 	// How many seconds to wait before retrying to update API if failed

@ -31,8 +31,7 @@ fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
 pub struct Daemon {
 mut:
-	address         string
-	api_key         string
+	client          client.Client
 	base_image      string
 	builder_images  []string
 	global_schedule CronExpression

@ -56,8 +55,7 @@ mut:
 // populates the build queue for the first time.
 pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
 	mut d := Daemon{
-		address: address
-		api_key: api_key
+		client: client.new(address, api_key)
 		base_image: base_image
 		global_schedule: global_schedule
 		api_update_frequency: api_update_frequency

@ -180,7 +178,7 @@ fn (mut d Daemon) schedule_build(repo db.GitRepo) {
 fn (mut d Daemon) renew_repos() {
 	d.linfo('Renewing repos...')

-	mut new_repos := git.get_repos(d.address, d.api_key) or {
+	mut new_repos := d.client.get_git_repos() or {
 		d.lerror('Failed to renew repos. Retrying in ${daemon.api_update_retry_timeout}s...')
 		d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout)

@ -255,14 +253,21 @@ fn (mut d Daemon) rebuild_base_image() bool {
 fn (mut d Daemon) clean_old_base_images() {
 	mut i := 0

+	mut dd := docker.new_conn() or {
+		d.lerror('Failed to connect to Docker socket.')
+		return
+	}
+
+	defer {
+		dd.close() or {}
+	}
+
 	for i < d.builder_images.len - 1 {
 		// For each builder image, we try to remove it by calling the Docker
 		// API. If the function returns an error or false, that means the image
 		// wasn't deleted. Therefore, we move the index over. If the function
 		// returns true, the array's length has decreased by one so we don't
 		// move the index.
-		if !docker.remove_image(d.builder_images[i]) or { false } {
-			i += 1
-		}
+		dd.remove_image(d.builder_images[i]) or { i += 1 }
 	}
 }


@ -218,7 +218,7 @@ fn parse_part(s string, min int, max int) ?[]int {
 	mut bitv := []bool{len: max - min + 1, init: false}

 	for range in s.split(',') {
-		parse_range(range, min, max, mut bitv) ?
+		parse_range(range, min, max, mut bitv)?
 	}

 	return bitv_to_ints(bitv, min)


@ -13,14 +13,14 @@ fn parse_range_error(s string, min int, max int) string {
 // =====parse_range=====
 fn test_range_star_range() ? {
 	mut bitv := []bool{len: 6, init: false}
-	parse_range('*', 0, 5, mut bitv) ?
+	parse_range('*', 0, 5, mut bitv)?

 	assert bitv == [true, true, true, true, true, true]
 }

 fn test_range_number() ? {
 	mut bitv := []bool{len: 6, init: false}
-	parse_range('4', 0, 5, mut bitv) ?
+	parse_range('4', 0, 5, mut bitv)?

 	assert bitv_to_ints(bitv, 0) == [4]
 }

@ -39,14 +39,14 @@ fn test_range_number_invalid() ? {
 fn test_range_step_star_1() ? {
 	mut bitv := []bool{len: 21, init: false}
-	parse_range('*/4', 0, 20, mut bitv) ?
+	parse_range('*/4', 0, 20, mut bitv)?

 	assert bitv_to_ints(bitv, 0) == [0, 4, 8, 12, 16, 20]
 }

 fn test_range_step_star_2() ? {
 	mut bitv := []bool{len: 8, init: false}
-	parse_range('*/3', 1, 8, mut bitv) ?
+	parse_range('*/3', 1, 8, mut bitv)?

 	assert bitv_to_ints(bitv, 1) == [1, 4, 7]
 }

@ -61,7 +61,7 @@ fn test_range_step_zero() ? {
 fn test_range_step_number() ? {
 	mut bitv := []bool{len: 21, init: false}
-	parse_range('5/4', 2, 22, mut bitv) ?
+	parse_range('5/4', 2, 22, mut bitv)?

 	assert bitv_to_ints(bitv, 2) == [5, 9, 13, 17, 21]
 }

@ -76,23 +76,23 @@ fn test_range_step_number_too_small() ? {
 fn test_range_dash() ? {
 	mut bitv := []bool{len: 10, init: false}
-	parse_range('4-8', 0, 9, mut bitv) ?
+	parse_range('4-8', 0, 9, mut bitv)?

 	assert bitv_to_ints(bitv, 0) == [4, 5, 6, 7, 8]
 }

 fn test_range_dash_step() ? {
 	mut bitv := []bool{len: 10, init: false}
-	parse_range('4-8/2', 0, 9, mut bitv) ?
+	parse_range('4-8/2', 0, 9, mut bitv)?

 	assert bitv_to_ints(bitv, 0) == [4, 6, 8]
 }

 // =====parse_part=====
 fn test_part_single() ? {
-	assert parse_part('*', 0, 5) ? == [0, 1, 2, 3, 4, 5]
+	assert parse_part('*', 0, 5)? == [0, 1, 2, 3, 4, 5]
 }

 fn test_part_multiple() ? {
-	assert parse_part('*/2,2/3', 1, 8) ? == [1, 2, 3, 5, 7, 8]
+	assert parse_part('*/2,2/3', 1, 8)? == [1, 2, 3, 5, 7, 8]
 }


@ -3,11 +3,11 @@ module expression
 import time { parse }

 fn util_test_time(exp string, t1_str string, t2_str string) ? {
-	ce := parse_expression(exp) ?
-	t1 := parse(t1_str) ?
-	t2 := parse(t2_str) ?
-	t3 := ce.next(t1) ?
+	ce := parse_expression(exp)?
+	t1 := parse(t1_str)?
+	t2 := parse(t2_str)?
+	t3 := ce.next(t1)?

 	assert t2.year == t3.year
 	assert t2.month == t3.month

@ -18,17 +18,17 @@ fn util_test_time(exp string, t1_str string, t2_str string) ? {
 fn test_next_simple() ? {
 	// Very simple
-	util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00') ?
+	util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')?

 	// Overlap to next day
-	util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00') ?
-	util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00') ?
+	util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')?
+	util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')?

-	util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00') ?
+	util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')?

 	// Overlap to next month
-	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00') ?
+	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')?

 	// Overlap to next year
-	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00') ?
+	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')?
 }


@ -8,10 +8,11 @@ struct VieterDb {
 // init initializes a database & adds the correct tables.
 pub fn init(db_path string) ?VieterDb {
-	conn := sqlite.connect(db_path) ?
+	conn := sqlite.connect(db_path)?

 	sql conn {
 		create table GitRepo
+		create table BuildLog
 	}

 	return VieterDb{


@ -94,7 +94,7 @@ pub fn (db &VieterDb) get_git_repo(repo_id int) ?GitRepo {
 	// If a select statement fails, it returns a zeroed object. By
 	// checking one of the required fields, we can see whether the query
 	// returned a result or not.
-	if res.url == '' {
+	if res.id == 0 {
 		return none
 	}

@ -152,3 +152,11 @@ pub fn (db &VieterDb) update_git_repo_archs(repo_id int, archs []GitRepoArch) {
 		}
 	}
 }
+
+// git_repo_exists is a utility function that checks whether a repo with the
+// given id exists.
+pub fn (db &VieterDb) git_repo_exists(repo_id int) bool {
+	db.get_git_repo(repo_id) or { return false }
+
+	return true
+}

src/db/logs.v 100644

@ -0,0 +1,74 @@
module db

import time

pub struct BuildLog {
pub:
	id         int       [primary; sql: serial]
	repo_id    int       [nonull]
	start_time time.Time [nonull]
	end_time   time.Time [nonull]
	arch       string    [nonull]
	exit_code  int       [nonull]
}

// str returns a string representation.
pub fn (bl &BuildLog) str() string {
	mut parts := [
		'id: $bl.id',
		'repo id: $bl.repo_id',
		'start time: $bl.start_time',
		'end time: $bl.end_time',
		'arch: $bl.arch',
		'exit code: $bl.exit_code',
	]
	str := parts.join('\n')

	return str
}

// get_build_logs returns all BuildLog's in the database.
pub fn (db &VieterDb) get_build_logs() []BuildLog {
	res := sql db.conn {
		select from BuildLog order by id
	}

	return res
}

// get_build_logs_for_repo returns all BuildLog's in the database for a given
// repo.
pub fn (db &VieterDb) get_build_logs_for_repo(repo_id int) []BuildLog {
	res := sql db.conn {
		select from BuildLog where repo_id == repo_id order by id
	}

	return res
}

// get_build_log tries to return a specific BuildLog.
pub fn (db &VieterDb) get_build_log(id int) ?BuildLog {
	res := sql db.conn {
		select from BuildLog where id == id
	}

	if res.id == 0 {
		return none
	}

	return res
}

// add_build_log inserts the given BuildLog into the database.
pub fn (db &VieterDb) add_build_log(log BuildLog) {
	sql db.conn {
		insert log into BuildLog
	}
}

// delete_build_log deletes the BuildLog with the given ID from the database.
pub fn (db &VieterDb) delete_build_log(id int) {
	sql db.conn {
		delete from BuildLog where id == id
	}
}


@ -0,0 +1,3 @@
This module implements part of the Docker Engine API v1.41
([documentation](https://docs.docker.com/engine/api/v1.41/)) using socket-based
HTTP communication.
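A minimal sketch of what using the refactored, connection-based API looks like; the image name & tag are illustrative, and the methods shown are the ones introduced by this PR:

import docker

fn main() {
	// Open a connection to the Docker daemon's Unix socket.
	mut dd := docker.new_conn() or { exit(1) }

	defer {
		dd.close() or {}
	}

	// Pull an image, then remove it again.
	dd.pull_image('archlinux', 'base-devel') or { exit(1) }
	dd.remove_image('archlinux:base-devel') or { exit(1) }
}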


@ -2,17 +2,11 @@ module docker
 import json
 import net.urllib
+import time
+import net.http { Method }

-struct Container {
-	id    string   [json: Id]
-	names []string [json: Names]
-}
-
-// containers returns a list of all currently running containers
-pub fn containers() ?[]Container {
-	res := request('GET', urllib.parse('/v1.41/containers/json') ?) ?
-
-	return json.decode([]Container, res.text) or {}
+struct DockerError {
+	message string
 }

 pub struct NewContainer {

@ -25,54 +19,104 @@ pub struct NewContainer {
 }

 struct CreatedContainer {
-	id string [json: Id]
+pub:
+	id       string   [json: Id]
+	warnings []string [json: Warnings]
 }

-// create_container creates a container defined by the given configuration. If
-// successful, it returns the ID of the newly created container.
-pub fn create_container(c &NewContainer) ?string {
-	res := request_with_json('POST', urllib.parse('/v1.41/containers/create') ?, c) ?
+// create_container creates a new container with the given config.
+pub fn (mut d DockerConn) create_container(c NewContainer) ?CreatedContainer {
+	d.send_request_with_json(Method.post, urllib.parse('/v1.41/containers/create')?, c)?
+	head, res := d.read_response()?

-	if res.status_code != 201 {
-		return error('Failed to create container.')
+	if head.status_code != 201 {
+		data := json.decode(DockerError, res)?
+
+		return error(data.message)
 	}

-	return json.decode(CreatedContainer, res.text) ?.id
+	data := json.decode(CreatedContainer, res)?
+
+	return data
 }

-// start_container starts a container with a given ID. It returns whether the
-// container was started or not.
-pub fn start_container(id string) ?bool {
-	res := request('POST', urllib.parse('/v1.41/containers/$id/start') ?) ?
+// start_container starts the container with the given id.
+pub fn (mut d DockerConn) start_container(id string) ? {
+	d.send_request(Method.post, urllib.parse('/v1.41/containers/$id/start')?)?
+	head, body := d.read_response()?

-	return res.status_code == 204
+	if head.status_code != 204 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
 }

 struct ContainerInspect {
-pub:
+pub mut:
 	state ContainerState [json: State]
 }

 struct ContainerState {
 pub:
 	running bool [json: Running]
+	status    string [json: Status]
+	exit_code int    [json: ExitCode]
+	// These use a rather specific format so they have to be parsed later
+	start_time_str string [json: StartedAt]
+	end_time_str   string [json: FinishedAt]
+pub mut:
+	start_time time.Time [skip]
+	end_time   time.Time [skip]
 }

-// inspect_container returns the result of inspecting a container with a given
-// ID.
-pub fn inspect_container(id string) ?ContainerInspect {
-	res := request('GET', urllib.parse('/v1.41/containers/$id/json') ?) ?
+// inspect_container returns detailed information for a given container.
+pub fn (mut d DockerConn) inspect_container(id string) ?ContainerInspect {
+	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/json')?)?
+	head, body := d.read_response()?

-	if res.status_code != 200 {
-		return error('Failed to inspect container.')
+	if head.status_code != 200 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
 	}

-	return json.decode(ContainerInspect, res.text) or {}
+	mut data := json.decode(ContainerInspect, body)?
+
+	data.state.start_time = time.parse_rfc3339(data.state.start_time_str)?
+
+	if data.state.status == 'exited' {
+		data.state.end_time = time.parse_rfc3339(data.state.end_time_str)?
+	}
+
+	return data
 }

-// remove_container removes a container with a given ID.
-pub fn remove_container(id string) ?bool {
-	res := request('DELETE', urllib.parse('/v1.41/containers/$id') ?) ?
+// remove_container removes the container with the given id.
+pub fn (mut d DockerConn) remove_container(id string) ? {
+	d.send_request(Method.delete, urllib.parse('/v1.41/containers/$id')?)?
+	head, body := d.read_response()?

-	return res.status_code == 204
+	if head.status_code != 204 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+}
+
+// get_container_logs returns a reader object allowing access to the
+// container's logs.
+pub fn (mut d DockerConn) get_container_logs(id string) ?&StreamFormatReader {
+	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/logs?stdout=true&stderr=true')?)?
+	head := d.read_response_head()?
+
+	if head.status_code != 200 {
+		content_length := head.header.get(http.CommonHeader.content_length)?.int()
+		body := d.read_response_body(content_length)?
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	return d.get_stream_format_reader()
 }


@ -1,97 +1,137 @@
 module docker

 import net.unix
-import net.urllib
+import io
 import net.http
+import strings
+import net.urllib
 import json
+import util

-const socket = '/var/run/docker.sock'
-
-const buf_len = 1024
+const (
+	socket               = '/var/run/docker.sock'
+	buf_len              = 10 * 1024
+	http_separator       = [u8(`\r`), `\n`, `\r`, `\n`]
+	http_chunk_separator = [u8(`\r`), `\n`]
+)

-// send writes a request to the Docker socket, waits for a response & returns
-// it.
-fn send(req &string) ?http.Response {
-	// Open a connection to the socket
-	mut s := unix.connect_stream(docker.socket) or {
-		return error('Failed to connect to socket ${docker.socket}.')
-	}
-
-	defer {
-		// This or is required because otherwise, the V compiler segfaults for
-		// some reason
-		// https://github.com/vlang/v/issues/13534
-		s.close() or {}
-	}
-
-	// Write the request to the socket
-	s.write_string(req) or { return error('Failed to write request to socket ${docker.socket}.') }
-
-	s.wait_for_write() ?
-
-	mut c := 0
-	mut buf := []u8{len: docker.buf_len}
-	mut res := []u8{}
-
-	for {
-		c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') }
-		res << buf[..c]
-
-		if c < docker.buf_len {
-			break
-		}
-	}
-
-	// After reading the first part of the response, we parse it into an HTTP
-	// response. If it isn't chunked, we return early with the data.
-	parsed := http.parse_response(res.bytestr()) or {
-		return error('Failed to parse HTTP response from socket ${docker.socket}.')
-	}
-
-	if parsed.header.get(http.CommonHeader.transfer_encoding) or { '' } != 'chunked' {
-		return parsed
-	}
-
-	// We loop until we've encountered the end of the chunked response
-	// A chunked HTTP response always ends with '0\r\n\r\n'.
-	for res.len < 5 || res#[-5..] != [u8(`0`), `\r`, `\n`, `\r`, `\n`] {
-		// Wait for the server to respond
-		s.wait_for_write() ?
-
-		for {
-			c = s.read(mut buf) or {
-				return error('Failed to read data from socket ${docker.socket}.')
-			}
-			res << buf[..c]
-
-			if c < docker.buf_len {
-				break
-			}
-		}
-	}
-
-	// Decode chunked response
-	return http.parse_response(res.bytestr())
-}
-
-// request_with_body sends a request to the Docker socket with the given body.
-fn request_with_body(method string, url urllib.URL, content_type string, body string) ?http.Response {
-	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
-
-	return send(req)
-}
-
-// request sends a request to the Docker socket with an empty body.
-fn request(method string, url urllib.URL) ?http.Response {
-	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
-
-	return send(req)
-}
-
-// request_with_json<T> sends a request to the Docker socket with a given JSON
-// payload
-pub fn request_with_json<T>(method string, url urllib.URL, data &T) ?http.Response {
-	body := json.encode(data)
-
-	return request_with_body(method, url, 'application/json', body)
-}
+pub struct DockerConn {
+mut:
+	socket &unix.StreamConn
+	reader &io.BufferedReader
+}
+
+// new_conn creates a new connection to the Docker daemon.
+pub fn new_conn() ?&DockerConn {
+	s := unix.connect_stream(docker.socket)?
+
+	d := &DockerConn{
+		socket: s
+		reader: io.new_buffered_reader(reader: s)
+	}
+
+	return d
+}
+
+// close closes the underlying socket connection.
+pub fn (mut d DockerConn) close() ? {
+	d.socket.close()?
+}
+
+// send_request sends an HTTP request without body.
+pub fn (mut d DockerConn) send_request(method http.Method, url urllib.URL) ? {
+	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
+
+	d.socket.write_string(req)?
+
+	// When starting a new request, the reader needs to be reset.
+	d.reader = io.new_buffered_reader(reader: d.socket)
+}
+
+// send_request_with_body sends an HTTP request with the given body.
+pub fn (mut d DockerConn) send_request_with_body(method http.Method, url urllib.URL, content_type string, body string) ? {
+	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
+
+	d.socket.write_string(req)?
+
+	// When starting a new request, the reader needs to be reset.
+	d.reader = io.new_buffered_reader(reader: d.socket)
+}
+
+// send_request_with_json<T> is a convenience wrapper around
+// send_request_with_body that encodes the input as JSON.
+pub fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url urllib.URL, data &T) ? {
+	body := json.encode(data)
+
+	return d.send_request_with_body(method, url, 'application/json', body)
+}
+
+// read_response_head consumes the socket's contents until it encounters
+// '\r\n\r\n', after which it parses the response as an HTTP response.
+// Importantly, this function never consumes the reader past the HTTP
+// separator, so the body can be read fully later on.
+pub fn (mut d DockerConn) read_response_head() ?http.Response {
+	mut res := []u8{}
+
+	util.read_until_separator(mut d.reader, mut res, docker.http_separator)?
+
+	return http.parse_response(res.bytestr())
+}
+
+// read_response_body reads `length` bytes from the stream. It can be used when
+// the response encoding isn't chunked to fully read it.
+pub fn (mut d DockerConn) read_response_body(length int) ?string {
+	if length == 0 {
+		return ''
+	}
+
+	mut buf := []u8{len: docker.buf_len}
+	mut c := 0
+	mut builder := strings.new_builder(docker.buf_len)
+
+	for builder.len < length {
+		c = d.reader.read(mut buf) or { break }
+
+		builder.write(buf[..c])?
+	}
+
+	return builder.str()
+}
+
+// read_response is a convenience function which always consumes the entire
+// response & returns it. It should only be used when we're certain that the
+// result isn't too large.
+pub fn (mut d DockerConn) read_response() ?(http.Response, string) {
+	head := d.read_response_head()?
+
+	if head.header.get(http.CommonHeader.transfer_encoding) or { '' } == 'chunked' {
+		mut builder := strings.new_builder(1024)
+		mut body := d.get_chunked_response_reader()
+
+		util.reader_to_writer(mut body, mut builder)?
+
+		return head, builder.str()
+	}
+
+	content_length := head.header.get(http.CommonHeader.content_length)?.int()
+	res := d.read_response_body(content_length)?
+
+	return head, res
+}
+
+// get_chunked_response_reader returns a ChunkedResponseReader using the socket
+// as reader.
+pub fn (mut d DockerConn) get_chunked_response_reader() &ChunkedResponseReader {
+	r := new_chunked_response_reader(d.reader)
+
+	return r
+}
+
+// get_stream_format_reader returns a StreamFormatReader using the socket as
+// reader.
+pub fn (mut d DockerConn) get_stream_format_reader() &StreamFormatReader {
+	r := new_chunked_response_reader(d.reader)
+	r2 := new_stream_format_reader(r)
+
+	return r2
+}


@ -1,6 +1,6 @@
 module docker

-import net.http
+import net.http { Method }
 import net.urllib
 import json

@ -9,26 +9,53 @@ pub:
 	id string [json: Id]
 }

-// pull_image pulls tries to pull the image for the given image & tag
-pub fn pull_image(image string, tag string) ?http.Response {
-	return request('POST', urllib.parse('/v1.41/images/create?fromImage=$image&tag=$tag') ?)
-}
-
-// create_image_from_container creates a new image from a container with the
-// given repo & tag, given the container's ID.
-pub fn create_image_from_container(id string, repo string, tag string) ?Image {
-	res := request('POST', urllib.parse('/v1.41/commit?container=$id&repo=$repo&tag=$tag') ?) ?
-
-	if res.status_code != 201 {
-		return error('Failed to create image from container.')
-	}
-
-	return json.decode(Image, res.text) or {}
-}
-
-// remove_image removes the image with the given ID.
-pub fn remove_image(id string) ?bool {
-	res := request('DELETE', urllib.parse('/v1.41/images/$id') ?) ?
-
-	return res.status_code == 200
-}
+// pull_image pulls the given image:tag.
+pub fn (mut d DockerConn) pull_image(image string, tag string) ? {
+	d.send_request(Method.post, urllib.parse('/v1.41/images/create?fromImage=$image&tag=$tag')?)?
+	head := d.read_response_head()?
+
+	if head.status_code != 200 {
+		content_length := head.header.get(http.CommonHeader.content_length)?.int()
+		body := d.read_response_body(content_length)?
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	// Keep reading the body until the pull has completed
+	mut body := d.get_chunked_response_reader()
+	mut buf := []u8{len: 1024}
+
+	for {
+		body.read(mut buf) or { break }
+	}
+}
+
+// create_image_from_container creates a new image from a container.
+pub fn (mut d DockerConn) create_image_from_container(id string, repo string, tag string) ?Image {
+	d.send_request(Method.post, urllib.parse('/v1.41/commit?container=$id&repo=$repo&tag=$tag')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 201 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	data := json.decode(Image, body)?
+
+	return data
+}
+
+// remove_image removes the image with the given id.
+pub fn (mut d DockerConn) remove_image(id string) ? {
+	d.send_request(Method.delete, urllib.parse('/v1.41/images/$id')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 200 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+}

src/docker/stream.v 100644

@@ -0,0 +1,135 @@
module docker
import io
import util
import encoding.binary
import encoding.hex
// ChunkedResponseReader parses an underlying HTTP chunked response, exposing
// it as if it were a continuous stream of data.
struct ChunkedResponseReader {
mut:
reader io.BufferedReader
bytes_left_in_chunk u64
started bool
}
// new_chunked_response_reader creates a new ChunkedResponseReader on the heap
// with the provided reader.
pub fn new_chunked_response_reader(reader io.BufferedReader) &ChunkedResponseReader {
r := &ChunkedResponseReader{
reader: reader
}
return r
}
// read satisfies the io.Reader interface.
pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
if r.bytes_left_in_chunk == 0 {
// An io.BufferedReader always returns none if its stream has
// ended.
r.bytes_left_in_chunk = r.read_chunk_size()?
}
mut c := 0
// Make sure we don't read more than we can safely read. This is to prevent
// the underlying reader from getting out of sync with our parsing:
if buf.len > r.bytes_left_in_chunk {
c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
} else {
c = r.reader.read(mut buf)?
}
r.bytes_left_in_chunk -= u64(c)
return c
}
// read_chunk_size advances the reader & reads the size of the next HTTP chunk.
// This function should only be called if the previous chunk has been
// completely consumed.
fn (mut r ChunkedResponseReader) read_chunk_size() ?u64 {
if r.started {
mut buf := []u8{len: 2}
// Each chunk ends with a `\r\n` which we want to skip first
r.reader.read(mut buf)?
}
r.started = true
mut res := []u8{}
util.read_until_separator(mut r.reader, mut res, http_chunk_separator)?
// The length of the next chunk is provided as a hexadecimal string,
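// e.g. a chunk-size line of '04d2\r\n' announces 0x4d2 = 1234 payload bytes.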
mut num_data := hex.decode(res#[..-2].bytestr())?
for num_data.len < 8 {
num_data.insert(0, 0)
}
num := binary.big_endian_u64(num_data)
// This only occurs for the very last chunk, which always reports a size of
// 0.
if num == 0 {
return none
}
return num
}
// StreamFormatReader parses an underlying stream of Docker logs, removing the
// header bytes.
struct StreamFormatReader {
mut:
reader ChunkedResponseReader
bytes_left_in_chunk u32
}
// new_stream_format_reader creates a new StreamFormatReader using the given
// reader.
pub fn new_stream_format_reader(reader ChunkedResponseReader) &StreamFormatReader {
r := &StreamFormatReader{
reader: reader
}
return r
}
// read satisfies the io.Reader interface.
pub fn (mut r StreamFormatReader) read(mut buf []u8) ?int {
if r.bytes_left_in_chunk == 0 {
r.bytes_left_in_chunk = r.read_chunk_size()?
}
mut c := 0
if buf.len > r.bytes_left_in_chunk {
c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
} else {
c = r.reader.read(mut buf)?
}
r.bytes_left_in_chunk -= u32(c)
return c
}
// read_chunk_size advances the reader & reads the header bytes for the length
// of the next chunk.
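// In Docker's stream format, each frame starts with an 8-byte header: one byte
// for the stream type (stdin, stdout or stderr), three padding bytes & a
// big-endian u32 holding the payload length, which is what gets parsed below.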
fn (mut r StreamFormatReader) read_chunk_size() ?u32 {
mut buf := []u8{len: 8}
r.reader.read(mut buf)?
num := binary.big_endian_u32(buf[4..])
if num == 0 {
return none
}
return num
}
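To show how the two readers compose, a minimal sketch (this helper is hypothetical; it assumes the connection has just received a response in Docker's multiplexed stream format, such as container logs):

	// print_stream is a hypothetical helper: the StreamFormatReader wraps the
	// ChunkedResponseReader, so the caller only ever sees the raw log bytes.
	pub fn (mut d DockerConn) print_stream() {
		mut r := d.get_stream_format_reader()
		mut buf := []u8{len: 1024}

		for {
			c := r.read(mut buf) or { break }
			print(buf[..c].bytestr())
		}
	}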

7
src/env/README.md vendored 100644
View File

@@ -0,0 +1,7 @@
This module provides a framework for parsing a configuration, defined as a
struct, from both a TOML configuration file & environment variables. Some
notable features are:
* Overwrite values in config file using environment variables
* Allow default values in config struct
* Read environment variable value from file
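As a sketch of how these features combine (the `Config` struct & field names below are hypothetical; `env.load<T>` is this module's entry point):

    module main

    import env

    struct Config {
        api_key  string
        data_dir string
        port     int = 8000 // struct default, used when neither source sets it
    }

    fn main() {
        // Values from vieter.toml are read first; VIETER_API_KEY then
        // overrides api_key, while VIETER_API_KEY_FILE=/run/secrets/key would
        // read the value from that file instead.
        conf := env.load<Config>('vieter.toml') or { panic(err) }
        println(conf.port)
    }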

22
src/env/env.v vendored
View File

@@ -3,13 +3,19 @@ module env
 import os
 import toml

-// The prefix that every environment variable should have
-const prefix = 'VIETER_'
-
-// The suffix an environment variable in order for it to be loaded from a file
-// instead
-const file_suffix = '_FILE'
+const (
+	// The prefix that every environment variable should have
+	prefix = 'VIETER_'
+	// The suffix an environment variable should have in order for it to be
+	// loaded from a file instead
+	file_suffix = '_FILE'
+)

+// get_env_var tries to read the contents of the given environment variable. It
+// looks for either `${env.prefix}${field_name.to_upper()}` or
+// `${env.prefix}${field_name.to_upper()}${env.file_suffix}`; if the latter is
+// set, the contents of the file it points to are returned instead. If both or
+// neither exist, the function returns an error.
 fn get_env_var(field_name string) ?string {
 	env_var_name := '$env.prefix$field_name.to_upper()'
 	env_file_name := '$env.prefix$field_name.to_upper()$env.file_suffix'
@@ -50,7 +56,7 @@ pub fn load<T>(path string) ?T {
 	if os.exists(path) {
 		// We don't use reflect here because reflect also sets any fields not
 		// in the toml back to their zero value, which we don't want
-		doc := toml.parse_file(path) ?
+		doc := toml.parse_file(path)?

 		$for field in T.fields {
 			s := doc.value(field.name)
@@ -66,7 +72,7 @@ pub fn load<T>(path string) ?T {
 	}

 	$for field in T.fields {
-		env_value := get_env_var(field.name) ?
+		env_value := get_env_var(field.name)?

 		// The value of an env var will always take precedence over the toml
 		// file.

View File

@@ -1,77 +0,0 @@
module git
import json
import response { Response }
import net.http
import db
// send_request<T> is a convenience method for sending requests to the repos
// API. It mostly does string manipulation to create a query string containing
// the provided params.
fn send_request<T>(method http.Method, address string, url string, api_key string, params map[string]string) ?Response<T> {
mut full_url := '$address$url'
if params.len > 0 {
params_str := params.keys().map('$it=${params[it]}').join('&')
full_url = '$full_url?$params_str'
}
mut req := http.new_request(method, full_url, '') ?
req.add_custom_header('X-API-Key', api_key) ?
res := req.do() ?
data := json.decode(Response<T>, res.text) ?
return data
}
// get_repos returns the current list of repos.
pub fn get_repos(address string, api_key string) ?[]db.GitRepo {
data := send_request<[]db.GitRepo>(http.Method.get, address, '/api/repos', api_key,
{}) ?
return data.data
}
// get_repo returns the repo for a specific ID.
pub fn get_repo(address string, api_key string, id int) ?db.GitRepo {
data := send_request<db.GitRepo>(http.Method.get, address, '/api/repos/$id', api_key,
{}) ?
return data.data
}
// add_repo adds a new repo to the server.
pub fn add_repo(address string, api_key string, url string, branch string, repo string, arch []string) ?Response<string> {
mut params := {
'url': url
'branch': branch
'repo': repo
}
if arch.len > 0 {
params['arch'] = arch.join(',')
}
data := send_request<string>(http.Method.post, address, '/api/repos', api_key, params) ?
return data
}
// remove_repo removes the repo with the given ID from the server.
pub fn remove_repo(address string, api_key string, id int) ?Response<string> {
data := send_request<string>(http.Method.delete, address, '/api/repos/$id', api_key,
{}) ?
return data
}
// patch_repo sends a PATCH request to the given repo with the params as
// payload.
pub fn patch_repo(address string, api_key string, id int, params map[string]string) ?Response<string> {
data := send_request<string>(http.Method.patch, address, '/api/repos/$id', api_key,
params) ?
return data
}

View File

@@ -3,15 +3,15 @@ module main
 import os
 import server
 import cli
-import build
-import git
+import console.git
+import console.logs
 import cron

 fn main() {
 	mut app := cli.Command{
 		name: 'vieter'
 		description: 'Vieter is a lightweight implementation of an Arch repository server.'
-		version: '0.3.0-alpha.1'
+		version: '0.3.0-alpha.2'
 		flags: [
 			cli.Flag{
 				flag: cli.FlagType.string
@@ -24,12 +24,12 @@ fn main() {
 		]
 		commands: [
 			server.cmd(),
-			build.cmd(),
 			git.cmd(),
 			cron.cmd(),
+			logs.cmd(),
 		]
 	}

 	app.setup()
 	app.parse(os.args)
-	return
 }

View File

@@ -159,7 +159,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
 			pkg_text := unsafe { buf.vstring_with_len(size).clone() }

-			pkg_info = parse_pkg_info_string(pkg_text) ?
+			pkg_info = parse_pkg_info_string(pkg_text)?
 		} else {
 			C.archive_read_data_skip(a)
 		}

View File

@@ -53,22 +53,22 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?Re
 		return error('Failed to read package file: $err.msg()')
 	}

-	added := r.add_pkg_in_repo(repo, pkg) ?
+	added := r.add_pkg_in_repo(repo, pkg)?

 	// If the add was successful, we move the file to the packages directory
 	for arch in added {
 		repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
 		dest_path := os.join_path_single(repo_pkg_path, pkg.filename())

-		os.mkdir_all(repo_pkg_path) ?
+		os.mkdir_all(repo_pkg_path)?

 		// We create hard links so that "any" arch packages aren't stored
 		// multiple times
-		os.link(pkg_path, dest_path) ?
+		os.link(pkg_path, dest_path)?
 	}

 	// After linking, we can remove the original file
-	os.rm(pkg_path) ?
+	os.rm(pkg_path)?

 	return RepoAddResult{
 		added: added.len > 0
@@ -87,7 +87,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 	// A package not of arch 'any' can be handled easily by adding it to the
 	// respective repo
 	if pkg.info.arch != 'any' {
-		if r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg) ? {
+		if r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)? {
 			return [pkg.info.arch]
 		} else {
 			return []
@@ -104,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 	// If this is the first package that's added to the repo, the directory
 	// won't exist yet
 	if os.exists(repo_dir) {
-		arch_repos = os.ls(repo_dir) ?
+		arch_repos = os.ls(repo_dir)?
 	}

 	// The default_arch should always be updated when a package with arch 'any'
@@ -118,7 +118,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 	// We add the package to each repository. If any of the repositories
 	// return true, the result of the function is also true.
 	for arch in arch_repos {
-		if r.add_pkg_in_arch_repo(repo, arch, pkg) ? {
+		if r.add_pkg_in_arch_repo(repo, arch, pkg)? {
 			added << arch
 		}
 	}
@@ -135,22 +135,22 @@ fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &pac
 	pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')

 	// Remove the previous version of the package, if present
-	r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false) ?
+	r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)?

 	os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }

 	os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()) or {
-		os.rmdir_all(pkg_dir) ?
+		os.rmdir_all(pkg_dir)?
 		return error('Failed to write desc file.')
 	}
 	os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or {
-		os.rmdir_all(pkg_dir) ?
+		os.rmdir_all(pkg_dir)?
 		return error('Failed to write files file.')
 	}

-	r.sync(repo, arch) ?
+	r.sync(repo, arch)?

 	return true
 }
@@ -168,7 +168,7 @@ fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg
 	// We iterate over every directory in the repo dir
 	// TODO filter so we only check directories
-	for d in os.ls(repo_dir) ? {
+	for d in os.ls(repo_dir)? {
 		// Because a repository only allows a single version of each package,
 		// we need only compare whether the name of the package is the same,
 		// not the version.
@@ -178,22 +178,22 @@ fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg
 			// We lock the mutex here to prevent other routines from creating a
 			// new archive while we remove an entry
 			lock r.mutex {
-				os.rmdir_all(os.join_path_single(repo_dir, d)) ?
+				os.rmdir_all(os.join_path_single(repo_dir, d))?
 			}

 			// Also remove the package archive
 			repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)

-			archives := os.ls(repo_pkg_dir) ?.filter(it.split('-')#[..-3].join('-') == name)
+			archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name)

 			for archive_name in archives {
 				full_path := os.join_path_single(repo_pkg_dir, archive_name)
-				os.rm(full_path) ?
+				os.rm(full_path)?
 			}

 			// Sync the db archives if requested
 			if sync {
-				r.sync(repo, arch) ?
+				r.sync(repo, arch)?
 			}

 			return true

View File

@@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ? {
 		C.archive_write_open_filename(a_files, &char(files_path.str))

 		// Iterate over each directory
-		for d in os.ls(subrepo_path) ?.filter(os.is_dir(os.join_path_single(subrepo_path,
+		for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path,
 			it))) {
 			// desc
 			mut inner_path := os.join_path_single(d, 'desc')

View File

@@ -18,10 +18,10 @@ pub fn cmd() cli.Command {
 		name: 'server'
 		description: 'Start the Vieter server.'
 		execute: fn (cmd cli.Command) ? {
-			config_file := cmd.flags.get_string('config-file') ?
-			conf := env.load<Config>(config_file) ?
+			config_file := cmd.flags.get_string('config-file')?
+			conf := env.load<Config>(config_file)?

-			server(conf) ?
+			server(conf)?
 		}
 	}
 }

136
src/server/logs.v 100644
View File

@@ -0,0 +1,136 @@
module server
import web
import net.http
import net.urllib
import response { new_data_response, new_response }
import db
import time
import os
import util
// get_logs returns all build logs in the database. A 'repo' query param can
// optionally be added to limit the list of build logs to that repository.
['/api/logs'; get]
fn (mut app App) get_logs() web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
logs := if 'repo' in app.query {
app.db.get_build_logs_for_repo(app.query['repo'].int())
} else {
app.db.get_build_logs()
}
return app.json(http.Status.ok, new_data_response(logs))
}
// get_single_log returns the build log with the given id.
['/api/logs/:id'; get]
fn (mut app App) get_single_log(id int) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
log := app.db.get_build_log(id) or { return app.not_found() }
return app.json(http.Status.ok, new_data_response(log))
}
// get_log_content returns the actual build log file for the given id.
['/api/logs/:id/content'; get]
fn (mut app App) get_log_content(id int) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
log := app.db.get_build_log(id) or { return app.not_found() }
file_name := log.start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
full_path := os.join_path(app.conf.data_dir, logs_dir_name, log.repo_id.str(), log.arch,
file_name)
return app.file(full_path)
}
// parse_query_time unescapes an HTTP query parameter & tries to parse it as a
// time.Time struct.
fn parse_query_time(query string) ?time.Time {
unescaped := urllib.query_unescape(query)?
t := time.parse(unescaped)?
return t
}
// post_log adds a new log to the database.
['/api/logs'; post]
fn (mut app App) post_log() web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
// Parse query params
start_time := parse_query_time(app.query['startTime']) or {
return app.json(http.Status.bad_request, new_response('Invalid or missing start time.'))
}
end_time := parse_query_time(app.query['endTime']) or {
return app.json(http.Status.bad_request, new_response('Invalid or missing end time.'))
}
if 'exitCode' !in app.query {
return app.json(http.Status.bad_request, new_response('Missing exit code.'))
}
exit_code := app.query['exitCode'].int()
if 'arch' !in app.query {
return app.json(http.Status.bad_request, new_response("Missing parameter 'arch'."))
}
arch := app.query['arch']
repo_id := app.query['repo'].int()
if !app.db.git_repo_exists(repo_id) {
return app.json(http.Status.bad_request, new_response('Unknown Git repo.'))
}
// Store log in db
log := db.BuildLog{
repo_id: repo_id
start_time: start_time
end_time: end_time
arch: arch
exit_code: exit_code
}
app.db.add_build_log(log)
repo_logs_dir := os.join_path(app.conf.data_dir, logs_dir_name, repo_id.str(), arch)
// Create the logs directory if it doesn't exist
if !os.exists(repo_logs_dir) {
os.mkdir_all(repo_logs_dir) or {
app.lerror("Couldn't create dir '$repo_logs_dir'.")
return app.json(http.Status.internal_server_error, new_response('An error occurred while processing the request.'))
}
}
// Stream log contents to correct file
file_name := start_time.custom_format('YYYY-MM-DD_HH-mm-ss')
full_path := os.join_path_single(repo_logs_dir, file_name)
if length := app.req.header.get(.content_length) {
util.reader_to_file(mut app.reader, length.int(), full_path) or {
app.lerror('An error occurred while receiving logs: $err.msg()')
return app.json(http.Status.internal_server_error, new_response('Failed to upload logs.'))
}
} else {
return app.status(http.Status.length_required)
}
return app.json(http.Status.ok, new_response('Logs added successfully.'))
}
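For illustration, a client-side upload to this endpoint could look as follows. A minimal sketch, assuming the `X-API-Key` header used by the client code elsewhere in the codebase & V's default `YYYY-MM-DD HH:mm:ss` format for `time.parse` (hence the URL-escaped query values; host, key & repo id are placeholders):

	module main

	import os
	import net.http

	// upload_log is a hypothetical client for POST /api/logs; the raw file
	// contents form the request body, so Content-Length is set automatically.
	fn upload_log(address string, api_key string) ? {
		params := 'repo=1&arch=x86_64&exitCode=0' +
			'&startTime=2022-05-18%2007%3A00%3A00&endTime=2022-05-18%2007%3A05%3A00'
		mut req := http.new_request(http.Method.post, '$address/api/logs?$params',
			os.read_file('build.log')?)?
		req.add_custom_header('X-API-Key', api_key)?

		res := req.do()?
		println(res.status_code)
	}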

View File

@@ -12,6 +12,7 @@ const (
 	log_file_name = 'vieter.log'
 	repo_dir_name = 'repos'
 	db_file_name = 'vieter.sqlite'
+	logs_dir_name = 'logs'
 )

 struct App {
@@ -37,6 +38,14 @@ pub fn server(conf Config) ? {
 	os.mkdir_all(conf.data_dir) or { util.exit_with_message(1, 'Failed to create data directory.') }

+	logs_dir := os.join_path_single(conf.data_dir, server.logs_dir_name)
+
+	if !os.exists(logs_dir) {
+		os.mkdir(os.join_path_single(conf.data_dir, server.logs_dir_name)) or {
+			util.exit_with_message(1, 'Failed to create logs directory.')
+		}
+	}
+
 	mut logger := log.Log{
 		level: log_level
 	}

View File

@@ -0,0 +1,2 @@
This module defines a few useful functions used throughout the codebase that
don't specifically fit inside any other module.

95
src/util/stream.v 100644
View File

@@ -0,0 +1,95 @@
// Functions for interacting with `io.Reader` & `io.Writer` objects.
module util
import io
import os
// reader_to_writer tries to consume the entire reader & write it to the writer.
pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? {
mut buf := []u8{len: 10 * 1024}
for {
bytes_read := reader.read(mut buf) or { break }
mut bytes_written := 0
for bytes_written < bytes_read {
c := writer.write(buf[bytes_written..bytes_read]) or { break }
bytes_written += c
}
}
}
// reader_to_file writes the contents of a BufferedReader to a file
pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
mut file := os.create(path)?
defer {
file.close()
}
mut buf := []u8{len: reader_buf_size}
mut bytes_left := length
// Repeat as long as the stream still has data
for bytes_left > 0 {
// TODO check if just breaking here is safe
bytes_read := reader.read(mut buf) or { break }
bytes_left -= bytes_read
mut to_write := bytes_read
for to_write > 0 {
// TODO don't just loop infinitely here
bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }
// file.flush()
to_write = to_write - bytes_written
}
}
}
// match_array_in_array<T> returns how many elements of a2 overlap with a1. For
// example, if a1 = "abcd" & a2 = "cd", the result will be 2. If the match is
// not at the end of a1, the result is 0.
pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
mut i := 0
mut match_len := 0
for i + match_len < a1.len {
if a1[i + match_len] == a2[match_len] {
match_len += 1
} else {
i += match_len + 1
match_len = 0
}
}
return match_len
}
// read_until_separator consumes an io.Reader until it encounters some
// separator array. The data read is stored inside the provided res array.
pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
mut buf := []u8{len: sep.len}
for {
c := reader.read(mut buf)?
res << buf[..c]
match_len := match_array_in_array(buf[..c], sep)
if match_len == sep.len {
break
}
if match_len > 0 {
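// The separator can straddle a read boundary: only its first match_len bytes
// were at the end of this buffer, so read exactly the missing tail & check
// whether it completes the separator.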
match_left := sep.len - match_len
c2 := reader.read(mut buf[..match_left])?
res << buf[..c2]
if buf[..c2] == sep[match_len..] {
break
}
}
}
}

View File

@@ -1,13 +1,13 @@
 module util

 import os
-import io
 import crypto.md5
 import crypto.sha256

-const reader_buf_size = 1_000_000
-
-const prefixes = ['B', 'KB', 'MB', 'GB']
+const (
+	reader_buf_size = 1_000_000
+	prefixes = ['B', 'KB', 'MB', 'GB']
+)

 // Dummy struct to work around the fact that you can only share structs, maps &
 // arrays
@@ -23,34 +23,6 @@ pub fn exit_with_message(code int, msg string) {
 	exit(code)
 }

-// reader_to_file writes the contents of a BufferedReader to a file
-pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
-	mut file := os.create(path) ?
-	defer {
-		file.close()
-	}
-
-	mut buf := []u8{len: util.reader_buf_size}
-	mut bytes_left := length
-
-	// Repeat as long as the stream still has data
-	for bytes_left > 0 {
-		// TODO check if just breaking here is safe
-		bytes_read := reader.read(mut buf) or { break }
-		bytes_left -= bytes_read
-
-		mut to_write := bytes_read
-
-		for to_write > 0 {
-			// TODO don't just loop infinitely here
-			bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }
-			// file.flush()
-
-			to_write = to_write - bytes_written
-		}
-	}
-}
-
 // hash_file returns the md5 & sha256 hash of a given file
 // TODO actually implement sha256
 pub fn hash_file(path &string) ?(string, string) {

View File

@@ -190,7 +190,7 @@ pub fn (ctx Context) before_request() {}

 // send_string
 fn send_string(mut conn net.TcpConn, s string) ? {
-	conn.write(s.bytes()) ?
+	conn.write(s.bytes())?
 }

 // send_response_to_client sends a response to the client