Merge pull request 'integrate build logs API into build command & cron' (#171) from Chewing_Bever/vieter:build-logs into dev
ci/woodpecker/push/build Pipeline was successful
ci/woodpecker/push/lint Pipeline was successful
ci/woodpecker/push/test Pipeline was successful
ci/woodpecker/push/docs Pipeline was successful
ci/woodpecker/push/arch Pipeline failed
ci/woodpecker/push/docker unknown status
ci/woodpecker/push/deploy unknown status

Reviewed-on: vieter/vieter#171
Jef Roosens 2022-05-09 15:08:59 +02:00
commit cae44fb593
4 changed files with 95 additions and 35 deletions

View File

@@ -73,10 +73,18 @@ pub fn create_build_image(base_image string) ?string {
return image.id
}
pub struct BuildResult {
pub:
start_time time.Time
end_time time.Time
exit_code int
logs string
}
// build_repo builds, packages & publishes a given Arch package based on the
// provided GitRepo. The base image ID should be of an image previously created
// by create_build_image.
pub fn build_repo(address string, api_key string, base_image_id string, repo &db.GitRepo) ? {
// by create_build_image. It returns the logs of the container.
pub fn build_repo(address string, api_key string, base_image_id string, repo &db.GitRepo) ?BuildResult {
build_arch := os.uname().machine
// TODO what to do with PKGBUILDs that build multiple packages?
@@ -87,7 +95,7 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &db
'source PKGBUILD',
// The build container checks whether the package is already
// present on the server
'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'curl -s --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
]
@@ -107,43 +115,44 @@ pub fn build_repo(address string, api_key string, base_image_id string, repo &db
id := docker.create_container(c) ?
docker.start_container(id) ?
mut data := docker.inspect_container(id) ?
// This loop waits until the container has stopped, so we can remove it after
for {
data := docker.inspect_container(id) ?
if !data.state.running {
break
}
for data.state.running {
time.sleep(1 * time.second)
data = docker.inspect_container(id) ?
}
logs := docker.get_container_logs(id) ?
docker.remove_container(id) ?
return BuildResult{
start_time: data.state.start_time
end_time: data.state.end_time
exit_code: data.state.exit_code
logs: logs
}
}
// build builds every Git repo in the server's list.
fn build(conf Config) ? {
fn build(conf Config, repo_id int) ? {
c := client.new(conf.address, conf.api_key)
repo := c.get_git_repo(repo_id) ?
build_arch := os.uname().machine
// We get the repos map from the Vieter instance
repos := client.new(conf.address, conf.api_key).get_git_repos() ?
// We filter out any repos that aren't allowed to be built on this
// architecture
filtered_repos := repos.filter(it.arch.map(it.value).contains(build_arch))
// No point in doing work if there's no repos present
if filtered_repos.len == 0 {
return
}
// First, we create a base image which has updated repos n stuff
println('Creating base image...')
image_id := create_build_image(conf.base_image) ?
for repo in filtered_repos {
build_repo(conf.address, conf.api_key, image_id, repo) ?
}
println('Running build...')
res := build_repo(conf.address, conf.api_key, image_id, repo) ?
// Finally, we remove the builder image
println('Removing build image...')
docker.remove_image(image_id) ?
println('Uploading logs to Vieter...')
c.add_build_log(repo.id, res.start_time, res.end_time, build_arch, res.exit_code,
res.logs) ?
}

View File

@@ -14,12 +14,16 @@ pub:
pub fn cmd() cli.Command {
return cli.Command{
name: 'build'
description: 'Run the build process.'
required_args: 1
usage: 'id'
description: 'Build the repository with the given ID.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
build(conf) ?
id := cmd.args[0].int()
build(conf, id) ?
}
}
}
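
For reference, a hypothetical invocation of the reworked command. The binary name and the repository ID are assumptions, and the server address and API key are expected to come from the usual config file or environment:

vieter build 3

With required_args: 1 the command now expects at least one positional argument, interpreted as the ID of the Git repo to build.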

View File

@@ -3,6 +3,7 @@ module daemon
import time
import sync.stdatomic
import build
import os
const (
build_empty = 0
@@ -77,13 +78,20 @@ fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) {
// 0 means success, 1 means failure
mut status := 0
build.build_repo(d.client.address, d.client.api_key, d.builder_images.last(), &sb.repo) or {
res := build.build_repo(d.client.address, d.client.api_key, d.builder_images.last(),
&sb.repo) or {
d.ldebug('build_repo error: $err.msg()')
status = 1
build.BuildResult{}
}
if status == 0 {
d.linfo('finished build: $sb.repo.url $sb.repo.branch')
d.linfo('finished build: $sb.repo.url $sb.repo.branch; uploading logs...')
build_arch := os.uname().machine
d.client.add_build_log(sb.repo.id, res.start_time, res.end_time, build_arch, res.exit_code,
res.logs) or { d.lerror('Failed to upload logs for $sb.repo.url $sb.repo.arch') }
} else {
d.linfo('failed build: $sb.repo.url $sb.repo.branch')
}

View File

@@ -2,6 +2,7 @@ module docker
import json
import net.urllib
import time
struct Container {
id string [json: Id]
@@ -49,13 +50,21 @@ pub fn start_container(id string) ?bool {
}
struct ContainerInspect {
pub:
pub mut:
state ContainerState [json: State]
}
struct ContainerState {
pub:
running bool [json: Running]
running bool [json: Running]
status string [json: Status]
exit_code int [json: ExitCode]
// These use a rather specific format so they have to be parsed later
start_time_str string [json: StartedAt]
end_time_str string [json: FinishedAt]
pub mut:
start_time time.Time [skip]
end_time time.Time [skip]
}
// inspect_container returns the result of inspecting a container with a given
@@ -67,7 +76,15 @@ pub fn inspect_container(id string) ?ContainerInspect {
return error('Failed to inspect container.')
}
return json.decode(ContainerInspect, res.text) or {}
mut data := json.decode(ContainerInspect, res.text) ?
data.state.start_time = time.parse_rfc3339(data.state.start_time_str) ?
if data.state.status == 'exited' {
data.state.end_time = time.parse_rfc3339(data.state.end_time_str) ?
}
return data
}
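
Background on the timestamp handling above: Docker reports StartedAt and FinishedAt as RFC 3339 strings with nanosecond precision, and for a container that has not exited yet FinishedAt is still the zero timestamp (0001-01-01T00:00:00Z), which is why end_time is only parsed once the status is 'exited'. A standalone sketch of that parse using vlib's time module; the timestamp value is purely illustrative:

import time

fn main() {
	// Docker-style RFC 3339 timestamp with sub-second precision.
	started := time.parse_rfc3339('2022-05-09T13:08:59.645145Z') or { panic(err) }
	println(started)
}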
// remove_container removes a container with a given ID.
@@ -76,3 +93,25 @@ pub fn remove_container(id string) ?bool {
return res.status_code == 204
}
// get_container_logs retrieves the logs for a Docker container, both stdout &
// stderr.
pub fn get_container_logs(id string) ?string {
res := request('GET', urllib.parse('/v1.41/containers/$id/logs?stdout=true&stderr=true') ?) ?
mut res_bytes := res.text.bytes()
// Docker uses a special "stream" format for their logs, so we have to
// clean up the data.
mut index := 0
for index < res_bytes.len {
// The length of the upcoming payload is stored as a big-endian u32, so the
// bytes have to be reversed before casting them on a little-endian machine
t := res_bytes[index + 4..index + 8].reverse()
len_length := unsafe { *(&u32(&t[0])) }
res_bytes.delete_many(index, 8)
index += int(len_length)
}
return res_bytes.bytestr()
}
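
Some background on the cleanup loop above, since it is easy to misread: for containers created without a TTY, the Docker Engine API multiplexes stdout and stderr into a single stream in which every frame starts with an 8-byte header: one byte for the stream type (1 = stdout, 2 = stderr), three zero padding bytes, and the payload length as a big-endian u32. The loop strips each header and then skips over the payload it announces, and the reverse-and-cast converts the big-endian length for a little-endian host. A minimal sketch of the same length decode, assuming vlib's encoding.binary module is available:

import encoding.binary

// frame_payload_len returns the payload length announced by an 8-byte
// Docker log stream frame header.
fn frame_payload_len(header []u8) u32 {
	return binary.big_endian_u32(header[4..8])
}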