forked from vieter-v/vieter

Merge pull request 'implementation of cron daemon' (#134) from cron into dev

Reviewed-on: #134

commit 4a47c7bbdc
.gitignore

@@ -5,8 +5,8 @@ data/
 vieter
 dvieter
 pvieter
-dvieterctl
-vieterctl
+suvieter
+afvieter
 vieter.c
 
 # Ignore testing files
@@ -23,3 +23,6 @@ v/
 
 # gdb log file
 gdb.txt
+
+# Generated docs
+_docs/

@@ -10,6 +10,8 @@ pipeline:
   build:
     image: 'menci/archlinuxarm:base-devel'
     commands:
+      # Add the vieter repository so we can use the compiler
+      - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
       # Update packages
       - pacman -Syu --noconfirm
      # Create non-root user to perform build & switch to their home

@@ -9,22 +9,21 @@ matrix:
 platform: ${PLATFORM}
 
 pipeline:
-  # The default build isn't needed, as alpine switches to gcc for the compiler anyways
   debug:
     image: 'chewingbever/vlang:latest'
     pull: true
-    group: 'build'
     commands:
-      - make debug
+      - make
     when:
       event: push
+      branch:
+        exclude: [main, dev]
 
   prod:
     image: 'chewingbever/vlang:latest'
     pull: true
     environment:
       - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static
-    group: 'build'
     commands:
       - make prod
       # Make sure the binary is actually statically built

@@ -0,0 +1,29 @@
+# These builds are not important for the project, but might be valuable for
+# fixing bugs in the V compiler.
+
+platform: linux/amd64
+branches:
+  exclude: [master, dev]
+
+pipeline:
+  autofree:
+    image: 'chewingbever/vlang:latest'
+    pull: true
+    group: 'build'
+    commands:
+      - make autofree
+      - readelf -d afvieter
+      - du -h afvieter
+    when:
+      event: push
+
+  skip-unused:
+    image: 'chewingbever/vlang:latest'
+    pull: true
+    group: 'build'
+    commands:
+      - make skip-unused
+      - readelf -d suvieter
+      - du -h suvieter
+    when:
+      event: push

@@ -7,7 +7,5 @@ pipeline:
   lint:
     image: 'chewingbever/vlang:latest'
     pull: true
-    group: lint
     commands:
       - make lint
-      - make vet

Makefile | 36

@@ -7,6 +7,7 @@ V := $(V_PATH) -showcc -gc boehm
 
 all: vieter
 
+
 # =====COMPILATION=====
 # Regular binary
 vieter: $(SOURCES)
@@ -23,7 +24,7 @@ dvieter: $(SOURCES)
 # Run the debug build inside gdb
 .PHONY: gdb
 gdb: dvieter
-	gdb --args './dvieter -f vieter.toml server'
+	gdb --args ./dvieter -f vieter.toml server
 
 # Optimised production build
 .PHONY: prod
@@ -33,33 +34,34 @@ pvieter: $(SOURCES)
 
 # Only generate C code
 .PHONY: c
-c:
+c: $(SOURCES)
 	$(V) -o vieter.c $(SRC_DIR)
 
+
 # =====EXECUTION=====
 # Run the server in the default 'data' directory
 .PHONY: run
 run: vieter
 	./vieter -f vieter.toml server
 
 .PHONY: run-prod
 run-prod: prod
 	./pvieter -f vieter.toml server
 
+
 # =====OTHER=====
 .PHONY: lint
 lint:
 	$(V) fmt -verify $(SRC_DIR)
+	$(V) vet -W $(SRC_DIR)
+	$(V_PATH) missdoc -p $(SRC_DIR)
+	@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ]
 
 # Format the V codebase
 .PHONY: fmt
 fmt:
 	$(V) fmt -w $(SRC_DIR)
 
-.PHONY: vet
-vet:
-	$(V) vet -W $(SRC_DIR)
-
 .PHONY: test
 test:
 	$(V) test $(SRC_DIR)
@@ -71,5 +73,23 @@ v/v:
 	git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v
 	make -C v
 
+.PHONY: clean
 clean:
-	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst
+	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs'
+
+.PHONY: api-docs
+api-docs:
+	rm -rf '$(SRC_DIR)/_docs'
+	cd '$(SRC_DIR)' && v doc -all -f html -m -readme .
+
+
+# =====EXPERIMENTAL=====
+.PHONY: autofree
+autofree: afvieter
+afvieter: $(SOURCES)
+	$(V_PATH) -showcc -autofree -o afvieter $(SRC_DIR)
+
+.PHONY: skip-unused
+skip-unused: suvieter
+suvieter: $(SOURCES)
+	$(V_PATH) -showcc -skip-unused -o suvieter $(SRC_DIR)

PKGBUILD | 9

@@ -2,10 +2,10 @@
 
 pkgbase='vieter'
 pkgname='vieter'
-pkgver=0.2.0.r24.g9a56bd0
+pkgver=0.2.0.r25.g20112b8
 pkgrel=1
 depends=('glibc' 'openssl' 'libarchive' 'gc')
-makedepends=('git' 'gcc')
+makedepends=('git' 'gcc' 'vieter-v')
 arch=('x86_64' 'aarch64' 'armv7')
 url='https://git.rustybever.be/Chewing_Bever/vieter'
 license=('AGPL3')
@@ -20,10 +20,7 @@ pkgver() {
 build() {
     cd "$pkgname"
 
-    # Build the compiler
-    CFLAGS= make v
-
-    V_PATH=v/v make prod
+    make prod
 }
 
 package() {

README.md

@@ -2,7 +2,9 @@
 
 ## Documentation
 
-I host documentation for Vieter over at https://rustybever.be/docs/vieter/.
+I host documentation for Vieter over at https://rustybever.be/docs/vieter/. API
+documentation for the current codebase can be found at
+https://rustybever.be/api-docs/vieter/.
 
 ## Overview
 

@@ -10,7 +10,12 @@ const container_build_dir = '/build'
 
 const build_image_repo = 'vieter-build'
 
-fn create_build_image(base_image string) ?string {
+// create_build_image creates a builder image given some base image which can
+// then be used to build & package Arch images. It mostly just updates the
+// system, installs some necessary packages & creates a non-root user to run
+// makepkg with. The base image should be some Linux distribution that uses
+// Pacman as its package manager.
+pub fn create_build_image(base_image string) ?string {
 	commands := [
 		// Update repos & install required packages
 		'pacman -Syu --needed --noconfirm base-devel git'
@@ -53,12 +58,13 @@ fn create_build_image(base_image string) ?string {
 			break
 		}
 
-		// Wait for 5 seconds
-		time.sleep(5000000000)
+		time.sleep(1 * time.second)
 	}
 
 	// Finally, we create the image from the container
 	// As the tag, we use the epoch value
+	// TODO also add the base image's name into the image name to prevent
+	// conflicts.
 	tag := time.sys_mono_now().str()
 	image := docker.create_image_from_container(id, 'vieter-build', tag) ?
 	docker.remove_container(id) ?
@@ -66,6 +72,55 @@ fn create_build_image(base_image string) ?string {
 	return image.id
 }
 
+// build_repo builds, packages & publishes a given Arch package based on the
+// provided GitRepo. The base image ID should be of an image previously created
+// by create_build_image.
+pub fn build_repo(address string, api_key string, base_image_id string, repo &git.GitRepo) ? {
+	build_arch := os.uname().machine
+
+	// TODO what to do with PKGBUILDs that build multiple packages?
+	commands := [
+		'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
+		'cd repo',
+		'makepkg --nobuild --nodeps',
+		'source PKGBUILD',
+		// The build container checks whether the package is already
+		// present on the server
+		'curl --head --fail $address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
+		'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $address/$repo.repo/publish; done',
+	]
+
+	// We convert the list of commands into a base64 string, which then gets
+	// passed to the container as an env var
+	cmds_str := base64.encode_str(commands.join('\n'))
+
+	c := docker.NewContainer{
+		image: '$base_image_id'
+		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$api_key']
+		entrypoint: ['/bin/sh', '-c']
+		cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
+		work_dir: '/build'
+		user: 'builder:builder'
+	}
+
+	id := docker.create_container(c) ?
+	docker.start_container(id) ?
+
+	// This loop waits until the container has stopped, so we can remove it after
+	for {
+		data := docker.inspect_container(id) ?
+
+		if !data.state.running {
+			break
+		}
+
+		time.sleep(1 * time.second)
+	}
+
+	docker.remove_container(id) ?
+}
+
+// build builds every Git repo in the server's list.
 fn build(conf Config) ? {
 	build_arch := os.uname().machine
 
@@ -85,47 +140,7 @@ fn build(conf Config) ? {
 	image_id := create_build_image(conf.base_image) ?
 
 	for repo in filtered_repos {
-		// TODO what to do with PKGBUILDs that build multiple packages?
-		commands := [
-			'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
-			'cd repo',
-			'makepkg --nobuild --nodeps',
-			'source PKGBUILD',
-			// The build container checks whether the package is already
-			// present on the server
-			'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
-			'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
-		]
-
-		// We convert the list of commands into a base64 string, which then gets
-		// passed to the container as an env var
-		cmds_str := base64.encode_str(commands.join('\n'))
-
-		c := docker.NewContainer{
-			image: '$image_id'
-			env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
-			entrypoint: ['/bin/sh', '-c']
-			cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
-			work_dir: '/build'
-			user: 'builder:builder'
-		}
-
-		id := docker.create_container(c) ?
-		docker.start_container(id) ?
-
-		// This loop waits until the container has stopped, so we can remove it after
-		for {
-			data := docker.inspect_container(id) ?
-
-			if !data.state.running {
-				break
-			}
-
-			// Wait for 5 seconds
-			time.sleep(5000000000)
-		}
-
-		docker.remove_container(id) ?
+		build_repo(conf.address, conf.api_key, image_id, repo) ?
 	}
 
 	// Finally, we remove the builder image

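The command list in build_repo never touches the container's filesystem: it is
joined with newlines, base64-encoded and handed to the container as the
BUILD_SCRIPT env var, which the `/bin/sh -c` entrypoint decodes and pipes into
bash. A minimal sketch of that round trip (the container side is plain shell in
the real flow; here both halves are done in V for illustration):

```v
import encoding.base64

fn main() {
	// Daemon side: join the script & encode it so it survives being passed
	// around as a single env var value.
	commands := ['cd repo', 'makepkg --nobuild --nodeps']
	cmds_str := base64.encode_str(commands.join('\n'))

	// Container side (the real entrypoint does this with `base64 -d`):
	// decoding yields the original script byte-for-byte.
	assert base64.decode_str(cmds_str) == 'cd repo\nmakepkg --nobuild --nodeps'
	println(cmds_str)
}
```
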
@@ -5,11 +5,16 @@ import env
 
 struct Config {
 pub:
-	log_level  string = 'WARN'
-	log_file   string = 'vieter.log'
-	api_key    string
-	address    string
-	base_image string = 'archlinux:base-devel'
+	log_level               string = 'WARN'
+	log_file                string = 'vieter.log'
+	api_key                 string
+	address                 string
+	base_image              string = 'archlinux:base-devel'
+	max_concurrent_builds   int    = 1
+	api_update_frequency    int    = 15
+	image_rebuild_frequency int    = 1440
+	// Replicates the behavior of the original cron system
+	global_schedule string = '0 3'
 }
 
 // cmd returns the cli module that handles the cron daemon.

@@ -1,18 +1,29 @@
 module cron
 
-import git
-import time
+import log
+import cron.daemon
+import cron.expression
 
-struct ScheduledBuild {
-	repo      git.GitRepo
-	timestamp time.Time
-}
-
-fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
-	return r1.timestamp < r2.timestamp
-}
-
 // cron starts a cron daemon & starts periodically scheduling builds.
 pub fn cron(conf Config) ? {
-	println('WIP')
+	// Configure logger
+	log_level := log.level_from_tag(conf.log_level) or {
+		return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
+	}
+
+	mut logger := log.Log{
+		level: log_level
+	}
+
+	logger.set_full_logpath(conf.log_file)
+	logger.log_to_console_too()
+
+	ce := expression.parse_expression(conf.global_schedule) or {
+		return error('Error while parsing global cron expression: $err.msg()')
+	}
+
+	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce,
+		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency) ?
+
+	d.run() ?
 }

@@ -0,0 +1,83 @@
+module daemon
+
+import time
+import sync.stdatomic
+import build
+
+const build_empty = 0
+
+const build_running = 1
+
+const build_done = 2
+
+// clean_finished_builds removes finished builds from the build slots & returns
+// them.
+fn (mut d Daemon) clean_finished_builds() ?[]ScheduledBuild {
+	mut out := []ScheduledBuild{}
+
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
+			out << d.builds[i]
+		}
+	}
+
+	return out
+}
+
+// start_new_builds starts as many builds as possible.
+fn (mut d Daemon) start_new_builds() ? {
+	now := time.now()
+
+	for d.queue.len() > 0 {
+		if d.queue.peek() ?.timestamp < now {
+			sb := d.queue.pop() ?
+
+			// If this build couldn't be scheduled, no more will be possible.
+			if !d.start_build(sb) {
+				d.queue.insert(sb)
+				break
+			}
+		} else {
+			break
+		}
+	}
+}
+
+// start_build starts a build for the given ScheduledBuild object.
+fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_running)
+			d.builds[i] = sb
+
+			go d.run_build(i, sb)
+
+			return true
+		}
+	}
+
+	return false
+}
+
+// run_build actually starts the build process for a given repo.
+fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
+	d.linfo('started build: $sb.repo.url $sb.repo.branch')
+
+	build.build_repo(d.address, d.api_key, d.builder_images.last(), &sb.repo) ?
+
+	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
+}
+
+// current_build_count returns how many builds are currently running.
+fn (mut d Daemon) current_build_count() int {
+	mut res := 0
+
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running {
+			res += 1
+		}
+	}
+
+	return res
+}

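The three consts above form a tiny per-slot state machine: a slot goes
empty -> running when start_build claims it, running -> done as the spawned
thread's last action, and done -> empty when clean_finished_builds collects the
result. A standalone sketch of that lifecycle (single-threaded here for
brevity; only V's `sync.stdatomic` module is assumed):

```v
import sync.stdatomic

const (
	build_empty   = u64(0)
	build_running = u64(1)
	build_done    = u64(2)
)

fn main() {
	// One atomic u64 per build slot, as in the Daemon struct.
	mut atomics := []u64{len: 4}

	// start_build: claim a free slot (empty -> running).
	stdatomic.store_u64(&atomics[0], build_running)
	// run_build's final act: mark the slot finished (running -> done).
	stdatomic.store_u64(&atomics[0], build_done)

	// clean_finished_builds: reclaim finished slots (done -> empty).
	for i in 0 .. atomics.len {
		if stdatomic.load_u64(&atomics[i]) == build_done {
			stdatomic.store_u64(&atomics[i], build_empty)
			println('slot $i reclaimed')
		}
	}
}
```
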
@@ -0,0 +1,223 @@
+module daemon
+
+import git
+import time
+import log
+import datatypes { MinHeap }
+import cron.expression { CronExpression, parse_expression }
+import math
+import build
+import docker
+
+struct ScheduledBuild {
+pub:
+	repo_id   string
+	repo      git.GitRepo
+	timestamp time.Time
+}
+
+// Overloaded operator for comparing ScheduledBuild objects
+fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
+	return r1.timestamp < r2.timestamp
+}
+
+pub struct Daemon {
+mut:
+	address                 string
+	api_key                 string
+	base_image              string
+	builder_images          []string
+	global_schedule         CronExpression
+	api_update_frequency    int
+	image_rebuild_frequency int
+	// Repos currently loaded from API.
+	repos_map map[string]git.GitRepo
+	// At what point to update the list of repositories.
+	api_update_timestamp  time.Time
+	image_build_timestamp time.Time
+	queue                 MinHeap<ScheduledBuild>
+	// Which builds are currently running
+	builds []ScheduledBuild
+	// Atomic variables used to detect when a build has finished; length is the
+	// same as builds
+	atomics []u64
+	logger  shared log.Log
+}
+
+// init_daemon initializes a new Daemon object. It renews the repositories &
+// populates the build queue for the first time.
+pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) ?Daemon {
+	mut d := Daemon{
+		address: address
+		api_key: api_key
+		base_image: base_image
+		global_schedule: global_schedule
+		api_update_frequency: api_update_frequency
+		image_rebuild_frequency: image_rebuild_frequency
+		atomics: []u64{len: max_concurrent_builds}
+		builds: []ScheduledBuild{len: max_concurrent_builds}
+		logger: logger
+	}
+
+	// Initialize the repos & queue
+	d.renew_repos() ?
+	d.renew_queue() ?
+	d.rebuild_base_image() ?
+
+	return d
+}
+
+// run starts the actual daemon process. It runs builds when possible &
+// periodically refreshes the list of repositories to ensure we stay in sync.
+pub fn (mut d Daemon) run() ? {
+	for {
+		finished_builds := d.clean_finished_builds() ?
+
+		// Update the API's contents if needed & renew the queue
+		if time.now() >= d.api_update_timestamp {
+			d.renew_repos() ?
+			d.renew_queue() ?
+		}
+		// The finished builds should only be rescheduled if the API contents
+		// haven't been renewed.
+		else {
+			for sb in finished_builds {
+				d.schedule_build(sb.repo_id, sb.repo) ?
+			}
+		}
+
+		// TODO remove old builder images.
+		// This issue is less trivial than it sounds, because a build could
+		// still be running when the image has to be rebuilt. That would
+		// prevent the image from being removed. Therefore, we will need to
+		// keep track of a list or something & remove an image once we have
+		// made sure it isn't being used anymore.
+		if time.now() >= d.image_build_timestamp {
+			d.rebuild_base_image() ?
+			// In theory, executing this function here allows an old builder
+			// image to exist for at most image_rebuild_frequency minutes.
+			d.clean_old_base_images()
+		}
+
+		// Schedules new builds when possible
+		d.start_new_builds() ?
+
+		// If there are builds currently running, the daemon should refresh
+		// every second to clean up any finished builds & start new ones.
+		mut delay := time.Duration(1 * time.second)
+
+		// Sleep either until we have to refresh the repos or when the next
+		// build has to start, with a minimum of 1 second.
+		if d.current_build_count() == 0 {
+			now := time.now()
+			delay = d.api_update_timestamp - now
+
+			if d.queue.len() > 0 {
+				time_until_next_job := d.queue.peek() ?.timestamp - now
+
+				delay = math.min(delay, time_until_next_job)
+			}
+		}
+
+		// We sleep for at least one second. This is to prevent the program
+		// from looping aggressively when a cronjob can be scheduled, but
+		// there's no spots free for it to be started.
+		delay = math.max(delay, 1 * time.second)
+
+		d.ldebug('Sleeping for ${delay}...')
+
+		time.sleep(delay)
+	}
+}
+
+// schedule_build adds the next occurrence of the given repo build to the queue.
+fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
+	ce := if repo.schedule != '' {
+		parse_expression(repo.schedule) or {
+			// TODO This shouldn't return an error if the expression is empty.
+			d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
+
+			d.global_schedule
+		}
+	} else {
+		d.global_schedule
+	}
+
+	// A repo that can't be scheduled will just be skipped for now
+	timestamp := ce.next_from_now() ?
+
+	d.queue.insert(ScheduledBuild{
+		repo_id: repo_id
+		repo: repo
+		timestamp: timestamp
+	})
+}
+
+// renew_repos requests the newest list of Git repos from the server & replaces
+// the old one.
+fn (mut d Daemon) renew_repos() ? {
+	d.linfo('Renewing repos...')
+	mut new_repos := git.get_repos(d.address, d.api_key) ?
+
+	d.repos_map = new_repos.move()
+
+	d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency)
+}
+
+// renew_queue replaces the old queue with a new one that reflects the newest
+// values in repos_map.
+fn (mut d Daemon) renew_queue() ? {
+	d.linfo('Renewing queue...')
+	mut new_queue := MinHeap<ScheduledBuild>{}
+
+	// Move any jobs that should have already started from the old queue onto
+	// the new one
+	now := time.now()
+
+	// For some reason, using
+	// ```v
+	// for d.queue.len() > 0 && d.queue.peek() ?.timestamp < now {
+	// ```
+	// here causes the function to prematurely just exit, without any errors or anything, very weird
+	// https://github.com/vlang/v/issues/14042
+	for d.queue.len() > 0 {
+		if d.queue.peek() ?.timestamp < now {
+			new_queue.insert(d.queue.pop() ?)
+		} else {
+			break
+		}
+	}
+
+	d.queue = new_queue
+
+	// For each repository in repos_map, parse their cron expression (or use
+	// the default one if not present) & add them to the queue
+	for id, repo in d.repos_map {
+		d.schedule_build(id, repo) ?
+	}
+}
+
+// rebuild_base_image recreates the builder image.
+fn (mut d Daemon) rebuild_base_image() ? {
+	d.linfo('Rebuilding builder image....')
+
+	d.builder_images << build.create_build_image(d.base_image) ?
+	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency)
+}
+
+// clean_old_base_images tries to remove any old but still present builder
+// images.
+fn (mut d Daemon) clean_old_base_images() {
+	mut i := 0
+
+	for i < d.builder_images.len - 1 {
+		// For each builder image, we try to remove it by calling the Docker
+		// API. If the function returns an error or false, that means the image
+		// wasn't deleted. Therefore, we move the index over. If the function
+		// returns true, the array's length has decreased by one so we don't
+		// move the index.
+		if !docker.remove_image(d.builder_images[i]) or { false } {
+			i += 1
+		}
+	}
+}

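The queue's ordering comes entirely from `datatypes.MinHeap` plus the `<`
overload on ScheduledBuild: the element that compares smallest (the earliest
timestamp) is always on top, so peek/pop hand the daemon the next due build. A
small sketch of that mechanism, assuming only V's standard `datatypes` and
`time` modules:

```v
import datatypes { MinHeap }
import time

struct Job {
	name      string
	timestamp time.Time
}

// MinHeap orders its elements through this overload, so the job with the
// earliest timestamp always sits at the top of the heap.
fn (a Job) < (b Job) bool {
	return a.timestamp < b.timestamp
}

fn main() {
	mut queue := MinHeap<Job>{}
	queue.insert(Job{ name: 'later', timestamp: time.now().add_seconds(3600) })
	queue.insert(Job{ name: 'soon', timestamp: time.now() })

	// Jobs pop in chronological order regardless of insertion order.
	for queue.len() > 0 {
		job := queue.pop() or { break }
		println(job.name)
	}
}
```
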
@@ -0,0 +1,35 @@
+module daemon
+
+import log
+
+// log create a log message with the given level
+pub fn (mut d Daemon) log(msg &string, level log.Level) {
+	lock d.logger {
+		d.logger.send_output(msg, level)
+	}
+}
+
+// lfatal create a log message with the fatal level
+pub fn (mut d Daemon) lfatal(msg &string) {
+	d.log(msg, log.Level.fatal)
+}
+
+// lerror create a log message with the error level
+pub fn (mut d Daemon) lerror(msg &string) {
+	d.log(msg, log.Level.error)
+}
+
+// lwarn create a log message with the warn level
+pub fn (mut d Daemon) lwarn(msg &string) {
+	d.log(msg, log.Level.warn)
+}
+
+// linfo create a log message with the info level
+pub fn (mut d Daemon) linfo(msg &string) {
+	d.log(msg, log.Level.info)
+}
+
+// ldebug create a log message with the debug level
+pub fn (mut d Daemon) ldebug(msg &string) {
+	d.log(msg, log.Level.debug)
+}

@@ -1,8 +1,8 @@
-module cron
+module expression
 
 import time
 
-struct CronExpression {
+pub struct CronExpression {
 	minutes []int
 	hours   []int
 	days    []int
@@ -65,6 +65,7 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
 	if minute_index == ce.minutes.len && hour_index < ce.hours.len {
 		hour_index += 1
 	}
+
 	if hour_index == ce.hours.len && day_index < ce.days.len {
 		day_index += 1
 	}
@@ -114,7 +115,9 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
 	})
 }
 
-fn (ce &CronExpression) next_from_now() ?time.Time {
+// next_from_now returns the result of ce.next(ref) where ref is the result of
+// time.now().
+pub fn (ce &CronExpression) next_from_now() ?time.Time {
 	return ce.next(time.now())
 }
 
@@ -195,6 +198,8 @@ fn parse_range(s string, min int, max int, mut bitv []bool) ? {
 	}
 }
 
+// bitv_to_ints converts a bit vector into an array containing the
+// corresponding values.
 fn bitv_to_ints(bitv []bool, min int) []int {
 	mut out := []int{}
 
@@ -207,6 +212,8 @@ fn bitv_to_ints(bitv []bool, min int) []int {
 	return out
 }
 
+// parse_part parses a given part of a cron expression & returns the
+// corresponding array of ints.
 fn parse_part(s string, min int, max int) ?[]int {
 	mut bitv := []bool{len: max - min + 1, init: false}
 
@@ -219,7 +226,7 @@ fn parse_part(s string, min int, max int) ?[]int {
 
 // parse_expression parses an entire cron expression string into a
 // CronExpression object, if possible.
-fn parse_expression(exp string) ?CronExpression {
+pub fn parse_expression(exp string) ?CronExpression {
 	// The filter allows for multiple spaces between parts
 	mut parts := exp.split(' ').filter(it != '')
 
@@ -241,7 +248,7 @@ fn parse_expression(exp string) ?CronExpression {
 	// This for loop allows us to more clearly propagate the error to the user.
 	for i, min in mins {
 		part_results << parse_part(parts[i], min, maxs[i]) or {
-			return error('An error occurred with part $i: $err.msg')
+			return error('An error occurred with part $i: $err.msg()')
 		}
 	}
 

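With the module renamed from `cron` to `expression` and CronExpression,
parse_expression & next_from_now made public, other modules can now parse a
schedule and ask for its next occurrence, which is how the daemon seeds its
queue. A brief usage sketch, assuming the `cron.expression` import path used
elsewhere in this PR:

```v
import cron.expression

fn main() {
	// '0 3' means minute 0, hour 3: the daily-at-03:00 default schedule.
	ce := expression.parse_expression('0 3') or {
		eprintln('invalid expression: $err.msg()')
		return
	}

	// next_from_now computes the first time after time.now() that matches.
	timestamp := ce.next_from_now() or { return }
	println('next run at $timestamp')
}
```
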
@@ -1,4 +1,4 @@
-module cron
+module expression
 
 // parse_range_error returns the returned error message. If the result is '',
 // that means the function didn't error.

@@ -1,4 +1,4 @@
-module cron
+module expression
 
 import time { parse }
 

@@ -9,6 +9,8 @@ const socket = '/var/run/docker.sock'
 
 const buf_len = 1024
 
+// send writes a request to the Docker socket, waits for a response & returns
+// it.
 fn send(req &string) ?http.Response {
 	// Open a connection to the socket
 	mut s := unix.connect_stream(docker.socket) or {
@@ -28,8 +30,8 @@ fn send(req &string) ?http.Response {
 	s.wait_for_write() ?
 
 	mut c := 0
-	mut buf := []byte{len: docker.buf_len}
-	mut res := []byte{}
+	mut buf := []u8{len: docker.buf_len}
+	mut res := []u8{}
 
 	for {
 		c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') }
@@ -52,7 +54,7 @@ fn send(req &string) ?http.Response {
 
 	// We loop until we've encountered the end of the chunked response
 	// A chunked HTTP response always ends with '0\r\n\r\n'.
-	for res.len < 5 || res#[-5..] != [byte(`0`), `\r`, `\n`, `\r`, `\n`] {
+	for res.len < 5 || res#[-5..] != [u8(`0`), `\r`, `\n`, `\r`, `\n`] {
 		// Wait for the server to respond
 		s.wait_for_write() ?
 
@@ -72,12 +74,14 @@ fn send(req &string) ?http.Response {
 	return http.parse_response(res.bytestr())
 }
 
+// request_with_body sends a request to the Docker socket with the given body.
 fn request_with_body(method string, url urllib.URL, content_type string, body string) ?http.Response {
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
 
 	return send(req)
 }
 
+// request sends a request to the Docker socket with an empty body.
 fn request(method string, url urllib.URL) ?http.Response {
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
 

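Because the Unix socket stays open after the response, the read loop in send
cannot rely on EOF; instead it watches for the 5-byte terminator `0\r\n\r\n`
that ends every chunked HTTP response. A self-contained sketch of that check,
using the same negative-index slice as the loop condition:

```v
// ends_chunked_response reports whether buf ends in the zero-length
// terminating chunk ('0\r\n\r\n') of a chunked HTTP response.
fn ends_chunked_response(buf []u8) bool {
	return buf.len >= 5 && buf#[-5..] == [u8(`0`), `\r`, `\n`, `\r`, `\n`]
}

fn main() {
	assert ends_chunked_response('4\r\nWiki\r\n0\r\n\r\n'.bytes())
	assert !ends_chunked_response('4\r\nWiki\r\n'.bytes())
	println('ok')
}
```
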
					@ -36,7 +36,7 @@ fn get_env_var(field_name string) ?string {
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Otherwise, we process the file
 | 
						// Otherwise, we process the file
 | 
				
			||||||
	return os.read_file(env_file) or {
 | 
						return os.read_file(env_file) or {
 | 
				
			||||||
		error('Failed to read file defined in $env_file_name: ${err.msg}.')
 | 
							error('Failed to read file defined in $env_file_name: ${err.msg()}.')
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
@@ -55,27 +55,41 @@ pub fn load<T>(path string) ?T {
 		$for field in T.fields {
 			s := doc.value(field.name)
 
-			// We currently only support strings
-			if s.type_name() == 'string' {
-				res.$(field.name) = s.string()
+			if s !is toml.Null {
+				$if field.typ is string {
+					res.$(field.name) = s.string()
+				} $else $if field.typ is int {
+					res.$(field.name) = s.int()
+				}
 			}
 		}
 	}
 
 	$for field in T.fields {
-		$if field.typ is string {
-			env_value := get_env_var(field.name) ?
+		env_value := get_env_var(field.name) ?
 
-			// The value of the env var will always be chosen over the config
-			// file
-			if env_value != '' {
+		// The value of an env var will always take precedence over the toml
+		// file.
+		if env_value != '' {
+			$if field.typ is string {
 				res.$(field.name) = env_value
+			} $else $if field.typ is int {
+				res.$(field.name) = env_value.int()
 			}
-			// If there's no value from the toml file either, we try to find a
-			// default value
-			else if res.$(field.name) == '' {
-				return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.")
-			}
+		}
+
+		// Now, we check whether a value is present. If there isn't, that means
+		// it isn't in the config file, nor is there a default or an env var.
+		mut has_value := false
+
+		$if field.typ is string {
+			has_value = res.$(field.name) != ''
+		} $else $if field.typ is int {
+			has_value = res.$(field.name) != 0
+		}
+
+		if !has_value {
+			return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.")
 		}
 	}
 	return res
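Note: with the hunk above, `load<T>` now fills both string and int fields from the toml document, lets environment variables override either kind, and finally rejects any field that ended up empty/zero with no default. A hedged usage sketch (the Config struct and file name here are illustrative; the env-var lookup itself is done by `get_env_var`):

struct Config {
pub mut:
	address               string
	api_key               string
	max_concurrent_builds int
}

fn example() ? {
	// String fields are read with s.string(), int fields with s.int();
	// an env var, if set, overrides whatever the toml file provided.
	conf := load<Config>('vieter.toml') ?
	println(conf.max_concurrent_builds)
}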
@@ -96,6 +96,8 @@ pub fn cmd() cli.Command {
 	}
 }
 
+// get_repo_id_by_prefix tries to find the repo with the given prefix in its
+// ID. If multiple or none are found, an error is raised.
 fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
 	repos := get_repos(conf.address, conf.api_key) ?
 
@@ -118,6 +120,7 @@ fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
 	return res[0]
 }
 
+// list prints out a list of all repositories.
 fn list(conf Config) ? {
 	repos := get_repos(conf.address, conf.api_key) ?
 
@@ -126,12 +129,14 @@ fn list(conf Config) ? {
 	}
 }
 
+// add adds a new repository to the server's list.
 fn add(conf Config, url string, branch string, repo string, arch []string) ? {
 	res := add_repo(conf.address, conf.api_key, url, branch, repo, arch) ?
 
 	println(res.message)
 }
 
+// remove removes a repository from the server's list.
 fn remove(conf Config, id_prefix string) ? {
 	id := get_repo_id_by_prefix(conf, id_prefix) ?
 	res := remove_repo(conf.address, conf.api_key, id) ?
@@ -139,6 +144,7 @@ fn remove(conf Config, id_prefix string) ? {
 	println(res.message)
 }
 
+// patch patches a given repository with the provided params.
 fn patch(conf Config, id_prefix string, params map[string]string) ? {
 	id := get_repo_id_by_prefix(conf, id_prefix) ?
 	res := patch_repo(conf.address, conf.api_key, id, params) ?
@@ -4,6 +4,9 @@ import json
 import response { Response }
 import net.http
 
+// send_request<T> is a convenience method for sending requests to the repos
+// API. It mostly does string manipulation to create a query string containing
+// the provided params.
 fn send_request<T>(method http.Method, address string, url string, api_key string, params map[string]string) ?Response<T> {
 	mut full_url := '$address$url'
 
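Note: the new doc comment describes query-string assembly along these lines (a sketch only, not the actual body of `send_request<T>`):

// build_query is a hypothetical helper showing the kind of string
// manipulation the comment refers to: params become ?key=value pairs.
fn build_query(params map[string]string) string {
	if params.len == 0 {
		return ''
	}
	mut parts := []string{}
	for k, v in params {
		parts << '$k=$v'
	}
	return '?' + parts.join('&')
}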
@@ -14,6 +14,8 @@ pub mut:
 	arch []string
 	// Which repo the builder should publish packages to
 	repo string
+	// Cron schedule describing how frequently to build the repo.
+	schedule string [optional]
 }
 
 // patch_from_params patches a GitRepo from a map[string]string, usually
@@ -72,7 +74,7 @@ pub fn repo_from_params(params map[string]string) ?GitRepo {
 	// If we're creating a new GitRepo, we want all fields to be present before
 	// "patching".
 	$for field in GitRepo.fields {
-		if field.name !in params {
+		if field.name !in params && !field.attrs.contains('optional') {
 			return error('Missing parameter: ${field.name}.')
 		}
 	}
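Note: this check pairs with the new `schedule string [optional]` field above: `[optional]` shows up in `field.attrs` at compile time, so `repo_from_params` now only rejects missing params for fields that lack it. A small illustrative sketch (the struct and function here are hypothetical):

struct Demo {
	name     string
	schedule string [optional]
}

// required_fields lists the fields a caller must always provide.
fn required_fields() []string {
	mut res := []string{}
	$for field in Demo.fields {
		if !field.attrs.contains('optional') {
			res << field.name
		}
	}
	return res // ['name'] for Demo
}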
@@ -175,6 +175,7 @@ pub fn read_pkg_archive(pkg_path string) ?Pkg {
 	}
 }
 
+// format_entry returns a string properly formatted to be added to a desc file.
 fn format_entry(key string, value string) string {
 	return '\n%$key%\n$value\n'
 }
@@ -30,11 +30,11 @@ pub:
 // new creates a new RepoGroupManager & creates the directories as needed
 pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
 	if !os.is_dir(repos_dir) {
-		os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg') }
+		os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
 	}
 
 	if !os.is_dir(pkg_dir) {
-		os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg') }
+		os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg()') }
 	}
 
 	return RepoGroupManager{
@@ -50,7 +50,7 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana
 // the right subdirectories in r.pkg_dir if it was successfully added.
 pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
 	pkg := package.read_pkg_archive(pkg_path) or {
-		return error('Failed to read package file: $err.msg')
+		return error('Failed to read package file: $err.msg()')
 	}
 
 	added := r.add_pkg_in_repo(repo, pkg) ?
@@ -2,6 +2,8 @@ module repo
 
 import os
 
+// archive_add_entry writes a file to an archive, given its path & inner path
+// inside the archive.
 fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &string, inner_path &string) {
 	st := C.stat{}
 
@@ -19,7 +21,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 	}
 
 	// Write the file to the archive
-	buf := [8192]byte{}
+	buf := [8192]u8{}
 	mut len := C.read(fd, &buf, sizeof(buf))
 
 	for len > 0 {
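Note: the `byte` → `u8` swaps in this diff follow V's rename of its 8-bit unsigned type; `u8` is the canonical spelling on newer compilers, with `byte` surviving only as an alias at the time. A tiny sketch (the function is illustrative):

fn buffer_demo() {
	// Fixed-size and dynamic buffers both use the u8 spelling now.
	buf_fixed := [8192]u8{}
	buf_dyn := []u8{len: 8192}
	println(sizeof(buf_fixed)) // 8192
	println(buf_dyn.len) // 8192
}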
@@ -29,7 +31,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 	}
 }
 
-// Re-generate the repo archive files
+// sync regenerates the repository archive files.
 fn (r &RepoGroupManager) sync(repo string, arch string) ? {
 	subrepo_path := os.join_path(r.repos_dir, repo, arch)
 
@@ -2,6 +2,7 @@ module server
 
 import net.http
 
+// is_authorized checks whether the provided API key is correct.
 fn (mut app App) is_authorized() bool {
 	x_header := app.req.header.get_custom('X-Api-Key', http.HeaderQueryConfig{ exact: true }) or {
 		return false
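Note: all the authorized routes below gate on this same X-Api-Key header. A hedged client-side sketch, assuming the net.http API shape of V at the time (this helper is not part of the commit):

import net.http

// list_repos is a hypothetical client call against the /api/repos route.
fn list_repos(address string, api_key string) ?string {
	mut req := http.new_request(http.Method.get, '$address/api/repos', '') ?
	// The server's is_authorized() compares this header to its api_key.
	req.add_custom_header('X-Api-Key', api_key) ?
	res := req.do() ?
	return res.text
}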
@@ -8,6 +8,7 @@ import response { new_data_response, new_response }
 
 const repos_file = 'repos.json'
 
+// get_repos returns the current list of repos.
 ['/api/repos'; get]
 fn (mut app App) get_repos() web.Result {
 	if !app.is_authorized() {
@@ -16,7 +17,7 @@ fn (mut app App) get_repos() web.Result {
 
 	repos := rlock app.git_mutex {
 		git.read_repos(app.conf.repos_file) or {
-			app.lerror('Failed to read repos file: $err.msg')
+			app.lerror('Failed to read repos file: $err.msg()')
 
 			return app.status(http.Status.internal_server_error)
 		}
@@ -25,6 +26,7 @@ fn (mut app App) get_repos() web.Result {
 	return app.json(http.Status.ok, new_data_response(repos))
 }
 
+// get_single_repo returns the information for a single repo.
 ['/api/repos/:id'; get]
 fn (mut app App) get_single_repo(id string) web.Result {
 	if !app.is_authorized() {
@@ -48,6 +50,7 @@ fn (mut app App) get_single_repo(id string) web.Result {
 	return app.json(http.Status.ok, new_data_response(repo))
 }
 
+// post_repo creates a new repo from the provided query string.
 ['/api/repos'; post]
 fn (mut app App) post_repo() web.Result {
 	if !app.is_authorized() {
@@ -55,7 +58,7 @@ fn (mut app App) post_repo() web.Result {
 	}
 
 	new_repo := git.repo_from_params(app.query) or {
-		return app.json(http.Status.bad_request, new_response(err.msg))
+		return app.json(http.Status.bad_request, new_response(err.msg()))
 	}
 
 	id := rand.uuid_v4()
@@ -86,6 +89,7 @@ fn (mut app App) post_repo() web.Result {
 	return app.json(http.Status.ok, new_response('Repo added successfully.'))
 }
 
+// delete_repo removes a given repo from the server's list.
 ['/api/repos/:id'; delete]
 fn (mut app App) delete_repo(id string) web.Result {
 	if !app.is_authorized() {
@@ -113,6 +117,7 @@ fn (mut app App) delete_repo(id string) web.Result {
 	return app.json(http.Status.ok, new_response('Repo removed successfully.'))
 }
 
+// patch_repo updates a repo's data with the given query params.
 ['/api/repos/:id'; patch]
 fn (mut app App) patch_repo(id string) web.Result {
 	if !app.is_authorized() {
@@ -16,6 +16,9 @@ pub fn (mut app App) healthcheck() web.Result {
 	return app.json(http.Status.ok, new_response('Healthy.'))
 }
 
+// get_repo_file handles all Pacman-related routes. It returns both the
+// repository's archives, but also package archives or the contents of a
+// package's desc file.
 ['/:repo/:arch/:filename'; get; head]
 fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
 	mut full_path := ''
@@ -54,6 +57,7 @@ fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Re
 	return app.file(full_path)
 }
 
+// put_package handles publishing a package to a repository.
 ['/:repo/publish'; post]
 fn (mut app App) put_package(repo string) web.Result {
 	if !app.is_authorized() {
@@ -87,15 +91,15 @@ fn (mut app App) put_package(repo string) web.Result {
 	}
 
 	res := app.repo.add_pkg_from_path(repo, pkg_path) or {
-		app.lerror('Error while adding package: $err.msg')
+		app.lerror('Error while adding package: $err.msg()')
 
-		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
+		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }
 
 		return app.json(http.Status.internal_server_error, new_response('Failed to add package.'))
 	}
 
 	if !res.added {
-		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
+		os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg()") }
 
 		app.lwarn("Duplicate package '$res.pkg.full_name()' in repo '$repo'.")
 
@@ -45,7 +45,7 @@ pub fn server(conf Config) ? {
 
 	// This also creates the directories if needed
 	repo := repo.new(conf.repos_dir, conf.pkg_dir, conf.default_arch) or {
-		logger.error(err.msg)
+		logger.error(err.msg())
 		exit(1)
 	}
 
@@ -30,7 +30,7 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
 		file.close()
 	}
 
-	mut buf := []byte{len: util.reader_buf_size}
+	mut buf := []u8{len: util.reader_buf_size}
 	mut bytes_left := length
 
 	// Repeat as long as the stream still has data
@@ -60,7 +60,7 @@ pub fn hash_file(path &string) ?(string, string) {
 	mut sha256sum := sha256.new()
 
 	buf_size := int(1_000_000)
-	mut buf := []byte{len: buf_size}
+	mut buf := []u8{len: buf_size}
 	mut bytes_left := os.file_size(path)
 
 	for bytes_left > 0 {
@@ -47,6 +47,7 @@ fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
 	return methods, path.to_lower()
 }
 
+// Extracts query parameters from a URL.
 fn parse_query_from_url(url urllib.URL) map[string]string {
 	mut query := map[string]string{}
 	for v in url.query().data {
@@ -55,6 +56,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
 	return query
 }
 
+// Extract form data from an HTTP request.
 fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) {
 	mut form := map[string]string{}
 	mut files := map[string][]http.FileData{}
@@ -249,7 +249,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
 
 	// ext := os.file_ext(f_path)
 	// data := os.read_file(f_path) or {
-	// 	eprint(err.msg)
+	// 	eprint(err.msg())
 	// 	ctx.server_error(500)
 	// 	return Result{}
 	// }
@@ -267,7 +267,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
 	file_size := os.file_size(f_path)
 
 	file := os.open(f_path) or {
-		eprintln(err.msg)
+		eprintln(err.msg())
 		ctx.server_error(500)
 		return Result{}
 	}
@@ -285,7 +285,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
 	resp.set_status(ctx.status)
 	send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }
 
-	mut buf := []byte{len: 1_000_000}
+	mut buf := []u8{len: 1_000_000}
 	mut bytes_left := file_size
 
 	// Repeat as long as the stream still has data
@@ -361,7 +361,7 @@ interface DbInterface {
 // run runs the app
 [manualfree]
 pub fn run<T>(global_app &T, port int) {
-	mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code $err') }
+	mut l := net.listen_tcp(.ip6, ':$port') or { panic('failed to listen $err.code() $err') }
 
 	// Parsing methods attributes
 	mut routes := map[string]Route{}
@@ -393,7 +393,7 @@ pub fn run<T>(global_app &T, port int) {
 		request_app.Context = global_app.Context // copy the context ref that contains static files map etc
 		mut conn := l.accept() or {
 			// failures should not panic
-			eprintln('accept() failed with error: $err.msg')
+			eprintln('accept() failed with error: $err.msg()')
 			continue
 		}
 		go handle_conn<T>(mut conn, mut request_app, routes)
@@ -8,3 +8,9 @@ repos_file = "data/repos.json"
 default_arch = "x86_64"
 
 address = "http://localhost:8000"
+
+global_schedule = '* *'
+api_update_frequency = 2
+image_rebuild_frequency = 1
+max_concurrent_builds = 3
+
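Note: these four keys configure the new cron daemon (the global build schedule, API and image polling frequencies, and build concurrency). Since they go through the same `load<T>` shown earlier, each can also come from the environment; a sketch, assuming the variable name derives from the field name (the exact mapping and prefix come from `get_env_var`, not this diff):

import os

fn override_demo() {
	// Hypothetical env override; the real variable name depends on how
	// get_env_var maps field names to the environment.
	os.setenv('VIETER_MAX_CONCURRENT_BUILDS', '5', true)
	// A subsequent load<Config> call would then see 5 instead of 3.
}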