forked from vieter-v/vieter
				
			Merge pull request 'Cron implementation in C' (#341) from Chewing_Bever/vieter:c-cron into dev
Reviewed-on: vieter-v/vieter#341

commit 0d6ca8d3e4
				|  | @ -0,0 +1,4 @@ | ||||||
|  | # To stay consistent with the V formatting style, we use tabs | ||||||
|  | UseTab: Always | ||||||
|  | IndentWidth: 4 | ||||||
|  | TabWidth: 4 | ||||||
|  | @ -5,6 +5,5 @@ root = true | ||||||
| end_of_line = lf | end_of_line = lf | ||||||
| insert_final_newline = true | insert_final_newline = true | ||||||
| 
 | 
 | ||||||
| [*.v] | [*.{v,c,h}] | ||||||
| # vfmt wants it :( |  | ||||||
| indent_style = tab | indent_style = tab | ||||||
|  |  | ||||||
|  | @ -1,4 +1,4 @@ | ||||||
| *.c | vieter.c | ||||||
| /data/ | /data/ | ||||||
| 
 | 
 | ||||||
| # Build artifacts | # Build artifacts | ||||||
|  |  | ||||||
|  | @ -1,3 +1,6 @@ | ||||||
| [submodule "docs/themes/hugo-book"] | [submodule "docs/themes/hugo-book"] | ||||||
| 	path = docs/themes/hugo-book | 	path = docs/themes/hugo-book | ||||||
| 	url = https://github.com/alex-shpak/hugo-book | 	url = https://github.com/alex-shpak/hugo-book | ||||||
|  | [submodule "src/libvieter"] | ||||||
|  | 	path = src/libvieter | ||||||
|  | 	url = https://git.rustybever.be/vieter-v/libvieter | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| matrix: | matrix: | ||||||
|   PLATFORM: |   PLATFORM: | ||||||
|  | @ -57,7 +57,7 @@ pipeline: | ||||||
| 
 | 
 | ||||||
|       - export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')" |       - export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')" | ||||||
|       - export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH" |       - export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH" | ||||||
|       - export SIGNATURE="$(echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64)" |       - export SIGNATURE="$(echo -en $SIG_STRING | openssl dgst -sha1 -hmac $S3_PASSWORD -binary | base64)" | ||||||
|       - > |       - > | ||||||
|         curl  |         curl  | ||||||
|         --silent |         --silent | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| platform: 'linux/amd64' | platform: 'linux/amd64' | ||||||
| branches: | branches: | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| platform: 'linux/amd64' | platform: 'linux/amd64' | ||||||
| branches: [ 'main' ] | branches: [ 'main' ] | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| # These checks already get performed on the feature branches | # These checks already get performed on the feature branches | ||||||
| branches: | branches: | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| platform: 'linux/amd64' | platform: 'linux/amd64' | ||||||
| branches: | branches: | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| variables: | variables: | ||||||
|   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2' |   - &vlang_image 'git.rustybever.be/chewing_bever/vlang:0.3.2-alpine3.17' | ||||||
| 
 | 
 | ||||||
| matrix: | matrix: | ||||||
|   PLATFORM: |   PLATFORM: | ||||||
|  |  | ||||||
|  | @ -13,6 +13,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 | ||||||
| * Search in list of targets using API & CLI | * Search in list of targets using API & CLI | ||||||
| * Allow filtering targets by arch value | * Allow filtering targets by arch value | ||||||
| 
 | 
 | ||||||
|  | ### Changed | ||||||
|  | 
 | ||||||
|  | * Rewrote cron expression logic in C | ||||||
|  | 
 | ||||||
|  | ### Removed | ||||||
|  | 
 | ||||||
|  | * Deprecated cron daemon | ||||||
|  | 
 | ||||||
| ## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0) | ## [0.5.0](https://git.rustybever.be/vieter-v/vieter/src/tag/0.5.0) | ||||||
| 
 | 
 | ||||||
| ### Added | ### Added | ||||||
|  |  | ||||||
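The "Changed" entry above (cron expression logic rewritten in C) replaces the pure-V `cron.expression` module with a thin V wrapper around libvieter, which is what the rest of this diff implements. Below is a minimal sketch of what that means for calling code, assuming the module is importable as `cron` the way it is inside the vieter source tree (the standalone `main` wrapper and the hard-coded schedule are illustrative only): parsing can still fail, but computing the next occurrence no longer returns a Result.

```v
module main

import cron
import time

fn main() {
	// parsing may still fail, so error handling stays at this step
	ce := cron.parse_expression('0 3') or { panic(err) }

	// with the C-backed implementation this call is infallible,
	// so the old `!` / `or {}` handling around it disappears
	next := ce.next_from_now()
	println('next run after ${time.now()}: $next')
}
```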
							
								
								
									
Makefile: 29 changed lines
							|  | @ -1,6 +1,6 @@ | ||||||
| # =====CONFIG=====
 | # =====CONFIG=====
 | ||||||
| SRC_DIR := src | SRC_DIR := src | ||||||
| SOURCES != find '$(SRC_DIR)' -iname '*.v' | SRCS != find '$(SRC_DIR)' -iname '*.v' | ||||||
| 
 | 
 | ||||||
| V_PATH ?= v | V_PATH ?= v | ||||||
| V := $(V_PATH) -showcc -gc boehm -W -d use_openssl -skip-unused | V := $(V_PATH) -showcc -gc boehm -W -d use_openssl -skip-unused | ||||||
|  | @ -9,8 +9,12 @@ all: vieter | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # =====COMPILATION=====
 | # =====COMPILATION=====
 | ||||||
|  | .PHONY: libvieter | ||||||
|  | libvieter: | ||||||
|  | 	make -C '$(SRC_DIR)/libvieter' CFLAGS='-O3'  | ||||||
|  | 
 | ||||||
| # Regular binary
 | # Regular binary
 | ||||||
| vieter: $(SOURCES) | vieter: $(SOURCES) libvieter | ||||||
| 	$(V) -g -o vieter $(SRC_DIR) | 	$(V) -g -o vieter $(SRC_DIR) | ||||||
| 
 | 
 | ||||||
| # Debug build using gcc
 | # Debug build using gcc
 | ||||||
|  | @ -18,7 +22,7 @@ vieter: $(SOURCES) | ||||||
| # multi-threaded and causes issues when running vieter inside gdb.
 | # multi-threaded and causes issues when running vieter inside gdb.
 | ||||||
| .PHONY: debug | .PHONY: debug | ||||||
| debug: dvieter | debug: dvieter | ||||||
| dvieter: $(SOURCES) | dvieter: $(SOURCES) libvieter | ||||||
| 	$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR) | 	$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR) | ||||||
| 
 | 
 | ||||||
| # Run the debug build inside gdb
 | # Run the debug build inside gdb
 | ||||||
|  | @ -29,12 +33,12 @@ gdb: dvieter | ||||||
| # Optimised production build
 | # Optimised production build
 | ||||||
| .PHONY: prod | .PHONY: prod | ||||||
| prod: pvieter | prod: pvieter | ||||||
| pvieter: $(SOURCES) | pvieter: $(SOURCES) libvieter | ||||||
| 	$(V) -o pvieter -prod $(SRC_DIR) | 	$(V) -o pvieter -prod $(SRC_DIR) | ||||||
| 
 | 
 | ||||||
| # Only generate C code
 | # Only generate C code
 | ||||||
| .PHONY: c | .PHONY: c | ||||||
| c: $(SOURCES) | c: $(SOURCES) libvieter | ||||||
| 	$(V) -o vieter.c $(SRC_DIR) | 	$(V) -o vieter.c $(SRC_DIR) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -67,6 +71,7 @@ man: vieter | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # =====OTHER=====
 | # =====OTHER=====
 | ||||||
|  | # Linting
 | ||||||
| .PHONY: lint | .PHONY: lint | ||||||
| lint: | lint: | ||||||
| 	$(V) fmt -verify $(SRC_DIR) | 	$(V) fmt -verify $(SRC_DIR) | ||||||
|  | @ -74,18 +79,24 @@ lint: | ||||||
| 	$(V_PATH) missdoc -p $(SRC_DIR) | 	$(V_PATH) missdoc -p $(SRC_DIR) | ||||||
| 	@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ] | 	@ [ $$($(V_PATH) missdoc -p $(SRC_DIR) | wc -l) = 0 ] | ||||||
| 
 | 
 | ||||||
| # Format the V codebase
 | 
 | ||||||
|  | # Formatting
 | ||||||
| .PHONY: fmt | .PHONY: fmt | ||||||
| fmt: | fmt: | ||||||
| 	$(V) fmt -w $(SRC_DIR) | 	$(V) fmt -w $(SRC_DIR) | ||||||
| 
 | 
 | ||||||
| .PHONY: test |  | ||||||
| test: |  | ||||||
| 	$(V) test $(SRC_DIR) |  | ||||||
| 
 | 
 | ||||||
|  | # Testing
 | ||||||
|  | .PHONY: test | ||||||
|  | test: libvieter | ||||||
|  | 	$(V) -g test $(SRC_DIR) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # Cleaning
 | ||||||
| .PHONY: clean | .PHONY: clean | ||||||
| clean: | clean: | ||||||
| 	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public' | 	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs' 'docs/public' | ||||||
|  | 	make -C '$(SRC_DIR)/libvieter' clean | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # =====EXPERIMENTAL=====
 | # =====EXPERIMENTAL=====
 | ||||||
|  |  | ||||||
							
								
								
									
PKGBUILD.dev: 30 changed lines
							|  | @ -11,27 +11,37 @@ makedepends=('git' 'vlang') | ||||||
| arch=('x86_64' 'aarch64') | arch=('x86_64' 'aarch64') | ||||||
| url='https://git.rustybever.be/vieter-v/vieter' | url='https://git.rustybever.be/vieter-v/vieter' | ||||||
| license=('AGPL3') | license=('AGPL3') | ||||||
| source=("$pkgname::git+https://git.rustybever.be/vieter-v/vieter#branch=dev") | source=( | ||||||
|  |     "${pkgname}::git+https://git.rustybever.be/vieter-v/vieter#branch=dev" | ||||||
|  |     "libvieter::git+https://git.rustybever.be/vieter-v/libvieter" | ||||||
|  | ) | ||||||
| md5sums=('SKIP') | md5sums=('SKIP') | ||||||
| provides=('vieter') | provides=('vieter') | ||||||
| conflicts=('vieter') | conflicts=('vieter') | ||||||
| 
 | 
 | ||||||
| pkgver() { | pkgver() { | ||||||
|     cd "$pkgname" |     cd "${pkgname}" | ||||||
| 
 | 
 | ||||||
|     git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g' |     git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g' | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| prepare() { | prepare() { | ||||||
|     export VMODULES="$srcdir/.vmodules" |     cd "${pkgname}" | ||||||
| 
 | 
 | ||||||
|     cd "$pkgname/src" && v install |     # Add the libvieter submodule | ||||||
|  |     git submodule init | ||||||
|  |     git config submodules.src/libvieter.url "${srcdir}/libvieter" | ||||||
|  |     git -c protocol.file.allow=always submodule update | ||||||
|  | 
 | ||||||
|  |     export VMODULES="${srcdir}/.vmodules" | ||||||
|  | 
 | ||||||
|  |     cd "${pkgname}/src" && v install | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| build() { | build() { | ||||||
|     export VMODULES="$srcdir/.vmodules" |     export VMODULES="${srcdir}/.vmodules" | ||||||
| 
 | 
 | ||||||
|     cd "$pkgname" |     cd "${pkgname}" | ||||||
| 
 | 
 | ||||||
|     make prod |     make prod | ||||||
| 
 | 
 | ||||||
|  | @ -42,9 +52,9 @@ build() { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| package() { | package() { | ||||||
|     install -dm755 "$pkgdir/usr/bin" |     install -dm755 "${pkgdir}/usr/bin" | ||||||
|     install -Dm755 "$pkgname/pvieter" "$pkgdir/usr/bin/vieter" |     install -Dm755 "${pkgname}/pvieter" "${pkgdir}/usr/bin/vieter" | ||||||
| 
 | 
 | ||||||
|     install -dm755 "$pkgdir/usr/share/man/man1" |     install -dm755 "${pkgdir}/usr/share/man/man1" | ||||||
|     install -Dm644 "$pkgname/man"/*.1 "$pkgdir/usr/share/man/man1" |     install -Dm644 "${pkgname}/man"/*.1 "${pkgdir}/usr/share/man/man1" | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -1,7 +1,7 @@ | ||||||
| module build | module build | ||||||
| 
 | 
 | ||||||
| import models { BuildConfig, Target } | import models { BuildConfig, Target } | ||||||
| import cron.expression { CronExpression, parse_expression } | import cron | ||||||
| import time | import time | ||||||
| import datatypes { MinHeap } | import datatypes { MinHeap } | ||||||
| import util | import util | ||||||
|  | @ -13,7 +13,7 @@ pub mut: | ||||||
| 	// Next timestamp from which point this job is allowed to be executed | 	// Next timestamp from which point this job is allowed to be executed | ||||||
| 	timestamp time.Time | 	timestamp time.Time | ||||||
| 	// Required for calculating next timestamp after having pop'ed a job | 	// Required for calculating next timestamp after having pop'ed a job | ||||||
| 	ce CronExpression | 	ce &cron.Expression = unsafe { nil } | ||||||
| 	// Actual build config sent to the agent | 	// Actual build config sent to the agent | ||||||
| 	config BuildConfig | 	config BuildConfig | ||||||
| 	// Whether this is a one-time job | 	// Whether this is a one-time job | ||||||
|  | @ -30,7 +30,7 @@ fn (r1 BuildJob) < (r2 BuildJob) bool { | ||||||
| // for each architecture. Agents receive jobs from this queue. | // for each architecture. Agents receive jobs from this queue. | ||||||
| pub struct BuildJobQueue { | pub struct BuildJobQueue { | ||||||
| 	// Schedule to use for targets without explicitely defined cron expression | 	// Schedule to use for targets without explicitely defined cron expression | ||||||
| 	default_schedule CronExpression | 	default_schedule &cron.Expression | ||||||
| 	// Base image to use for targets without defined base image | 	// Base image to use for targets without defined base image | ||||||
| 	default_base_image string | 	default_base_image string | ||||||
| mut: | mut: | ||||||
|  | @ -44,9 +44,9 @@ mut: | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // new_job_queue initializes a new job queue | // new_job_queue initializes a new job queue | ||||||
| pub fn new_job_queue(default_schedule CronExpression, default_base_image string) BuildJobQueue { | pub fn new_job_queue(default_schedule &cron.Expression, default_base_image string) BuildJobQueue { | ||||||
| 	return BuildJobQueue{ | 	return BuildJobQueue{ | ||||||
| 		default_schedule: default_schedule | 		default_schedule: unsafe { default_schedule } | ||||||
| 		default_base_image: default_base_image | 		default_base_image: default_base_image | ||||||
| 		invalidated: map[int]time.Time{} | 		invalidated: map[int]time.Time{} | ||||||
| 	} | 	} | ||||||
|  | @ -85,14 +85,14 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! { | ||||||
| 
 | 
 | ||||||
| 		if !input.now { | 		if !input.now { | ||||||
| 			ce := if input.target.schedule != '' { | 			ce := if input.target.schedule != '' { | ||||||
| 				parse_expression(input.target.schedule) or { | 				cron.parse_expression(input.target.schedule) or { | ||||||
| 					return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()") | 					return error("Error while parsing cron expression '$input.target.schedule' (id $input.target.id): $err.msg()") | ||||||
| 				} | 				} | ||||||
| 			} else { | 			} else { | ||||||
| 				q.default_schedule | 				q.default_schedule | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			job.timestamp = ce.next_from_now()! | 			job.timestamp = ce.next_from_now() | ||||||
| 			job.ce = ce | 			job.ce = ce | ||||||
| 		} else { | 		} else { | ||||||
| 			job.timestamp = time.now() | 			job.timestamp = time.now() | ||||||
|  | @ -105,8 +105,8 @@ pub fn (mut q BuildJobQueue) insert(input InsertConfig) ! { | ||||||
| // reschedule the given job by calculating the next timestamp and re-adding it | // reschedule the given job by calculating the next timestamp and re-adding it | ||||||
| // to its respective queue. This function is called by the pop functions | // to its respective queue. This function is called by the pop functions | ||||||
| // *after* having pop'ed the job. | // *after* having pop'ed the job. | ||||||
| fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) ! { | fn (mut q BuildJobQueue) reschedule(job BuildJob, arch string) { | ||||||
| 	new_timestamp := job.ce.next_from_now()! | 	new_timestamp := job.ce.next_from_now() | ||||||
| 
 | 
 | ||||||
| 	new_job := BuildJob{ | 	new_job := BuildJob{ | ||||||
| 		...job | 		...job | ||||||
|  | @ -168,10 +168,7 @@ pub fn (mut q BuildJobQueue) pop(arch string) ?BuildJob { | ||||||
| 			job = q.queues[arch].pop()? | 			job = q.queues[arch].pop()? | ||||||
| 
 | 
 | ||||||
| 			if !job.single { | 			if !job.single { | ||||||
| 				// TODO how do we handle this properly? Is it even possible for a | 				q.reschedule(job, arch) | ||||||
| 				// cron expression to not return a next time if it's already been |  | ||||||
| 				// used before? |  | ||||||
| 				q.reschedule(job, arch) or {} |  | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			return job | 			return job | ||||||
|  | @ -198,8 +195,7 @@ pub fn (mut q BuildJobQueue) pop_n(arch string, n int) []BuildJob { | ||||||
| 				job = q.queues[arch].pop() or { break } | 				job = q.queues[arch].pop() or { break } | ||||||
| 
 | 
 | ||||||
| 				if !job.single { | 				if !job.single { | ||||||
| 					// TODO idem | 					q.reschedule(job, arch) | ||||||
| 					q.reschedule(job, arch) or {} |  | ||||||
| 				} | 				} | ||||||
| 
 | 
 | ||||||
| 				out << job | 				out << job | ||||||
|  |  | ||||||
|  | @ -1,7 +1,7 @@ | ||||||
| module schedule | module schedule | ||||||
| 
 | 
 | ||||||
| import cli | import cli | ||||||
| import cron.expression { parse_expression } | import cron | ||||||
| import time | import time | ||||||
| 
 | 
 | ||||||
| // cmd returns the cli submodule for previewing a cron schedule. | // cmd returns the cli submodule for previewing a cron schedule. | ||||||
|  | @ -19,10 +19,10 @@ pub fn cmd() cli.Command { | ||||||
| 			}, | 			}, | ||||||
| 		] | 		] | ||||||
| 		execute: fn (cmd cli.Command) ! { | 		execute: fn (cmd cli.Command) ! { | ||||||
| 			ce := parse_expression(cmd.args.join(' '))! | 			ce := cron.parse_expression(cmd.args.join(' '))! | ||||||
| 			count := cmd.flags.get_int('count')! | 			count := cmd.flags.get_int('count')! | ||||||
| 
 | 
 | ||||||
| 			for t in ce.next_n(time.now(), count)! { | 			for t in ce.next_n(time.now(), count) { | ||||||
| 				println(t) | 				println(t) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  |  | ||||||
|  | @ -2,7 +2,7 @@ module targets | ||||||
| 
 | 
 | ||||||
| import cli | import cli | ||||||
| import conf as vconf | import conf as vconf | ||||||
| import cron.expression { parse_expression } | import cron | ||||||
| import client { NewTarget } | import client { NewTarget } | ||||||
| import console | import console | ||||||
| import models { TargetFilter } | import models { TargetFilter } | ||||||
|  | @ -295,7 +295,7 @@ fn patch(conf Config, id string, params map[string]string) ! { | ||||||
| 	// We check the cron expression first because it's useless to send an | 	// We check the cron expression first because it's useless to send an | ||||||
| 	// invalid one to the server. | 	// invalid one to the server. | ||||||
| 	if 'schedule' in params && params['schedule'] != '' { | 	if 'schedule' in params && params['schedule'] != '' { | ||||||
| 		parse_expression(params['schedule']) or { | 		cron.parse_expression(params['schedule']) or { | ||||||
| 			return error('Invalid cron expression: $err.msg()') | 			return error('Invalid cron expression: $err.msg()') | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -1,32 +0,0 @@ | ||||||
| module cron |  | ||||||
| 
 |  | ||||||
| import cli |  | ||||||
| import conf as vconf |  | ||||||
| 
 |  | ||||||
| struct Config { |  | ||||||
| pub: |  | ||||||
| 	log_level               string = 'WARN' |  | ||||||
| 	api_key                 string |  | ||||||
| 	address                 string |  | ||||||
| 	data_dir                string |  | ||||||
| 	base_image              string = 'archlinux:base-devel' |  | ||||||
| 	max_concurrent_builds   int    = 1 |  | ||||||
| 	api_update_frequency    int    = 15 |  | ||||||
| 	image_rebuild_frequency int    = 1440 |  | ||||||
| 	// Replicates the behavior of the original cron system |  | ||||||
| 	global_schedule string = '0 3' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // cmd returns the cli module that handles the cron daemon. |  | ||||||
| pub fn cmd() cli.Command { |  | ||||||
| 	return cli.Command{ |  | ||||||
| 		name: 'cron' |  | ||||||
| 		description: 'Start the cron service that periodically runs builds.' |  | ||||||
| 		execute: fn (cmd cli.Command) ! { |  | ||||||
| 			config_file := cmd.flags.get_string('config-file')! |  | ||||||
| 			conf := vconf.load<Config>(prefix: 'VIETER_', default_path: config_file)! |  | ||||||
| 
 |  | ||||||
| 			cron(conf)! |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  | @ -1,33 +0,0 @@ | ||||||
| module cron |  | ||||||
| 
 |  | ||||||
| import log |  | ||||||
| import cron.daemon |  | ||||||
| import cron.expression |  | ||||||
| import os |  | ||||||
| 
 |  | ||||||
| const log_file_name = 'vieter.cron.log' |  | ||||||
| 
 |  | ||||||
| // cron starts a cron daemon & starts periodically scheduling builds. |  | ||||||
| pub fn cron(conf Config) ! { |  | ||||||
| 	// Configure logger |  | ||||||
| 	log_level := log.level_from_tag(conf.log_level) or { |  | ||||||
| 		return error('Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	mut logger := log.Log{ |  | ||||||
| 		level: log_level |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log_file := os.join_path_single(conf.data_dir, cron.log_file_name) |  | ||||||
| 	logger.set_full_logpath(log_file) |  | ||||||
| 	logger.log_to_console_too() |  | ||||||
| 
 |  | ||||||
| 	ce := expression.parse_expression(conf.global_schedule) or { |  | ||||||
| 		return error('Error while parsing global cron expression: $err.msg()') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	mut d := daemon.init_daemon(logger, conf.address, conf.api_key, conf.base_image, ce, |  | ||||||
| 		conf.max_concurrent_builds, conf.api_update_frequency, conf.image_rebuild_frequency)! |  | ||||||
| 
 |  | ||||||
| 	d.run() |  | ||||||
| } |  | ||||||
|  | @ -1,115 +0,0 @@ | ||||||
| module daemon |  | ||||||
| 
 |  | ||||||
| import time |  | ||||||
| import sync.stdatomic |  | ||||||
| import build |  | ||||||
| import os |  | ||||||
| 
 |  | ||||||
| const ( |  | ||||||
| 	build_empty   = 0 |  | ||||||
| 	build_running = 1 |  | ||||||
| 	build_done    = 2 |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // clean_finished_builds removes finished builds from the build slots & returns |  | ||||||
| // them. |  | ||||||
| fn (mut d Daemon) clean_finished_builds() []ScheduledBuild { |  | ||||||
| 	mut out := []ScheduledBuild{} |  | ||||||
| 
 |  | ||||||
| 	for i in 0 .. d.atomics.len { |  | ||||||
| 		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done { |  | ||||||
| 			stdatomic.store_u64(&d.atomics[i], daemon.build_empty) |  | ||||||
| 			out << d.builds[i] |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return out |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // update_builds starts as many builds as possible. |  | ||||||
| fn (mut d Daemon) start_new_builds() { |  | ||||||
| 	now := time.now() |  | ||||||
| 
 |  | ||||||
| 	for d.queue.len() > 0 { |  | ||||||
| 		elem := d.queue.peek() or { |  | ||||||
| 			d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.") |  | ||||||
| 
 |  | ||||||
| 			break |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if elem.timestamp < now { |  | ||||||
| 			sb := d.queue.pop() or { |  | ||||||
| 				d.lerror("queue.pop() unexpectedly returned an error. This shouldn't happen.") |  | ||||||
| 
 |  | ||||||
| 				break |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// If this build couldn't be scheduled, no more will be possible. |  | ||||||
| 			if !d.start_build(sb) { |  | ||||||
| 				d.queue.insert(sb) |  | ||||||
| 				break |  | ||||||
| 			} |  | ||||||
| 		} else { |  | ||||||
| 			break |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // start_build starts a build for the given ScheduledBuild object. |  | ||||||
| fn (mut d Daemon) start_build(sb ScheduledBuild) bool { |  | ||||||
| 	for i in 0 .. d.atomics.len { |  | ||||||
| 		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty { |  | ||||||
| 			stdatomic.store_u64(&d.atomics[i], daemon.build_running) |  | ||||||
| 			d.builds[i] = sb |  | ||||||
| 
 |  | ||||||
| 			go d.run_build(i, sb) |  | ||||||
| 
 |  | ||||||
| 			return true |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return false |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // run_build actually starts the build process for a given target. |  | ||||||
| fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) { |  | ||||||
| 	d.linfo('started build: $sb.target.url -> $sb.target.repo') |  | ||||||
| 
 |  | ||||||
| 	// 0 means success, 1 means failure |  | ||||||
| 	mut status := 0 |  | ||||||
| 
 |  | ||||||
| 	res := build.build_target(d.client.address, d.client.api_key, d.builder_images.last(), |  | ||||||
| 		&sb.target, false) or { |  | ||||||
| 		d.ldebug('build_target error: $err.msg()') |  | ||||||
| 		status = 1 |  | ||||||
| 
 |  | ||||||
| 		build.BuildResult{} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if status == 0 { |  | ||||||
| 		d.linfo('finished build: $sb.target.url -> $sb.target.repo; uploading logs...') |  | ||||||
| 
 |  | ||||||
| 		build_arch := os.uname().machine |  | ||||||
| 		d.client.add_build_log(sb.target.id, res.start_time, res.end_time, build_arch, |  | ||||||
| 			res.exit_code, res.logs) or { |  | ||||||
| 			d.lerror('Failed to upload logs for build: $sb.target.url -> $sb.target.repo') |  | ||||||
| 		} |  | ||||||
| 	} else { |  | ||||||
| 		d.linfo('an error occured during build: $sb.target.url -> $sb.target.repo') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // current_build_count returns how many builds are currently running. |  | ||||||
| fn (mut d Daemon) current_build_count() int { |  | ||||||
| 	mut res := 0 |  | ||||||
| 
 |  | ||||||
| 	for i in 0 .. d.atomics.len { |  | ||||||
| 		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_running { |  | ||||||
| 			res += 1 |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return res |  | ||||||
| } |  | ||||||
|  | @ -1,274 +0,0 @@ | ||||||
| module daemon |  | ||||||
| 
 |  | ||||||
| import time |  | ||||||
| import log |  | ||||||
| import datatypes { MinHeap } |  | ||||||
| import cron.expression { CronExpression, parse_expression } |  | ||||||
| import math |  | ||||||
| import build |  | ||||||
| import docker |  | ||||||
| import os |  | ||||||
| import client |  | ||||||
| import models { Target } |  | ||||||
| 
 |  | ||||||
| const ( |  | ||||||
| 	// How many seconds to wait before retrying to update API if failed |  | ||||||
| 	api_update_retry_timeout        = 5 |  | ||||||
| 	// How many seconds to wait before retrying to rebuild image if failed |  | ||||||
| 	rebuild_base_image_retry_timout = 30 |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| struct ScheduledBuild { |  | ||||||
| pub: |  | ||||||
| 	target    Target |  | ||||||
| 	timestamp time.Time |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Overloaded operator for comparing ScheduledBuild objects |  | ||||||
| fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool { |  | ||||||
| 	return r1.timestamp < r2.timestamp |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| pub struct Daemon { |  | ||||||
| mut: |  | ||||||
| 	client                  client.Client |  | ||||||
| 	base_image              string |  | ||||||
| 	builder_images          []string |  | ||||||
| 	global_schedule         CronExpression |  | ||||||
| 	api_update_frequency    int |  | ||||||
| 	image_rebuild_frequency int |  | ||||||
| 	// Targets currently loaded from API. |  | ||||||
| 	targets []Target |  | ||||||
| 	// At what point to update the list of targets. |  | ||||||
| 	api_update_timestamp  time.Time |  | ||||||
| 	image_build_timestamp time.Time |  | ||||||
| 	queue                 MinHeap<ScheduledBuild> |  | ||||||
| 	// Which builds are currently running |  | ||||||
| 	builds []ScheduledBuild |  | ||||||
| 	// Atomic variables used to detect when a build has finished; length is the |  | ||||||
| 	// same as builds |  | ||||||
| 	atomics []u64 |  | ||||||
| 	logger  shared log.Log |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // init_daemon initializes a new Daemon object. It renews the targets & |  | ||||||
| // populates the build queue for the first time. |  | ||||||
| pub fn init_daemon(logger log.Log, address string, api_key string, base_image string, global_schedule CronExpression, max_concurrent_builds int, api_update_frequency int, image_rebuild_frequency int) !Daemon { |  | ||||||
| 	mut d := Daemon{ |  | ||||||
| 		client: client.new(address, api_key) |  | ||||||
| 		base_image: base_image |  | ||||||
| 		global_schedule: global_schedule |  | ||||||
| 		api_update_frequency: api_update_frequency |  | ||||||
| 		image_rebuild_frequency: image_rebuild_frequency |  | ||||||
| 		atomics: []u64{len: max_concurrent_builds} |  | ||||||
| 		builds: []ScheduledBuild{len: max_concurrent_builds} |  | ||||||
| 		logger: logger |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Initialize the targets & queue |  | ||||||
| 	d.renew_targets() |  | ||||||
| 	d.renew_queue() |  | ||||||
| 	if !d.rebuild_base_image() { |  | ||||||
| 		return error('The base image failed to build. The Vieter cron daemon cannot run without an initial builder image.') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return d |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // run starts the actual daemon process. It runs builds when possible & |  | ||||||
| // periodically refreshes the list of targets to ensure we stay in sync. |  | ||||||
| pub fn (mut d Daemon) run() { |  | ||||||
| 	for { |  | ||||||
| 		finished_builds := d.clean_finished_builds() |  | ||||||
| 
 |  | ||||||
| 		// Update the API's contents if needed & renew the queue |  | ||||||
| 		if time.now() >= d.api_update_timestamp { |  | ||||||
| 			d.renew_targets() |  | ||||||
| 			d.renew_queue() |  | ||||||
| 		} |  | ||||||
| 		// The finished builds should only be rescheduled if the API contents |  | ||||||
| 		// haven't been renewed. |  | ||||||
| 		else { |  | ||||||
| 			for sb in finished_builds { |  | ||||||
| 				d.schedule_build(sb.target) |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// TODO remove old builder images. |  | ||||||
| 		// This issue is less trivial than it sounds, because a build could |  | ||||||
| 		// still be running when the image has to be rebuilt. That would |  | ||||||
| 		// prevent the image from being removed. Therefore, we will need to |  | ||||||
| 		// keep track of a list or something & remove an image once we have |  | ||||||
| 		// made sure it isn't being used anymore. |  | ||||||
| 		if time.now() >= d.image_build_timestamp { |  | ||||||
| 			d.rebuild_base_image() |  | ||||||
| 			// In theory, executing this function here allows an old builder |  | ||||||
| 			// image to exist for at most image_rebuild_frequency minutes. |  | ||||||
| 			d.clean_old_base_images() |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// Schedules new builds when possible |  | ||||||
| 		d.start_new_builds() |  | ||||||
| 
 |  | ||||||
| 		// If there are builds currently running, the daemon should refresh |  | ||||||
| 		// every second to clean up any finished builds & start new ones. |  | ||||||
| 		mut delay := time.Duration(1 * time.second) |  | ||||||
| 
 |  | ||||||
| 		// Sleep either until we have to refresh the targets or when the next |  | ||||||
| 		// build has to start, with a minimum of 1 second. |  | ||||||
| 		if d.current_build_count() == 0 { |  | ||||||
| 			now := time.now() |  | ||||||
| 			delay = d.api_update_timestamp - now |  | ||||||
| 
 |  | ||||||
| 			if d.queue.len() > 0 { |  | ||||||
| 				elem := d.queue.peek() or { |  | ||||||
| 					d.lerror("queue.peek() unexpectedly returned an error. This shouldn't happen.") |  | ||||||
| 
 |  | ||||||
| 					// This is just a fallback option. In theory, queue.peek() |  | ||||||
| 					// should *never* return an error or none, because we check |  | ||||||
| 					// its len beforehand. |  | ||||||
| 					time.sleep(1) |  | ||||||
| 					continue |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				time_until_next_job := elem.timestamp - now |  | ||||||
| 
 |  | ||||||
| 				delay = math.min(delay, time_until_next_job) |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// We sleep for at least one second. This is to prevent the program |  | ||||||
| 		// from looping agressively when a cronjob can be scheduled, but |  | ||||||
| 		// there's no spots free for it to be started. |  | ||||||
| 		delay = math.max(delay, 1 * time.second) |  | ||||||
| 
 |  | ||||||
| 		d.ldebug('Sleeping for ${delay}...') |  | ||||||
| 
 |  | ||||||
| 		time.sleep(delay) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // schedule_build adds the next occurence of the given targets build to the |  | ||||||
| // queue. |  | ||||||
| fn (mut d Daemon) schedule_build(target Target) { |  | ||||||
| 	ce := if target.schedule != '' { |  | ||||||
| 		parse_expression(target.schedule) or { |  | ||||||
| 			// TODO This shouldn't return an error if the expression is empty. |  | ||||||
| 			d.lerror("Error while parsing cron expression '$target.schedule' (id $target.id): $err.msg()") |  | ||||||
| 
 |  | ||||||
| 			d.global_schedule |  | ||||||
| 		} |  | ||||||
| 	} else { |  | ||||||
| 		d.global_schedule |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// A target that can't be scheduled will just be skipped for now |  | ||||||
| 	timestamp := ce.next_from_now() or { |  | ||||||
| 		d.lerror("Couldn't calculate next timestamp from '$target.schedule'; skipping") |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	d.queue.insert(ScheduledBuild{ |  | ||||||
| 		target: target |  | ||||||
| 		timestamp: timestamp |  | ||||||
| 	}) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // renew_targets requests the newest list of targets from the server & replaces |  | ||||||
| // the old one. |  | ||||||
| fn (mut d Daemon) renew_targets() { |  | ||||||
| 	d.linfo('Renewing targets...') |  | ||||||
| 
 |  | ||||||
| 	mut new_targets := d.client.get_all_targets() or { |  | ||||||
| 		d.lerror('Failed to renew targets. Retrying in ${daemon.api_update_retry_timeout}s...') |  | ||||||
| 		d.api_update_timestamp = time.now().add_seconds(daemon.api_update_retry_timeout) |  | ||||||
| 
 |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Filter out any targets that shouldn't run on this architecture |  | ||||||
| 	cur_arch := os.uname().machine |  | ||||||
| 	new_targets = new_targets.filter(it.arch.any(it.value == cur_arch)) |  | ||||||
| 
 |  | ||||||
| 	d.targets = new_targets |  | ||||||
| 
 |  | ||||||
| 	d.api_update_timestamp = time.now().add_seconds(60 * d.api_update_frequency) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // renew_queue replaces the old queue with a new one that reflects the newest |  | ||||||
| // values in targets. |  | ||||||
| fn (mut d Daemon) renew_queue() { |  | ||||||
| 	d.linfo('Renewing queue...') |  | ||||||
| 	mut new_queue := MinHeap<ScheduledBuild>{} |  | ||||||
| 
 |  | ||||||
| 	// Move any jobs that should have already started from the old queue onto |  | ||||||
| 	// the new one |  | ||||||
| 	now := time.now() |  | ||||||
| 
 |  | ||||||
| 	// For some reason, using |  | ||||||
| 	// ```v |  | ||||||
| 	// for d.queue.len() > 0 && d.queue.peek() !.timestamp < now { |  | ||||||
| 	//``` |  | ||||||
| 	// here causes the function to prematurely just exit, without any errors or anything, very weird |  | ||||||
| 	// https://github.com/vlang/v/issues/14042 |  | ||||||
| 	for d.queue.len() > 0 { |  | ||||||
| 		elem := d.queue.pop() or { |  | ||||||
| 			d.lerror("queue.pop() returned an error. This shouldn't happen.") |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if elem.timestamp < now { |  | ||||||
| 			new_queue.insert(elem) |  | ||||||
| 		} else { |  | ||||||
| 			break |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	d.queue = new_queue |  | ||||||
| 
 |  | ||||||
| 	// For each target in targets, parse their cron expression (or use the |  | ||||||
| 	// default one if not present) & add them to the queue |  | ||||||
| 	for target in d.targets { |  | ||||||
| 		d.schedule_build(target) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // rebuild_base_image recreates the builder image. |  | ||||||
| fn (mut d Daemon) rebuild_base_image() bool { |  | ||||||
| 	d.linfo('Rebuilding builder image....') |  | ||||||
| 
 |  | ||||||
| 	d.builder_images << build.create_build_image(d.base_image) or { |  | ||||||
| 		d.lerror('Failed to rebuild base image. Retrying in ${daemon.rebuild_base_image_retry_timout}s...') |  | ||||||
| 		d.image_build_timestamp = time.now().add_seconds(daemon.rebuild_base_image_retry_timout) |  | ||||||
| 
 |  | ||||||
| 		return false |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	d.image_build_timestamp = time.now().add_seconds(60 * d.image_rebuild_frequency) |  | ||||||
| 
 |  | ||||||
| 	return true |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // clean_old_base_images tries to remove any old but still present builder |  | ||||||
| // images. |  | ||||||
| fn (mut d Daemon) clean_old_base_images() { |  | ||||||
| 	mut i := 0 |  | ||||||
| 
 |  | ||||||
| 	mut dd := docker.new_conn() or { |  | ||||||
| 		d.lerror('Failed to connect to Docker socket.') |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	defer { |  | ||||||
| 		dd.close() or {} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for i < d.builder_images.len - 1 { |  | ||||||
| 		// For each builder image, we try to remove it by calling the Docker |  | ||||||
| 		// API. If the function returns an error or false, that means the image |  | ||||||
| 		// wasn't deleted. Therefore, we move the index over. If the function |  | ||||||
| 		// returns true, the array's length has decreased by one so we don't |  | ||||||
| 		// move the index. |  | ||||||
| 		dd.image_remove(d.builder_images[i]) or { i += 1 } |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  | @ -1,36 +0,0 @@ | ||||||
| module daemon |  | ||||||
| 
 |  | ||||||
| // lfatal create a log message with the fatal level |  | ||||||
| pub fn (mut d Daemon) lfatal(msg string) { |  | ||||||
| 	lock d.logger { |  | ||||||
| 		d.logger.fatal(msg) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // lerror create a log message with the error level |  | ||||||
| pub fn (mut d Daemon) lerror(msg string) { |  | ||||||
| 	lock d.logger { |  | ||||||
| 		d.logger.error(msg) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // lwarn create a log message with the warn level |  | ||||||
| pub fn (mut d Daemon) lwarn(msg string) { |  | ||||||
| 	lock d.logger { |  | ||||||
| 		d.logger.warn(msg) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // linfo create a log message with the info level |  | ||||||
| pub fn (mut d Daemon) linfo(msg string) { |  | ||||||
| 	lock d.logger { |  | ||||||
| 		d.logger.info(msg) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // ldebug create a log message with the debug level |  | ||||||
| pub fn (mut d Daemon) ldebug(msg string) { |  | ||||||
| 	lock d.logger { |  | ||||||
| 		d.logger.debug(msg) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  | @ -0,0 +1,101 @@ | ||||||
|  | module cron | ||||||
|  | 
 | ||||||
|  | #flag -I @VMODROOT/libvieter/include | ||||||
|  | #flag -L @VMODROOT/libvieter/build | ||||||
|  | #flag -lvieter | ||||||
|  | #include "vieter_cron.h" | ||||||
|  | 
 | ||||||
|  | [typedef] | ||||||
|  | pub struct C.vieter_cron_expression { | ||||||
|  | 	minutes      &u8 | ||||||
|  | 	hours        &u8 | ||||||
|  | 	days         &u8 | ||||||
|  | 	months       &u8 | ||||||
|  | 	minute_count u8 | ||||||
|  | 	hour_count   u8 | ||||||
|  | 	day_count    u8 | ||||||
|  | 	month_count  u8 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | pub type Expression = C.vieter_cron_expression | ||||||
|  | 
 | ||||||
|  | // == returns whether the two expressions are equal by value. | ||||||
|  | fn (ce1 Expression) == (ce2 Expression) bool { | ||||||
|  | 	if ce1.month_count != ce2.month_count || ce1.day_count != ce2.day_count | ||||||
|  | 		|| ce1.hour_count != ce2.hour_count || ce1.minute_count != ce2.minute_count { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for i in 0 .. ce1.month_count { | ||||||
|  | 		unsafe { | ||||||
|  | 			if ce1.months[i] != ce2.months[i] { | ||||||
|  | 				return false | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	for i in 0 .. ce1.day_count { | ||||||
|  | 		unsafe { | ||||||
|  | 			if ce1.days[i] != ce2.days[i] { | ||||||
|  | 				return false | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	for i in 0 .. ce1.hour_count { | ||||||
|  | 		unsafe { | ||||||
|  | 			if ce1.hours[i] != ce2.hours[i] { | ||||||
|  | 				return false | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	for i in 0 .. ce1.minute_count { | ||||||
|  | 		unsafe { | ||||||
|  | 			if ce1.minutes[i] != ce2.minutes[i] { | ||||||
|  | 				return false | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | [typedef] | ||||||
|  | struct C.vieter_cron_simple_time { | ||||||
|  | 	year   int | ||||||
|  | 	month  int | ||||||
|  | 	day    int | ||||||
|  | 	hour   int | ||||||
|  | 	minute int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type SimpleTime = C.vieter_cron_simple_time | ||||||
|  | 
 | ||||||
|  | enum ParseError as u8 { | ||||||
|  | 	ok = 0 | ||||||
|  | 	invalid_expression = 1 | ||||||
|  | 	invalid_number = 2 | ||||||
|  | 	out_of_range = 3 | ||||||
|  | 	too_many_parts = 4 | ||||||
|  | 	not_enough_parts = 5 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // str returns the string representation of a ParseError. | ||||||
|  | fn (e ParseError) str() string { | ||||||
|  | 	return match e { | ||||||
|  | 		.ok { '' } | ||||||
|  | 		.invalid_expression { 'Invalid expression' } | ||||||
|  | 		.invalid_number { 'Invalid number' } | ||||||
|  | 		.out_of_range { 'Out of range' } | ||||||
|  | 		.too_many_parts { 'Too many parts' } | ||||||
|  | 		.not_enough_parts { 'Not enough parts' } | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn C.vieter_cron_expr_init() &C.vieter_cron_expression | ||||||
|  | 
 | ||||||
|  | fn C.vieter_cron_expr_free(ce &C.vieter_cron_expression) | ||||||
|  | 
 | ||||||
|  | fn C.vieter_cron_expr_next(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression, ref &C.vieter_cron_simple_time) | ||||||
|  | 
 | ||||||
|  | fn C.vieter_cron_expr_next_from_now(out &C.vieter_cron_simple_time, ce &C.vieter_cron_expression) | ||||||
|  | 
 | ||||||
|  | fn C.vieter_cron_expr_parse(out &C.vieter_cron_expression, s &char) ParseError | ||||||
|  | @ -0,0 +1,73 @@ | ||||||
|  | module cron | ||||||
|  | 
 | ||||||
|  | import time | ||||||
|  | 
 | ||||||
|  | // free the memory associated with the Expression. | ||||||
|  | [unsafe] | ||||||
|  | pub fn (ce &Expression) free() { | ||||||
|  | 	C.vieter_cron_expr_free(ce) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // parse_expression parses a string into an Expression. | ||||||
|  | pub fn parse_expression(exp string) !&Expression { | ||||||
|  | 	out := C.vieter_cron_expr_init() | ||||||
|  | 	res := C.vieter_cron_expr_parse(out, exp.str) | ||||||
|  | 
 | ||||||
|  | 	if res != .ok { | ||||||
|  | 		return error(res.str()) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return out | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // next calculates the next occurrence of the cron schedule, given a reference | ||||||
|  | // point. | ||||||
|  | pub fn (ce &Expression) next(ref time.Time) time.Time { | ||||||
|  | 	st := SimpleTime{ | ||||||
|  | 		year: ref.year | ||||||
|  | 		month: ref.month | ||||||
|  | 		day: ref.day | ||||||
|  | 		hour: ref.hour | ||||||
|  | 		minute: ref.minute | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	out := SimpleTime{} | ||||||
|  | 	C.vieter_cron_expr_next(&out, ce, &st) | ||||||
|  | 
 | ||||||
|  | 	return time.new_time(time.Time{ | ||||||
|  | 		year: out.year | ||||||
|  | 		month: out.month | ||||||
|  | 		day: out.day | ||||||
|  | 		hour: out.hour | ||||||
|  | 		minute: out.minute | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // next_from_now calculates the next occurrence of the cron schedule with the | ||||||
|  | // current time as reference. | ||||||
|  | pub fn (ce &Expression) next_from_now() time.Time { | ||||||
|  | 	out := SimpleTime{} | ||||||
|  | 	C.vieter_cron_expr_next_from_now(&out, ce) | ||||||
|  | 
 | ||||||
|  | 	return time.new_time(time.Time{ | ||||||
|  | 		year: out.year | ||||||
|  | 		month: out.month | ||||||
|  | 		day: out.day | ||||||
|  | 		hour: out.hour | ||||||
|  | 		minute: out.minute | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // next_n returns the n next occurrences of the expression, given a starting | ||||||
|  | // time. | ||||||
|  | pub fn (ce &Expression) next_n(ref time.Time, n int) []time.Time { | ||||||
|  | 	mut times := []time.Time{cap: n} | ||||||
|  | 
 | ||||||
|  | 	times << ce.next(ref) | ||||||
|  | 
 | ||||||
|  | 	for i in 1 .. n { | ||||||
|  | 		times << ce.next(times[i - 1]) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return times | ||||||
|  | } | ||||||
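As a usage sketch of the wrapper above (mirroring what the `schedule` CLI command shown earlier in this diff does; the expression string and count are hard-coded purely for illustration):

```v
module main

import cron
import time

fn main() {
	// preview the next five times a '0 3' (daily at 03:00) schedule fires
	ce := cron.parse_expression('0 3') or { panic(err) }

	for t in ce.next_n(time.now(), 5) {
		println(t)
	}
}
```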
|  | @ -1,136 +0,0 @@ | ||||||
| module expression |  | ||||||
| 
 |  | ||||||
| import time |  | ||||||
| 
 |  | ||||||
| pub struct CronExpression { |  | ||||||
| 	minutes []int |  | ||||||
| 	hours   []int |  | ||||||
| 	days    []int |  | ||||||
| 	months  []int |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // next calculates the earliest time this cron expression is valid. It will |  | ||||||
| // always pick a moment in the future, even if ref matches completely up to the |  | ||||||
| // minute. This function conciously does not take gap years into account. |  | ||||||
| pub fn (ce &CronExpression) next(ref time.Time) !time.Time { |  | ||||||
| 	// If the given ref matches the next cron occurence up to the minute, it |  | ||||||
| 	// will return that value. Because we always want to return a value in the |  | ||||||
| 	// future, we artifically shift the ref 60 seconds to make sure we always |  | ||||||
| 	// match in the future. A shift of 60 seconds is enough because the cron |  | ||||||
| 	// expression does not allow for accuracy smaller than one minute. |  | ||||||
| 	sref := ref |  | ||||||
| 
 |  | ||||||
| 	// For all of these values, the rule is the following: if their value is |  | ||||||
| 	// the length of their respective array in the CronExpression object, that |  | ||||||
| 	// means we've looped back around. This means that the "bigger" value has |  | ||||||
| 	// to be incremented by one. For example, if the minutes have looped |  | ||||||
| 	// around, that means that the hour has to be incremented as well. |  | ||||||
| 	mut minute_index := 0 |  | ||||||
| 	mut hour_index := 0 |  | ||||||
| 	mut day_index := 0 |  | ||||||
| 	mut month_index := 0 |  | ||||||
| 
 |  | ||||||
| 	// This chain is the same logic multiple times, namely that if a "bigger" |  | ||||||
| 	// value loops around, then the smaller value will always reset as well. |  | ||||||
| 	// For example, if we're going to a new day, the hour & minute will always |  | ||||||
| 	// be their smallest value again. |  | ||||||
| 	for month_index < ce.months.len && sref.month > ce.months[month_index] { |  | ||||||
| 		month_index++ |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if month_index < ce.months.len && sref.month == ce.months[month_index] { |  | ||||||
| 		for day_index < ce.days.len && sref.day > ce.days[day_index] { |  | ||||||
| 			day_index++ |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if day_index < ce.days.len && ce.days[day_index] == sref.day { |  | ||||||
| 			for hour_index < ce.hours.len && sref.hour > ce.hours[hour_index] { |  | ||||||
| 				hour_index++ |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if hour_index < ce.hours.len && ce.hours[hour_index] == sref.hour { |  | ||||||
| 				// Minute is the only value where we explicitely make sure we |  | ||||||
| 				// can't match sref's value exactly. This is to ensure we only |  | ||||||
| 				// return values in the future. |  | ||||||
| 				for minute_index < ce.minutes.len && sref.minute >= ce.minutes[minute_index] { |  | ||||||
| 					minute_index++ |  | ||||||
| 				} |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Here, we increment the "bigger" values by one if the smaller ones loop |  | ||||||
| 	// around. The order is important, as it allows a sort-of waterfall effect |  | ||||||
| 	// to occur which updates all values if required. |  | ||||||
| 	if minute_index == ce.minutes.len && hour_index < ce.hours.len { |  | ||||||
| 		hour_index += 1 |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if hour_index == ce.hours.len && day_index < ce.days.len { |  | ||||||
| 		day_index += 1 |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if day_index == ce.days.len && month_index < ce.months.len { |  | ||||||
| 		month_index += 1 |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	mut minute := ce.minutes[minute_index % ce.minutes.len] |  | ||||||
| 	mut hour := ce.hours[hour_index % ce.hours.len] |  | ||||||
| 	mut day := ce.days[day_index % ce.days.len] |  | ||||||
| 
 |  | ||||||
| 	// Sometimes, we end up with a day that does not exist within the selected |  | ||||||
| 	// month, e.g. day 30 in February. When this occurs, we reset day back to |  | ||||||
| 	// the smallest value & loop over to the next month that does have this |  | ||||||
| 	// day. |  | ||||||
| 	if day > time.month_days[ce.months[month_index % ce.months.len] - 1] { |  | ||||||
| 		day = ce.days[0] |  | ||||||
| 		month_index += 1 |  | ||||||
| 
 |  | ||||||
| 		for day > time.month_days[ce.months[month_index & ce.months.len] - 1] { |  | ||||||
| 			month_index += 1 |  | ||||||
| 
 |  | ||||||
| 			// If for whatever reason the day value ends up being something |  | ||||||
| 			// that can't be scheduled in any month, we have to make sure we |  | ||||||
| 			// don't create an infinite loop. |  | ||||||
| 			if month_index == 2 * ce.months.len { |  | ||||||
| 				return error('No schedulable moment.') |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	month := ce.months[month_index % ce.months.len] |  | ||||||
| 	mut year := sref.year |  | ||||||
| 
 |  | ||||||
| 	// If the month loops over, we need to increment the year. |  | ||||||
| 	if month_index >= ce.months.len { |  | ||||||
| 		year++ |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return time.new_time(time.Time{ |  | ||||||
| 		year: year |  | ||||||
| 		month: month |  | ||||||
| 		day: day |  | ||||||
| 		minute: minute |  | ||||||
| 		hour: hour |  | ||||||
| 	}) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // next_from_now returns the result of ce.next(ref) where ref is the result of |  | ||||||
| // time.now(). |  | ||||||
| pub fn (ce &CronExpression) next_from_now() !time.Time { |  | ||||||
| 	return ce.next(time.now()) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // next_n returns the n next occurences of the expression, given a starting |  | ||||||
| // time. |  | ||||||
| pub fn (ce &CronExpression) next_n(ref time.Time, n int) ![]time.Time { |  | ||||||
| 	mut times := []time.Time{cap: n} |  | ||||||
| 
 |  | ||||||
| 	times << ce.next(ref)! |  | ||||||
| 
 |  | ||||||
| 	for i in 1 .. n { |  | ||||||
| 		times << ce.next(times[i - 1])! |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return times |  | ||||||
| } |  | ||||||
|  | @ -1,146 +0,0 @@ | ||||||
| module expression |  | ||||||
| 
 |  | ||||||
| import bitfield |  | ||||||
| 
 |  | ||||||
| // parse_range parses a given string into a range of sorted integers. Its |  | ||||||
| // result is a BitField with set bits for all numbers in the result. |  | ||||||
| fn parse_range(s string, min int, max int) !bitfield.BitField { |  | ||||||
| 	mut start := min |  | ||||||
| 	mut end := max |  | ||||||
| 	mut interval := 1 |  | ||||||
| 	mut bf := bitfield.new(max - min + 1) |  | ||||||
| 
 |  | ||||||
| 	exps := s.split('/') |  | ||||||
| 
 |  | ||||||
| 	if exps.len > 2 { |  | ||||||
| 		return error('Invalid expression.') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if exps[0] != '*' { |  | ||||||
| 		dash_parts := exps[0].split('-') |  | ||||||
| 
 |  | ||||||
| 		if dash_parts.len > 2 { |  | ||||||
| 			return error('Invalid expression.') |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		start = dash_parts[0].int() |  | ||||||
| 
 |  | ||||||
| 		// The builtin parsing functions return zero if the string can't be |  | ||||||
| 		// parsed into a number, so we have to explicitely check whether they |  | ||||||
| 		// actually entered zero or if it's an invalid number. |  | ||||||
| 		if start == 0 && dash_parts[0] != '0' { |  | ||||||
| 			return error('Invalid number.') |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// Check whether the start value is out of range |  | ||||||
| 		if start < min || start > max { |  | ||||||
| 			return error('Out of range.') |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if dash_parts.len == 2 { |  | ||||||
| 			end = dash_parts[1].int() |  | ||||||
| 
 |  | ||||||
| 			if end == 0 && dash_parts[1] != '0' { |  | ||||||
| 				return error('Invalid number.') |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if end < start || end > max { |  | ||||||
| 				return error('Out of range.') |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if exps.len > 1 { |  | ||||||
| 		interval = exps[1].int() |  | ||||||
| 
 |  | ||||||
| 		// interval being zero is always invalid, but we want to check why |  | ||||||
| 		// it's invalid for better error messages. |  | ||||||
| 		if interval == 0 { |  | ||||||
| 			if exps[1] != '0' { |  | ||||||
| 				return error('Invalid number.') |  | ||||||
| 			} else { |  | ||||||
| 				return error('Step size zero not allowed.') |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if interval > max - min { |  | ||||||
| 			return error('Step size too large.') |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 	// Here, s solely consists of a number, so that's the only value we |  | ||||||
| 	// should return. |  | ||||||
| 	else if exps[0] != '*' && !exps[0].contains('-') { |  | ||||||
| 		bf.set_bit(start - min) |  | ||||||
| 		return bf |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for start <= end { |  | ||||||
| 		bf.set_bit(start - min) |  | ||||||
| 		start += interval |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return bf |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // bf_to_ints takes a BitField and converts it into the expected list of actual |  | ||||||
| // integers. |  | ||||||
| fn bf_to_ints(bf bitfield.BitField, min int) []int { |  | ||||||
| 	mut out := []int{} |  | ||||||
| 
 |  | ||||||
| 	for i in 0 .. bf.get_size() { |  | ||||||
| 		if bf.get_bit(i) == 1 { |  | ||||||
| 			out << min + i |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return out |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // parse_part parses a given part of a cron expression & returns the |  | ||||||
| // corresponding array of ints. |  | ||||||
| fn parse_part(s string, min int, max int) ![]int { |  | ||||||
| 	mut bf := bitfield.new(max - min + 1) |  | ||||||
| 
 |  | ||||||
| 	for range in s.split(',') { |  | ||||||
| 		bf2 := parse_range(range, min, max)! |  | ||||||
| 		bf = bitfield.bf_or(bf, bf2) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return bf_to_ints(bf, min) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // parse_expression parses an entire cron expression string into a |  | ||||||
| // CronExpression object, if possible. |  | ||||||
| pub fn parse_expression(exp string) !CronExpression { |  | ||||||
| 	// The filter allows for multiple spaces between parts |  | ||||||
| 	mut parts := exp.split(' ').filter(it != '') |  | ||||||
| 
 |  | ||||||
| 	if parts.len < 2 || parts.len > 4 { |  | ||||||
| 		return error('Expression must contain between 2 and 4 space-separated parts.') |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// For ease of use, we allow the user to only specify as many parts as they |  | ||||||
| 	// need. |  | ||||||
| 	for parts.len < 4 { |  | ||||||
| 		parts << '*' |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	mut part_results := [][]int{} |  | ||||||
| 
 |  | ||||||
| 	mins := [0, 0, 1, 1] |  | ||||||
| 	maxs := [59, 23, 31, 12] |  | ||||||
| 
 |  | ||||||
| 	// This for loop allows us to more clearly propagate the error to the user. |  | ||||||
| 	for i, min in mins { |  | ||||||
| 		part_results << parse_part(parts[i], min, maxs[i]) or { |  | ||||||
| 			return error('An error occurred with part $i: $err.msg()') |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return CronExpression{ |  | ||||||
| 		minutes: part_results[0] |  | ||||||
| 		hours: part_results[1] |  | ||||||
| 		days: part_results[2] |  | ||||||
| 		months: part_results[3] |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
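For orientation, here is a minimal usage sketch of the V-based parser removed above. The expression and the asserted values are illustrative, derived from the code shown; the test function name is hypothetical and this snippet is not part of the diff.

	fn test_usage_sketch() ! {
		ce := parse_expression('*/15 3')!
		assert ce.minutes == [0, 15, 30, 45]
		assert ce.hours == [3]
		// Omitted parts default to '*', i.e. the full day and month ranges.
		assert ce.days.len == 31 && ce.months.len == 12
	}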
|  | @ -1,89 +0,0 @@ | ||||||
| module expression |  | ||||||
| 
 |  | ||||||
| // parse_range_error returns the error message produced by parse_range. If |  | ||||||
| // the result is '', the function didn't return an error. |  | ||||||
| fn parse_range_error(s string, min int, max int) string { |  | ||||||
| 	parse_range(s, min, max) or { return err.msg() } |  | ||||||
| 
 |  | ||||||
| 	return '' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // =====parse_range===== |  | ||||||
| fn test_range_star_range() ! { |  | ||||||
| 	bf := parse_range('*', 0, 5)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 0) == [0, 1, 2, 3, 4, 5] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_number() ! { |  | ||||||
| 	bf := parse_range('4', 0, 5)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 0) == [4] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_number_too_large() ! { |  | ||||||
| 	assert parse_range_error('10', 0, 6) == 'Out of range.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_number_too_small() ! { |  | ||||||
| 	assert parse_range_error('0', 2, 6) == 'Out of range.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_number_invalid() ! { |  | ||||||
| 	assert parse_range_error('x', 0, 6) == 'Invalid number.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_star_1() ! { |  | ||||||
| 	bf := parse_range('*/4', 0, 20)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 0) == [0, 4, 8, 12, 16, 20] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_star_2() ! { |  | ||||||
| 	bf := parse_range('*/3', 1, 8)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 1) == [1, 4, 7] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_star_too_large() ! { |  | ||||||
| 	assert parse_range_error('*/21', 0, 20) == 'Step size too large.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_zero() ! { |  | ||||||
| 	assert parse_range_error('*/0', 0, 20) == 'Step size zero not allowed.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_number() ! { |  | ||||||
| 	bf := parse_range('5/4', 2, 22)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 2) == [5, 9, 13, 17, 21] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_number_too_large() ! { |  | ||||||
| 	assert parse_range_error('10/4', 0, 5) == 'Out of range.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_step_number_too_small() ! { |  | ||||||
| 	assert parse_range_error('2/4', 5, 10) == 'Out of range.' |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_dash() ! { |  | ||||||
| 	bf := parse_range('4-8', 0, 9)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 0) == [4, 5, 6, 7, 8] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_range_dash_step() ! { |  | ||||||
| 	bf := parse_range('4-8/2', 0, 9)! |  | ||||||
| 
 |  | ||||||
| 	assert bf_to_ints(bf, 0) == [4, 6, 8] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // =====parse_part===== |  | ||||||
| fn test_part_single() ! { |  | ||||||
| 	assert parse_part('*', 0, 5)! == [0, 1, 2, 3, 4, 5] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| fn test_part_multiple() ! { |  | ||||||
| 	assert parse_part('*/2,2/3', 1, 8)! == [1, 2, 3, 5, 7, 8] |  | ||||||
| } |  | ||||||
|  | @ -1,4 +1,4 @@ | ||||||
| module expression | module cron | ||||||
| 
 | 
 | ||||||
| import time { parse } | import time { parse } | ||||||
| 
 | 
 | ||||||
|  | @ -7,7 +7,7 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! { | ||||||
| 	t1 := parse(t1_str)! | 	t1 := parse(t1_str)! | ||||||
| 	t2 := parse(t2_str)! | 	t2 := parse(t2_str)! | ||||||
| 
 | 
 | ||||||
| 	t3 := ce.next(t1)! | 	t3 := ce.next(t1) | ||||||
| 
 | 
 | ||||||
| 	assert t2.year == t3.year | 	assert t2.year == t3.year | ||||||
| 	assert t2.month == t3.month | 	assert t2.month == t3.month | ||||||
|  | @ -18,17 +18,18 @@ fn util_test_time(exp string, t1_str string, t2_str string) ! { | ||||||
| 
 | 
 | ||||||
| fn test_next_simple() ! { | fn test_next_simple() ! { | ||||||
| 	// Very simple | 	// Very simple | ||||||
| 	util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')! | 	// util_test_time('0 3', '2002-01-01 00:00:00', '2002-01-01 03:00:00')! | ||||||
| 
 | 
 | ||||||
| 	// Overlap to next day | 	// Overlap to next day | ||||||
| 	util_test_time('0 3', '2002-01-01 03:00:00', '2002-01-02 03:00:00')! | 	mut exp := '0    3         ' | ||||||
| 	util_test_time('0 3', '2002-01-01 04:00:00', '2002-01-02 03:00:00')! | 	util_test_time(exp, '2002-01-01 03:00:00', '2002-01-02 03:00:00')! | ||||||
|  | 	util_test_time(exp, '2002-01-01 04:00:00', '2002-01-02 03:00:00')! | ||||||
| 
 | 
 | ||||||
| 	util_test_time('0 3/4', '2002-01-01 04:00:00', '2002-01-01 07:00:00')! | 	util_test_time('0 3-7/4,7-19', '2002-01-01 04:00:00', '2002-01-01 07:00:00')! | ||||||
| 
 | 
 | ||||||
| 	// Overlap to next month | 	//// Overlap to next month | ||||||
| 	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')! | 	util_test_time('0 3', '2002-11-31 04:00:00', '2002-12-01 03:00:00')! | ||||||
| 
 | 
 | ||||||
| 	// Overlap to next year | 	//// Overlap to next year | ||||||
| 	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')! | 	util_test_time('0 3', '2002-12-31 04:00:00', '2003-01-01 03:00:00')! | ||||||
| } | } | ||||||
|  | @ -0,0 +1,42 @@ | ||||||
|  | module cron | ||||||
|  | 
 | ||||||
|  | fn test_not_allowed() { | ||||||
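|  | 	// A collection of expressions that parse_expression must reject. | ||||||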
|  | 	illegal_expressions := [ | ||||||
|  | 		'4 *-7', | ||||||
|  | 		'4 *-7/4', | ||||||
|  | 		'4 7/*', | ||||||
|  | 		'0 0 30 2', | ||||||
|  | 		'0 /5', | ||||||
|  | 		'0  ', | ||||||
|  | 		'0', | ||||||
|  | 		'      0', | ||||||
|  | 		'       0      ', | ||||||
|  | 		'1 2 3 4~9', | ||||||
|  | 		'1 1-3-5', | ||||||
|  | 		'0 5/2-5', | ||||||
|  | 		'', | ||||||
|  | 		'1 1/2/3', | ||||||
|  | 		'*5 8', | ||||||
|  | 		'x 8', | ||||||
|  | 	] | ||||||
|  | 
 | ||||||
|  | 	mut res := false | ||||||
|  | 
 | ||||||
|  | 	for exp in illegal_expressions { | ||||||
|  | 		res = false | ||||||
|  | 		parse_expression(exp) or { res = true } | ||||||
|  | 		assert res, "'$exp' should produce an error" | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn test_auto_extend() ! { | ||||||
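|  | 	// Expressions with fewer than four parts are implicitly padded with '*'. | ||||||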
|  | 	ce1 := parse_expression('5 5')! | ||||||
|  | 	ce2 := parse_expression('5 5 *')! | ||||||
|  | 	ce3 := parse_expression('5 5 * *')! | ||||||
|  | 
 | ||||||
|  | 	assert ce1 == ce2 && ce2 == ce3 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn test_four() { | ||||||
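|  | 	// A trailing space must not be counted as an extra, empty part. | ||||||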
|  | 	parse_expression('0 1 2 3 ') or { assert false } | ||||||
|  | } | ||||||
|  | @ -0,0 +1 @@ | ||||||
|  | Subproject commit 379a05a7b6b604c107360e0a679fb3ea5400e02c | ||||||
|  | @ -9,7 +9,6 @@ import console.schedule | ||||||
| import console.man | import console.man | ||||||
| import console.aur | import console.aur | ||||||
| import console.repos | import console.repos | ||||||
| import cron |  | ||||||
| import agent | import agent | ||||||
| 
 | 
 | ||||||
| fn main() { | fn main() { | ||||||
|  | @ -43,7 +42,6 @@ fn main() { | ||||||
| 		commands: [ | 		commands: [ | ||||||
| 			server.cmd(), | 			server.cmd(), | ||||||
| 			targets.cmd(), | 			targets.cmd(), | ||||||
| 			cron.cmd(), |  | ||||||
| 			logs.cmd(), | 			logs.cmd(), | ||||||
| 			schedule.cmd(), | 			schedule.cmd(), | ||||||
| 			man.cmd(), | 			man.cmd(), | ||||||
|  |  | ||||||
|  | @ -3,17 +3,13 @@ module server | ||||||
| import time | import time | ||||||
| import models { BuildLog } | import models { BuildLog } | ||||||
| import os | import os | ||||||
| import cron.expression { CronExpression } | import cron | ||||||
| 
 | 
 | ||||||
| const fallback_log_removal_frequency = 24 * time.hour | const fallback_log_removal_frequency = 24 * time.hour | ||||||
| 
 | 
 | ||||||
| // log_removal_daemon removes old build logs every `log_removal_frequency`. | // log_removal_daemon removes old build logs every `log_removal_frequency`. | ||||||
| fn (mut app App) log_removal_daemon(schedule CronExpression) { | fn (mut app App) log_removal_daemon(schedule &cron.Expression) { | ||||||
| 	mut start_time := time.Time{} |  | ||||||
| 
 |  | ||||||
| 	for { | 	for { | ||||||
| 		start_time = time.now() |  | ||||||
| 
 |  | ||||||
| 		mut too_old_timestamp := time.now().add_days(-app.conf.max_log_age) | 		mut too_old_timestamp := time.now().add_days(-app.conf.max_log_age) | ||||||
| 
 | 
 | ||||||
| 		app.linfo('Cleaning logs before $too_old_timestamp') | 		app.linfo('Cleaning logs before $too_old_timestamp') | ||||||
|  | @ -51,12 +47,7 @@ fn (mut app App) log_removal_daemon(schedule CronExpression) { | ||||||
| 		app.linfo('Cleaned $counter logs ($failed failed)') | 		app.linfo('Cleaned $counter logs ($failed failed)') | ||||||
| 
 | 
 | ||||||
| 		// Sleep until the next cycle | 		// Sleep until the next cycle | ||||||
| 		next_time := schedule.next_from_now() or { | 		next_time := schedule.next_from_now() | ||||||
| 			app.lerror("Log removal daemon couldn't calculate next time: $err.msg(); fallback to $server.fallback_log_removal_frequency") |  | ||||||
| 
 |  | ||||||
| 			start_time.add(server.fallback_log_removal_frequency) |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		time.sleep(next_time - time.now()) | 		time.sleep(next_time - time.now()) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -7,7 +7,7 @@ import repo | ||||||
| import util | import util | ||||||
| import db | import db | ||||||
| import build { BuildJobQueue } | import build { BuildJobQueue } | ||||||
| import cron.expression | import cron | ||||||
| import metrics | import metrics | ||||||
| 
 | 
 | ||||||
| const ( | const ( | ||||||
|  | @ -43,11 +43,11 @@ pub fn server(conf Config) ! { | ||||||
| 		util.exit_with_message(1, "'any' is not allowed as the value for default_arch.") | 		util.exit_with_message(1, "'any' is not allowed as the value for default_arch.") | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	global_ce := expression.parse_expression(conf.global_schedule) or { | 	global_ce := cron.parse_expression(conf.global_schedule) or { | ||||||
| 		util.exit_with_message(1, 'Invalid global cron expression: $err.msg()') | 		util.exit_with_message(1, 'Invalid global cron expression: $err.msg()') | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	log_removal_ce := expression.parse_expression(conf.log_removal_schedule) or { | 	log_removal_ce := cron.parse_expression(conf.log_removal_schedule) or { | ||||||
| 		util.exit_with_message(1, 'Invalid log removal cron expression: $err.msg()') | 		util.exit_with_message(1, 'Invalid log removal cron expression: $err.msg()') | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -13,4 +13,5 @@ api_update_frequency = 2 | ||||||
| image_rebuild_frequency = 1 | image_rebuild_frequency = 1 | ||||||
| max_concurrent_builds = 3 | max_concurrent_builds = 3 | ||||||
| # max_log_age = 64 | # max_log_age = 64 | ||||||
|  | log_removal_schedule = '* * *' | ||||||
| collect_metrics = true | collect_metrics = true | ||||||
|  |  | ||||||
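A note on the new log_removal_schedule value above: '* * *' has only three parts, and missing parts are padded with '*' (see test_auto_extend), so it is equivalent to '* * * *', meaning the log removal daemon wakes up every minute. Below is a minimal sketch of how such a value drives the daemon's sleep cycle, based on the logs.v and server.v changes above; the function name is hypothetical and error handling is simplified.

	import time
	import cron

	fn sketch_log_removal_cycle() {
		schedule := cron.parse_expression('* * *') or { panic(err) }

		// next_from_now() yields the next time matching the schedule;
		// the daemon sleeps until then before cleaning old logs again.
		next_time := schedule.next_from_now()
		time.sleep(next_time - time.now())
	}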