Compare commits

11 Commits: b27041d5a7 ... cc061a38da

SHA1:
cc061a38da
ed02d82a0d
488a172586
d5409201c7
ead4c5f4b7
16151643e6
5ee6d553ac
6f9e1b5f3c
4d26797453
7722d5a7e4
20707f6af1
[.gitignore]

@@ -5,8 +5,8 @@ data/
 vieter
 dvieter
 pvieter
-dvieterctl
-vieterctl
+suvieter
+afvieter
 vieter.c
 
 # Ignore testing files

@@ -23,3 +23,6 @@ v/
 
 # gdb log file
 gdb.txt
+
+# Generated docs
+_docs/
[Woodpecker CI: Arch package build]

@@ -10,6 +10,8 @@ pipeline:
   build:
     image: 'menci/archlinuxarm:base-devel'
     commands:
+      # Add the vieter repository so we can use the compiler
+      - echo -e '[vieter]\nServer = https://arch.r8r.be/$repo/$arch\nSigLevel = Optional' >> /etc/pacman.conf
       # Update packages
       - pacman -Syu --noconfirm
       # Create non-root user to perform build & switch to their home
[Woodpecker CI: main build pipeline]

@@ -9,22 +9,21 @@ matrix:
   platform: ${PLATFORM}
 
 pipeline:
-  # The default build isn't needed, as alpine switches to gcc for the compiler anyways
   debug:
     image: 'chewingbever/vlang:latest'
     pull: true
-    group: 'build'
     commands:
-      - make debug
+      - make
     when:
       event: push
+      branch:
+        exclude: [main, dev]
 
   prod:
     image: 'chewingbever/vlang:latest'
     pull: true
     environment:
       - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static
-    group: 'build'
     commands:
       - make prod
       # Make sure the binary is actually statically built

@@ -27,16 +27,3 @@ pipeline:
       - du -h suvieter
     when:
       event: push
-
-  skip-unused-static:
-    image: 'chewingbever/vlang:latest'
-    pull: true
-    environment:
-      - LDFLAGS=-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static
-    group: 'build'
-    commands:
-      - make skip-unused
-      - readelf -d suvieter
-      - du -h suvieter
-    when:
-      event: push
Makefile (15 lines changed)

@@ -3,13 +3,14 @@ SRC_DIR := src
 SOURCES != find '$(SRC_DIR)' -iname '*.v'
 
 V_PATH ?= v
-V := $(V_PATH) -showcc -gc boehm
+V := $(V_PATH) -showcc -gc boehm -d dynamic_boehm
 
 all: vieter
 
 
 # =====COMPILATION=====
-# Regular binary
+# We force the boehm gc to be compiled dynamically because otherwise, our CI
+# build breaks.
 vieter: $(SOURCES)
 	$(V) -g -o vieter $(SRC_DIR)

@@ -24,7 +25,7 @@ dvieter: $(SOURCES)
 # Run the debug build inside gdb
 .PHONY: gdb
 gdb: dvieter
-	gdb --args './dvieter -f vieter.toml server'
+	gdb --args ./dvieter -f vieter.toml server
 
 # Optimised production build
 .PHONY: prod

@@ -74,8 +75,14 @@ v/v:
 	git clone --single-branch https://git.rustybever.be/Chewing_Bever/v v
 	make -C v
 
+.PHONY: clean
 clean:
-	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst
+	rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter' *.pkg.tar.zst 'suvieter' 'afvieter' '$(SRC_DIR)/_docs'
+
+.PHONY: docs
+docs:
+	rm -rf '$(SRC_DIR)/_docs'
+	cd '$(SRC_DIR)' && v doc -all -f html -m -readme .
 
 
 # =====EXPERIMENTAL=====
PKGBUILD (9 lines changed)

@@ -2,10 +2,10 @@
 
 pkgbase='vieter'
 pkgname='vieter'
-pkgver=0.2.0.r24.g9a56bd0
+pkgver=0.2.0.r25.g20112b8
 pkgrel=1
 depends=('glibc' 'openssl' 'libarchive' 'gc')
-makedepends=('git' 'gcc')
+makedepends=('git' 'gcc' 'vieter-v')
 arch=('x86_64' 'aarch64' 'armv7')
 url='https://git.rustybever.be/Chewing_Bever/vieter'
 license=('AGPL3')

@@ -20,10 +20,7 @@ pkgver() {
 build() {
 	cd "$pkgname"
 
-	# Build the compiler
-	CFLAGS= make v
-
-	V_PATH=v/v make prod
+	make prod
 }
 
 package() {
[README.md]

@@ -2,7 +2,9 @@
 
 ## Documentation
 
-I host documentation for Vieter over at https://rustybever.be/docs/vieter/.
+I host documentation for Vieter over at https://rustybever.be/docs/vieter/. API
+documentation for the current codebase can be found at
+https://rustybever.be/api-docs/vieter/.
 
 ## Overview
 
[V source: cron CLI Config struct]

@@ -11,8 +11,9 @@ pub:
 	address string
 	base_image string = 'archlinux:base-devel'
 	max_concurrent_builds int = 1
-	api_update_frequency int = 60
-	global_schedule string
+	api_update_frequency int = 15
+	// Replicates the behavior of the original cron system
+	global_schedule string = '0 3'
 }
 
 // cmd returns the cli module that handles the cron daemon.
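Note: with these defaults, a bare config only needs an address. A minimal sketch of how V's struct field defaults behave here (the struct is copied from the hunk above; the call site is hypothetical):

module main

struct Config {
pub:
	address string
	base_image string = 'archlinux:base-devel'
	max_concurrent_builds int = 1
	api_update_frequency int = 15
	// Replicates the behavior of the original cron system
	global_schedule string = '0 3'
}

fn main() {
	// Only address is set explicitly; every other field keeps its default.
	conf := Config{
		address: 'https://example.com'
	}

	println(conf.global_schedule) // '0 3': minute 0, hour 3, i.e. daily at 03:00
}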
[V source: module daemon, build slots]

@@ -1,9 +1,27 @@
 module daemon
 
-import git
 import time
 import sync.stdatomic
 
+const build_empty = 0
+
+const build_running = 1
+
+const build_done = 2
+
+// reschedule_builds looks for any builds with status code 2 & re-adds them to
+// the queue.
+fn (mut d Daemon) reschedule_builds() ? {
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_done {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_empty)
+			sb := d.builds[i]
+
+			d.schedule_build(sb.repo_id, sb.repo) ?
+		}
+	}
+}
+
 // update_builds starts as many builds as possible.
 fn (mut d Daemon) update_builds() ? {
 	now := time.now()

@@ -13,7 +31,8 @@ fn (mut d Daemon) update_builds() ? {
 			sb := d.queue.pop() ?
 
 			// If this build couldn't be scheduled, no more will be possible.
-			if !d.start_build(sb.repo_id)? {
+			// TODO a build that couldn't be scheduled should be re-added to the queue.
+			if !d.start_build(sb) {
 				break
 			}
 		} else {

@@ -22,13 +41,14 @@
 	}
 }
 
-// start_build starts a build for the given repo_id.
-fn (mut d Daemon) start_build(repo_id string) ?bool {
-	for i in 0..d.atomics.len {
-		if stdatomic.load_u64(&d.atomics[i]) == 0 {
-			stdatomic.store_u64(&d.atomics[i], 1)
+// start_build starts a build for the given ScheduledBuild object.
+fn (mut d Daemon) start_build(sb ScheduledBuild) bool {
+	for i in 0 .. d.atomics.len {
+		if stdatomic.load_u64(&d.atomics[i]) == daemon.build_empty {
+			stdatomic.store_u64(&d.atomics[i], daemon.build_running)
+			d.builds[i] = sb
 
-			go d.run_build(i, d.repos_map[repo_id])
+			go d.run_build(i, sb)
 
 			return true
 		}

@@ -37,9 +57,10 @@ fn (mut d Daemon) start_build(repo_id string) ?bool {
 	return false
 }
 
-fn (mut d Daemon) run_build(build_index int, repo git.GitRepo) ? {
+// run_build actually starts the build process for a given repo.
+fn (mut d Daemon) run_build(build_index int, sb ScheduledBuild) ? {
+	d.linfo('build $sb.repo.url')
 	time.sleep(10 * time.second)
 
-	stdatomic.store_u64(&d.atomics[build_index], 2)
+	stdatomic.store_u64(&d.atomics[build_index], daemon.build_done)
 }
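Note: the three constants name the per-slot states of a small atomic state machine (0 = empty, 1 = running, 2 = done). A standalone sketch of that protocol, with slot count and sleep times invented purely for illustration:

module main

import sync.stdatomic
import time

const build_empty = 0
const build_running = 1
const build_done = 2

fn main() {
	// One atomic status word per build slot, as in the daemon.
	mut atomics := []u64{len: 2}

	// Claim the first empty slot: build_empty -> build_running.
	for i in 0 .. atomics.len {
		if stdatomic.load_u64(&atomics[i]) == build_empty {
			stdatomic.store_u64(&atomics[i], build_running)

			go fn (a &u64) {
				time.sleep(10 * time.millisecond) // stand-in for a real build
				stdatomic.store_u64(a, build_done) // signal completion
			}(&atomics[i])

			break
		}
	}

	time.sleep(50 * time.millisecond)

	// What reschedule_builds does: reset finished slots so they can be
	// reused (build_done -> build_empty).
	for i in 0 .. atomics.len {
		if stdatomic.load_u64(&atomics[i]) == build_done {
			stdatomic.store_u64(&atomics[i], build_empty)
		}
	}
}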
[V source: module daemon, main loop]

@@ -5,6 +5,8 @@ import time
 import log
 import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
+import math
+import arrays
 
 struct ScheduledBuild {
 pub:

@@ -30,7 +32,7 @@ mut:
 	api_update_timestamp time.Time
 	queue MinHeap<ScheduledBuild>
 	// Which builds are currently running
-	builds []git.GitRepo
+	builds []ScheduledBuild
 	// Atomic variables used to detect when a build has finished; length is the
 	// same as builds
 	atomics []u64

@@ -47,7 +49,7 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
 		global_schedule: global_schedule
 		api_update_frequency: api_update_frequency
 		atomics: []u64{len: max_concurrent_builds}
-		builds: []git.GitRepo{len: max_concurrent_builds}
+		builds: []ScheduledBuild{len: max_concurrent_builds}
 		logger: logger
 	}

@@ -62,15 +64,63 @@ pub fn init_daemon(logger log.Log, address string, api_key string, base_image st
 // periodically refreshes the list of repositories to ensure we stay in sync.
 pub fn (mut d Daemon) run() ? {
 	for {
-		d.update_builds() ?
-		println(d.queue)
-		println(d.atomics)
-
-		time.sleep(60 * time.second)
+		// Update the API's contents if needed & renew the queue
+		if time.now() >= d.api_update_timestamp {
+			d.renew_repos() ?
+			d.renew_queue() ?
+		}
+
+		// Cleans up finished builds, opening up spots for new builds
+		d.reschedule_builds() ?
+
+		// TODO rebuild builder image when needed
+
+		// Schedules new builds when possible
+		d.update_builds() ?
+
+		// Sleep either until we have to refresh the repos or when the next
+		// build has to start, with a minimum of 1 second.
+		now := time.now()
+
+		mut delay := d.api_update_timestamp - now
+
+		if d.queue.len() > 0 {
+			time_until_next_job := d.queue.peek() ?.timestamp - now
+
+			delay = math.min(delay, time_until_next_job)
+		}
+
+		d.ldebug('Sleeping for ${delay}...')
+
+		// TODO if there are builds active, the sleep time should be much lower to clean up the builds when they're finished.
+
+		// We sleep for at least one second. This is to prevent the program
+		// from looping agressively when a cronjob can be scheduled, but
+		// there's no spots free for it to be started.
+		time.sleep(math.max(delay, 1 * time.second))
 	}
 }
 
+// schedule_build adds the next occurence of the given repo build to the queue.
+fn (mut d Daemon) schedule_build(repo_id string, repo git.GitRepo) ? {
+	ce := parse_expression(repo.schedule) or {
+		// TODO This shouldn't return an error if the expression is empty.
+		d.lerror("Error while parsing cron expression '$repo.schedule' ($repo_id): $err.msg()")
+
+		d.global_schedule
+	}
+	// A repo that can't be scheduled will just be skipped for now
+	timestamp := ce.next_from_now() ?
+
+	d.queue.insert(ScheduledBuild{
+		repo_id: repo_id
+		repo: repo
+		timestamp: timestamp
+	})
+}
+
 fn (mut d Daemon) renew_repos() ? {
+	d.ldebug('Renewing repos...')
 	mut new_repos := git.get_repos(d.address, d.api_key) ?
 
 	d.repos_map = new_repos.move()

@@ -81,6 +131,7 @@ fn (mut d Daemon) renew_repos() ? {
 // renew_queue replaces the old queue with a new one that reflects the newest
 // values in repos_map.
 fn (mut d Daemon) renew_queue() ? {
+	d.ldebug('Renewing queue...')
 	mut new_queue := MinHeap<ScheduledBuild>{}
 
 	// Move any jobs that should have already started from the old queue onto

@@ -101,19 +152,11 @@ fn (mut d Daemon) renew_queue() ? {
 		}
 	}
 
+	d.queue = new_queue
+
 	// For each repository in repos_map, parse their cron expression (or use
 	// the default one if not present) & add them to the queue
 	for id, repo in d.repos_map {
-		ce := parse_expression(repo.schedule) or { d.global_schedule }
-		// A repo that can't be scheduled will just be skipped for now
-		timestamp := ce.next(now) or { continue }
-
-		new_queue.insert(ScheduledBuild{
-			repo_id: id
-			repo: repo
-			timestamp: timestamp
-		})
+		d.schedule_build(id, repo) ?
 	}
-
-	d.queue = new_queue
 }
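Note: MinHeap<ScheduledBuild> always surfaces the job with the earliest timestamp, which is what makes the queue.peek() ?.timestamp sleep calculation above work. In V, datatypes.MinHeap orders elements through an overloaded < operator; the overload itself isn't part of this diff, so the sketch below is an assumption (fields trimmed to the two that matter):

module main

import datatypes { MinHeap }
import time

struct ScheduledBuild {
pub:
	repo_id string
	timestamp time.Time
}

// Assumed operator overload: the heap picks the smallest element, so ordering
// by timestamp makes pop()/peek() return the job that is due first.
fn (r1 ScheduledBuild) < (r2 ScheduledBuild) bool {
	return r1.timestamp < r2.timestamp
}

fn main() {
	mut queue := MinHeap<ScheduledBuild>{}

	queue.insert(ScheduledBuild{
		repo_id: 'later'
		timestamp: time.now().add_days(1)
	})
	queue.insert(ScheduledBuild{
		repo_id: 'sooner'
		timestamp: time.now()
	})

	sb := queue.pop() or { return }
	println(sb.repo_id) // 'sooner': the earliest timestamp wins
}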
[V source: cron expression module]

@@ -114,7 +114,7 @@ pub fn (ce &CronExpression) next(ref time.Time) ?time.Time {
 	})
 }
 
-fn (ce &CronExpression) next_from_now() ?time.Time {
+pub fn (ce &CronExpression) next_from_now() ?time.Time {
 	return ce.next(time.now())
 }
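Note: making next_from_now public lets callers go straight from an expression to its next fire time, as schedule_build now does. A small usage sketch, assuming the cron.expression module layout seen in the daemon's imports:

module main

import cron.expression { parse_expression }

fn main() {
	// '0 3' is the new default global schedule: minute 0, hour 3.
	ce := parse_expression('0 3') or { return }

	// Previously module-private; now any caller can ask for the next
	// occurrence relative to the current time.
	timestamp := ce.next_from_now() or { return }

	println('next run at $timestamp')
}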
[V source: Docker socket client]

@@ -28,8 +28,8 @@ fn send(req &string) ?http.Response {
 	s.wait_for_write() ?
 
 	mut c := 0
-	mut buf := []byte{len: docker.buf_len}
-	mut res := []byte{}
+	mut buf := []u8{len: docker.buf_len}
+	mut res := []u8{}
 
 	for {
 		c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') }

@@ -52,7 +52,7 @@ fn send(req &string) ?http.Response {
 
 	// We loop until we've encountered the end of the chunked response
 	// A chunked HTTP response always ends with '0\r\n\r\n'.
-	for res.len < 5 || res#[-5..] != [byte(`0`), `\r`, `\n`, `\r`, `\n`] {
+	for res.len < 5 || res#[-5..] != [u8(`0`), `\r`, `\n`, `\r`, `\n`] {
 		// Wait for the server to respond
 		s.wait_for_write() ?
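Note: this file and the three below get the same mechanical rename: V deprecated the byte alias in favor of u8, with identical semantics. A minimal before/after sketch (buffer size arbitrary):

fn main() {
	// Old spelling, now deprecated:
	//   mut buf := []byte{len: 512}
	// New spelling, same 8-bit unsigned type:
	mut buf := []u8{len: 512}

	buf[0] = u8(`0`) // casts and literals use the new name too
	println(buf.len)
}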
[V source: archive helper]

@@ -19,7 +19,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 	}
 
 	// Write the file to the archive
-	buf := [8192]byte{}
+	buf := [8192]u8{}
 	mut len := C.read(fd, &buf, sizeof(buf))
 
 	for len > 0 {
[V source: util module]

@@ -30,7 +30,7 @@ pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
 		file.close()
 	}
 
-	mut buf := []byte{len: util.reader_buf_size}
+	mut buf := []u8{len: util.reader_buf_size}
 	mut bytes_left := length
 
 	// Repeat as long as the stream still has data

@@ -60,7 +60,7 @@ pub fn hash_file(path &string) ?(string, string) {
 	mut sha256sum := sha256.new()
 
 	buf_size := int(1_000_000)
-	mut buf := []byte{len: buf_size}
+	mut buf := []u8{len: buf_size}
 	mut bytes_left := os.file_size(path)
 
 	for bytes_left > 0 {
[V source: web module]

@@ -285,7 +285,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
 	resp.set_status(ctx.status)
 	send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }
 
-	mut buf := []byte{len: 1_000_000}
+	mut buf := []u8{len: 1_000_000}
 	mut bytes_left := file_size
 
 	// Repeat as long as the stream still has data