forked from vieter-v/vieter

Merge pull request 'Better env vars & api for managing repos' (#95) from repos-api into dev

Reviewed-on: Chewing_Bever/vieter#95
commit 6a44eb705a
@@ -18,3 +18,6 @@ test/

# V compiler directory
v/

# gdb log file
gdb.txt
@@ -15,6 +15,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
    * Packages are always rebuilt, even if they haven't changed
    * Hardcoded planning of builds
    * Builds are sequential
* Better environment variable support
    * Each env var can now be provided from a file by appending `_FILE` to its
      name & passing the path to the file as the value
* API for managing Git repositories to build

## Fixed

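A minimal sketch of the `_FILE` convention described above, using the new env module that appears later in this diff (it assumes the snippet lives inside the project's src tree so the module resolves); the address, key value and secret path are invented for illustration, not project defaults:

module main

import os
import env

fn main() {
	// A value passed directly through the environment:
	os.setenv('VIETER_ADDRESS', 'https://example.com', true)

	// The API key is read from a file instead. Setting both VIETER_API_KEY
	// and VIETER_API_KEY_FILE would make env.load fail with a conflict error.
	os.write_file('/tmp/vieter_api_key', 'super-secret') or { panic(err) }
	os.setenv('VIETER_API_KEY_FILE', '/tmp/vieter_api_key', true)

	conf := env.load<env.BuildConfig>() or { panic(err) }
	println(conf.address) // https://example.com
	println(conf.api_key) // super-secret
}

The same pattern applies to every VIETER_* variable shown in the Dockerfile & Makefile changes below.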
@@ -24,9 +24,10 @@ RUN curl --fail \
FROM busybox:1.35.0

ENV PATH=/bin \
    REPO_DIR=/data/repo \
    PKG_DIR=/data/pkgs \
    DOWNLOAD_DIR=/data/downloads
    VIETER_REPO_DIR=/data/repo \
    VIETER_PKG_DIR=/data/pkgs \
    VIETER_DOWNLOAD_DIR=/data/downloads \
    VIETER_REPOS_FILE=/data/repos.json

COPY --from=builder /app/dumb-init /app/vieter /bin/
Makefile (35 changed lines)
@@ -13,10 +13,23 @@ vieter: $(SOURCES)
	$(V) -g -o vieter $(SRC_DIR)

# Debug build using gcc
# The debug build can't use the boehm garbage collector, as that is
# multi-threaded and causes issues when running vieter inside gdb.
.PHONY: debug
debug: dvieter
dvieter: $(SOURCES)
	$(V) -keepc -cg -cc gcc -o dvieter $(SRC_DIR)
	$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)

# Run the debug build inside gdb
.PHONY: gdb
gdb: dvieter
	VIETER_API_KEY=test \
		VIETER_DOWNLOAD_DIR=data/downloads \
		VIETER_REPO_DIR=data/repo \
		VIETER_PKG_DIR=data/pkgs \
		VIETER_LOG_LEVEL=DEBUG \
		VIETER_REPOS_FILE=data/repos.json \
		gdb --args ./dvieter

# Optimised production build
.PHONY: prod
@@ -34,16 +47,22 @@ c:
# Run the server in the default 'data' directory
.PHONY: run
run: vieter
	API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG ./vieter server
	VIETER_API_KEY=test \
		VIETER_DOWNLOAD_DIR=data/downloads \
		VIETER_REPO_DIR=data/repo \
		VIETER_PKG_DIR=data/pkgs \
		VIETER_LOG_LEVEL=DEBUG \
		VIETER_REPOS_FILE=data/repos.json \
		./vieter server

.PHONY: run-prod
run-prod: prod
	API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG ./pvieter

# Same as run, but restart when the source code changes
.PHONY: watch
watch:
	API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG $(V) watch run vieter
	VIETER_API_KEY=test \
		VIETER_DOWNLOAD_DIR=data/downloads \
		VIETER_REPO_DIR=data/repo \
		VIETER_PKG_DIR=data/pkgs \
		VIETER_LOG_LEVEL=DEBUG \
	./pvieter server

# =====OTHER=====
.PHONY: lint
src/build.v (25 changed lines)
@@ -4,21 +4,22 @@ import docker
import encoding.base64
import rand
import time
import os
import json
import git
import server
import env
import net.http

const container_build_dir = '/build'

fn build(key string, repo_dir string) ? {
	server_url := os.getenv_opt('VIETER_ADDRESS') or {
		exit_with_message(1, 'No Vieter server address was provided.')
	}
fn build() ? {
	conf := env.load<env.BuildConfig>() ?

	// Read in the repos from a json file
	filename := os.join_path_single(repo_dir, 'repos.json')
	txt := os.read_file(filename) ?
	repos := json.decode([]git.GitRepo, txt) ?
	// We get the repos list from the Vieter instance
	mut req := http.new_request(http.Method.get, '$conf.address/api/repos', '') ?
	req.add_custom_header('X-Api-Key', conf.api_key) ?

	res := req.do() ?
	repos := json.decode([]server.GitRepo, res.text) ?

	mut commands := [
		// Update repos & install required packages
@@ -48,7 +49,7 @@ fn build(key string, repo_dir string) ? {
		uuids << uuid

		commands << "su builder -c 'git clone --single-branch --depth 1 --branch $repo.branch $repo.url /build/$uuid'"
		commands << 'su builder -c \'cd /build/$uuid && makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\${pkg}" -H "X-API-KEY: \$API_KEY" $server_url/publish; done\''
		commands << 'su builder -c \'cd /build/$uuid && makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\${pkg}" -H "X-API-KEY: \$API_KEY" $conf.address/publish; done\''
	}

	// We convert the list of commands into a base64 string, which then gets
@@ -57,7 +58,7 @@ fn build(key string, repo_dir string) ? {

	c := docker.NewContainer{
		image: 'archlinux:latest'
		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$key']
		env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
		entrypoint: ['/bin/sh', '-c']
		cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/sh -e']
	}
@@ -0,0 +1,86 @@
module env

import os

// The prefix that every environment variable should have
const prefix = 'VIETER_'

// The suffix an environment variable must have in order for it to be loaded
// from a file instead
const file_suffix = '_FILE'

pub struct ServerConfig {
pub:
	log_level    string [default: WARN]
	log_file     string [default: 'vieter.log']
	pkg_dir      string
	download_dir string
	api_key      string
	repo_dir     string
	repos_file   string
}

pub struct BuildConfig {
pub:
	api_key string
	address string
}

fn get_env_var(field_name string) ?string {
	env_var_name := '$env.prefix$field_name.to_upper()'
	env_file_name := '$env.prefix$field_name.to_upper()$env.file_suffix'
	env_var := os.getenv(env_var_name)
	env_file := os.getenv(env_file_name)

	// If both aren't set, we report them missing
	if env_var == '' && env_file == '' {
		return error('Either $env_var_name or $env_file_name is required.')
	}

	// If they're both set, we report a conflict
	if env_var != '' && env_file != '' {
		return error('Only one of $env_var_name or $env_file_name can be defined.')
	}

	// If it's the env var itself, we return it.
	// I'm pretty sure this also prevents variables ending in _FILE (e.g.
	// VIETER_LOG_FILE) from mistakenly being read as a _FILE-suffixed env
	// var.
	if env_var != '' {
		return env_var
	}

	// Otherwise, we process the file
	return os.read_file(env_file) or {
		error('Failed to read file defined in $env_file_name: ${err.msg}.')
	}
}

// load<T> attempts to create the given type from environment variables. For
// each field, the corresponding env var is its name in uppercase prepended
// with the hardcoded prefix. If this one isn't present, it looks for the env
// var with the file_suffix suffix.
pub fn load<T>() ?T {
	res := T{}

	$for field in T.fields {
		res.$(field.name) = get_env_var(field.name) or {
			// We use the default instead, if it's present
			mut default := ''

			for attr in field.attrs {
				if attr.starts_with('default: ') {
					default = attr[9..]
					break
				}
			}

			if default == '' {
				return err
			}

			default
		}
	}
	return res
}
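As the doc comment on load<T> explains, each struct field maps to a VIETER_-prefixed, upper-cased env var, with a `[default:]` attribute as fallback. A hedged sketch with an invented config struct (MirrorConfig and its fields are not part of this PR, purely an illustration of the generic loader):

module main

import os
import env

pub struct MirrorConfig {
pub:
	api_key string
	region  string [default: eu]
}

fn main() {
	// api_key is resolved from VIETER_API_KEY (or VIETER_API_KEY_FILE)...
	os.setenv('VIETER_API_KEY', 'test', true)

	// ...while region is left unset & falls back to its default attribute.
	conf := env.load<MirrorConfig>() or { panic(err) }
	println(conf.region) // eu
}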
@@ -1,7 +0,0 @@
module git

pub struct GitRepo {
pub:
	url    string [required]
	branch string [required]
}
							
								
								
									
src/main.v (63 changed lines)
@@ -1,68 +1,17 @@
module main

import web
import os
import io
import repo

const port = 8000

const buf_size = 1_000_000

struct App {
	web.Context
pub:
	api_key string [required; web_global]
	dl_dir  string [required; web_global]
pub mut:
	repo repo.Repo [required; web_global]
}

[noreturn]
fn exit_with_message(code int, msg string) {
	eprintln(msg)
	exit(code)
}

fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
	mut file := os.create(path) ?
	defer {
		file.close()
	}

	mut buf := []byte{len: buf_size}
	mut bytes_left := length

	// Repeat as long as the stream still has data
	for bytes_left > 0 {
		// TODO check if just breaking here is safe
		bytes_read := reader.read(mut buf) or { break }
		bytes_left -= bytes_read

		mut to_write := bytes_read

		for to_write > 0 {
			// TODO don't just loop infinitely here
			bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }

			to_write = to_write - bytes_written
		}
	}
}
import server
import util

fn main() {
	key := os.getenv_opt('API_KEY') or { exit_with_message(1, 'No API key was provided.') }
	repo_dir := os.getenv_opt('REPO_DIR') or {
		exit_with_message(1, 'No repo directory was configured.')
	}

	if os.args.len == 1 {
		exit_with_message(1, 'No action provided.')
		util.exit_with_message(1, 'No action provided.')
	}

	match os.args[1] {
		'server' { server(key, repo_dir) }
		'build' { build(key, repo_dir) ? }
		else { exit_with_message(1, 'Unknown action: ${os.args[1]}') }
		'server' { server.server() ? }
		'build' { build() ? }
		else { util.exit_with_message(1, 'Unknown action: ${os.args[1]}') }
	}
}
@@ -2,17 +2,12 @@ module repo

import os
import package

// Dummy struct to work around the fact that you can only share structs, maps &
// arrays
pub struct Dummy {
	x int
}
import util

// This struct manages a single repository.
pub struct Repo {
mut:
	mutex shared Dummy
	mutex shared util.Dummy
pub:
	// Where to store repository files
	repo_dir string [required]
src/server.v (51 changed lines)
@@ -1,51 +0,0 @@
module main

import web
import os
import log
import repo

fn server(key string, repo_dir string) {
	// Configure logger
	log_level_str := os.getenv_opt('LOG_LEVEL') or { 'WARN' }
	log_level := log.level_from_tag(log_level_str) or {
		exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
	}
	log_file := os.getenv_opt('LOG_FILE') or { 'vieter.log' }

	mut logger := log.Log{
		level: log_level
	}

	logger.set_full_logpath(log_file)
	logger.log_to_console_too()

	defer {
		logger.info('Flushing log file')
		logger.flush()
		logger.close()
	}

	// Configure web server
	pkg_dir := os.getenv_opt('PKG_DIR') or {
		exit_with_message(1, 'No package directory was configured.')
	}
	dl_dir := os.getenv_opt('DOWNLOAD_DIR') or {
		exit_with_message(1, 'No download directory was configured.')
	}

	// This also creates the directories if needed
	repo := repo.new(repo_dir, pkg_dir) or {
		logger.error(err.msg)
		exit(1)
	}

	os.mkdir_all(dl_dir) or { exit_with_message(1, 'Failed to create download directory.') }

	web.run(&App{
		logger: logger
		api_key: key
		dl_dir: dl_dir
		repo: repo
	}, port)
}
@@ -1,4 +1,4 @@
module main
module server

import net.http

@@ -7,5 +7,5 @@ fn (mut app App) is_authorized() bool {
		return false
	}

	return x_header.trim_space() == app.api_key
	return x_header.trim_space() == app.conf.api_key
}
@@ -0,0 +1,129 @@
module server

import web
import os
import json

const repos_file = 'repos.json'

pub struct GitRepo {
pub:
	url    string [required]
	branch string [required]
}

fn read_repos(path string) ?[]GitRepo {
	if !os.exists(path) {
		mut f := os.create(path) ?

		defer {
			f.close()
		}

		f.write_string('[]') ?

		return []
	}

	content := os.read_file(path) ?
	res := json.decode([]GitRepo, content) ?
	return res
}

fn write_repos(path string, repos []GitRepo) ? {
	mut f := os.create(path) ?

	defer {
		f.close()
	}

	value := json.encode(repos)
	f.write_string(value) ?
}

['/api/repos'; get]
fn (mut app App) get_repos() web.Result {
	if !app.is_authorized() {
		return app.text('Unauthorized.')
	}

	repos := rlock app.git_mutex {
		read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.server_error(500)
		}
	}

	return app.json(repos)
}

['/api/repos'; post]
fn (mut app App) post_repo() web.Result {
	if !app.is_authorized() {
		return app.text('Unauthorized.')
	}

	if !('url' in app.query && 'branch' in app.query) {
		return app.server_error(400)
	}

	new_repo := GitRepo{
		url: app.query['url']
		branch: app.query['branch']
	}

	mut repos := rlock app.git_mutex {
		read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.server_error(500)
		}
	}

	// We need to check for duplicates
	for r in repos {
		if r == new_repo {
			return app.text('Duplicate repository.')
		}
	}

	repos << new_repo

	lock app.git_mutex {
		write_repos(app.conf.repos_file, repos) or { return app.server_error(500) }
	}

	return app.ok('Repo added successfully.')
}

['/api/repos'; delete]
fn (mut app App) delete_repo() web.Result {
	if !app.is_authorized() {
		return app.text('Unauthorized.')
	}

	if !('url' in app.query && 'branch' in app.query) {
		return app.server_error(400)
	}

	repo_to_remove := GitRepo{
		url: app.query['url']
		branch: app.query['branch']
	}

	mut repos := rlock app.git_mutex {
		read_repos(app.conf.repos_file) or {
			app.lerror('Failed to read repos file.')

			return app.server_error(500)
		}
	}
	filtered := repos.filter(it != repo_to_remove)

	lock app.git_mutex {
		write_repos(app.conf.repos_file, filtered) or { return app.server_error(500) }
	}

	return app.ok('Repo removed successfully.')
}
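The three routes above only read the X-Api-Key header plus `url` & `branch` query parameters, so a client can be sketched with the same net.http calls build.v already uses. Everything below is illustrative (the helper names, address, key & repository URL are placeholders), and a real client would want to percent-encode the repository URL before putting it in the query string:

module main

import net.http

fn add_repo(address string, api_key string, url string, branch string) ? {
	mut req := http.new_request(http.Method.post, '$address/api/repos?url=$url&branch=$branch', '') ?
	req.add_custom_header('X-Api-Key', api_key) ?

	res := req.do() ?
	// 'Repo added successfully.', 'Duplicate repository.' or 'Unauthorized.'
	println(res.text)
}

fn remove_repo(address string, api_key string, url string, branch string) ? {
	mut req := http.new_request(http.Method.delete, '$address/api/repos?url=$url&branch=$branch', '') ?
	req.add_custom_header('X-Api-Key', api_key) ?

	res := req.do() ?
	println(res.text)
}

fn main() {
	add_repo('http://localhost:8000', 'test', 'https://example.com/some-pkgbuild-repo.git', 'master') or {
		eprintln(err.msg)
	}
}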
@@ -1,29 +1,11 @@
module main
module server

import web
import os
import repo
import time
import rand

const prefixes = ['B', 'KB', 'MB', 'GB']

// pretty_bytes converts a byte count to human-readable version
fn pretty_bytes(bytes int) string {
	mut i := 0
	mut n := f32(bytes)

	for n >= 1024 {
		i++
		n /= 1024
	}

	return '${n:.2}${prefixes[i]}'
}

fn is_pkg_name(s string) bool {
	return s.contains('.pkg')
}
import util

// healthcheck just returns a string, but can be used to quickly check if the
// server is still responsive.
@@ -58,18 +40,18 @@ fn (mut app App) put_package() web.Result {

	if length := app.req.header.get(.content_length) {
		// Generate a random filename for the temp file
		pkg_path = os.join_path_single(app.dl_dir, rand.uuid_v4())
		pkg_path = os.join_path_single(app.conf.download_dir, rand.uuid_v4())

		for os.exists(pkg_path) {
			pkg_path = os.join_path_single(app.dl_dir, rand.uuid_v4())
			pkg_path = os.join_path_single(app.conf.download_dir, rand.uuid_v4())
		}

		app.ldebug("Uploading $length bytes (${pretty_bytes(length.int())}) to '$pkg_path'.")
		app.ldebug("Uploading $length bytes (${util.pretty_bytes(length.int())}) to '$pkg_path'.")

		// This is used to time how long it takes to upload a file
		mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })

		reader_to_file(mut app.reader, length.int(), pkg_path) or {
		util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
			app.lwarn("Failed to upload '$pkg_path'")

			return app.text('Failed to upload file.')
@@ -0,0 +1,59 @@
module server

import web
import os
import log
import repo
import env
import util

const port = 8000

struct App {
	web.Context
pub:
	conf env.ServerConfig [required; web_global]
pub mut:
	repo repo.Repo [required; web_global]
	// This is used to claim the file lock on the repos file
	git_mutex shared util.Dummy
}

// server starts the web server & starts listening for requests
pub fn server() ? {
	conf := env.load<env.ServerConfig>() ?

	// Configure logger
	log_level := log.level_from_tag(conf.log_level) or {
		util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
	}

	mut logger := log.Log{
		level: log_level
	}

	logger.set_full_logpath(conf.log_file)
	logger.log_to_console_too()

	defer {
		logger.info('Flushing log file')
		logger.flush()
		logger.close()
	}

	// This also creates the directories if needed
	repo := repo.new(conf.repo_dir, conf.pkg_dir) or {
		logger.error(err.msg)
		exit(1)
	}

	os.mkdir_all(conf.download_dir) or {
		util.exit_with_message(1, 'Failed to create download directory.')
	}

	web.run(&App{
		logger: logger
		conf: conf
		repo: repo
	}, server.port)
}
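A hedged sketch of what server() needs at startup, mirroring the test values from the Makefile's run target; only the five ServerConfig fields without a `[default:]` attribute have to be provided (log_level & log_file fall back to WARN and vieter.log), and every value here is just an example:

module main

import os
import server

fn main() {
	os.setenv('VIETER_API_KEY', 'test', true)
	os.setenv('VIETER_DOWNLOAD_DIR', 'data/downloads', true)
	os.setenv('VIETER_REPO_DIR', 'data/repo', true)
	os.setenv('VIETER_PKG_DIR', 'data/pkgs', true)
	os.setenv('VIETER_REPOS_FILE', 'data/repos.json', true)

	// Loads ServerConfig, sets up the logger & repo, then blocks in web.run.
	server.server() or { panic(err) }
}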
							
								
								
									
src/util.v (59 changed lines)
@@ -1,9 +1,55 @@
module util

import os
import io
import crypto.md5
import crypto.sha256

const reader_buf_size = 1_000_000

const prefixes = ['B', 'KB', 'MB', 'GB']

// Dummy struct to work around the fact that you can only share structs, maps &
// arrays
pub struct Dummy {
	x int
}

// exit_with_message exits the program with a given status code after having
// first printed a specific message to STDERR
[noreturn]
pub fn exit_with_message(code int, msg string) {
	eprintln(msg)
	exit(code)
}

// reader_to_file writes the contents of a BufferedReader to a file
pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
	mut file := os.create(path) ?
	defer {
		file.close()
	}

	mut buf := []byte{len: util.reader_buf_size}
	mut bytes_left := length

	// Repeat as long as the stream still has data
	for bytes_left > 0 {
		// TODO check if just breaking here is safe
		bytes_read := reader.read(mut buf) or { break }
		bytes_left -= bytes_read

		mut to_write := bytes_read

		for to_write > 0 {
			// TODO don't just loop infinitely here
			bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }

			to_write = to_write - bytes_written
		}
	}
}

// hash_file returns the md5 & sha256 hash of a given file
// TODO actually implement sha256
pub fn hash_file(path &string) ?(string, string) {
@@ -32,3 +78,16 @@ pub fn hash_file(path &string) ?(string, string) {

	return md5sum.checksum().hex(), sha256sum.checksum().hex()
}

// pretty_bytes converts a byte count to human-readable version
pub fn pretty_bytes(bytes int) string {
	mut i := 0
	mut n := f32(bytes)

	for n >= 1024 {
		i++
		n /= 1024
	}

	return '${n:.2}${util.prefixes[i]}'
}
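For completeness, a small usage sketch of the relocated helpers (the values are arbitrary examples):

module main

import util

fn main() {
	// Walks the B/KB/MB/GB prefixes in steps of 1024; prints roughly '2.38MB'.
	println(util.pretty_bytes(2_500_000))

	// Shared replacement for the old helper in main.v: print the message to
	// stderr, then exit with the given status code.
	util.exit_with_message(0, 'done')
}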