Merge pull request 'Release 0.2.0' (#121) from release-0.2.0 into main

Reviewed-on: #121
main 0.2.0
Jef Roosens 2022-04-11 09:41:43 +02:00
commit 4b6a661d71
42 changed files with 1607 additions and 668 deletions

.editorconfig

@@ -1,4 +1,3 @@
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
@@ -7,4 +6,5 @@ end_of_line = lf
insert_final_newline = true
[*.v]
indent_style = space
# vfmt wants it :(
indent_style = tab

.gitignore vendored

@@ -5,6 +5,8 @@ data/
vieter
dvieter
pvieter
dvieterctl
vieterctl
vieter.c
# Ignore testing files
@@ -17,4 +19,7 @@ libarchive-*
test/
# V compiler directory
v-*/
v/
# gdb log file
gdb.txt


@@ -0,0 +1,30 @@
matrix:
PLATFORM:
- linux/amd64
- linux/arm64
platform: ${PLATFORM}
branches: [dev]
pipeline:
build:
image: 'menci/archlinuxarm:base-devel'
commands:
# Update packages
- pacman -Syu --noconfirm
# Create a non-root user to perform the build & switch to it
- groupadd -g 1000 builder
- useradd -mg builder builder
- chown -R builder:builder "$PWD"
- "echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers"
- su builder
# Build the package
- makepkg -s --noconfirm --needed
publish:
image: 'curlimages/curl'
commands:
# Publish the package
- 'for pkg in $(ls -1 *.pkg*); do curl -XPOST -T "$pkg" -H "X-API-KEY: $VIETER_API_KEY" https://arch.r8r.be/vieter/publish; done'
secrets:
- vieter_api_key


@@ -2,7 +2,8 @@ matrix:
PLATFORM:
- linux/amd64
- linux/arm64
- linux/arm/v7
# I just don't have a performant enough runner for this platform
# - linux/arm/v7
# These checks already get performed on the feature branches
platform: ${PLATFORM}
@@ -42,12 +43,12 @@ pipeline:
commands:
# https://gist.github.com/JustinTimperio/7c7115f87b775618637d67ac911e595f
- export URL=s3.rustybever.be
- export DATE="$(date -R --utc)"
- export CONTENT_TYPE='application/zstd'
- export OBJ_PATH="/vieter/commits/$CI_COMMIT_SHA/vieter-$(echo '${PLATFORM}' | sed 's:/:-:g')"
- export SIG_STRING="PUT\n\n$CONTENT_TYPE\n$DATE\n$OBJ_PATH"
- export SIGNATURE=`echo -en $SIG_STRING | openssl sha1 -hmac $S3_PASSWORD -binary | base64`
- >
curl
--silent


@@ -1,18 +0,0 @@
branches: dev
platform: linux/amd64
pipeline:
publish:
image: woodpeckerci/plugin-docker-buildx
secrets: [ docker_username, docker_password ]
settings:
repo: chewingbever/vlang
tag: latest
dockerfile: Dockerfile.builder
platforms: [ linux/arm/v7, linux/arm64/v8, linux/amd64 ]
when:
event: push
path:
- Makefile
- Dockerfile.builder
- patches/*


@@ -1,15 +0,0 @@
# Deploys the newest development image to my server
branches: [dev]
platform: linux/amd64
depends_on:
- docker
skip_clone: true
pipeline:
webhook:
image: chewingbever/vlang:latest
secrets:
- webhook
commands:
- curl -XPOST -s "$WEBHOOK"


@@ -1,7 +1,6 @@
branches: [main, dev]
platform: linux/amd64
depends_on:
- builder
- build
pipeline:
@@ -10,9 +9,8 @@ pipeline:
secrets: [ docker_username, docker_password ]
settings:
repo: chewingbever/vieter
dockerfile: Dockerfile.ci
tag: dev
platforms: [ linux/arm/v7, linux/arm64/v8, linux/amd64 ]
platforms: [ linux/arm64/v8, linux/amd64 ]
build_args_from_env:
- CI_COMMIT_SHA
when:
@@ -24,9 +22,8 @@ pipeline:
secrets: [ docker_username, docker_password ]
settings:
repo: chewingbever/vieter
dockerfile: Dockerfile.ci
auto_tag: true
platforms: [ linux/arm/v7, linux/arm64/v8, linux/amd64 ]
platforms: [ linux/arm64/v8, linux/amd64 ]
build_args_from_env:
- CI_COMMIT_SHA
when:

CHANGELOG.md

@@ -7,6 +7,40 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased](https://git.rustybever.be/Chewing_Bever/vieter)
## [0.2.0](https://git.rustybever.be/Chewing_Bever/vieter/src/tag/0.2.0)
### Changed
* Better config system
* Support for both a config file & environment variables
* Each env var can now be provided from a file by appending `_FILE` to its
name & passing the path to the file as the value
* Revamped web framework
* All routes now return proper JSON where applicable & the correct status
codes
### Added
* Very basic build system
* Build is triggered by separate cron container
* Packages are built on the cron container's system
* A HEAD request is used to determine whether a package should be rebuilt
or not
* Hardcoded planning of builds
* Builds are sequential
* API for managing Git repositories to build
* CLI to list, add & remove Git repos to build
* Published packages on my Vieter instance
* Support for multiple repositories
* Support for multiple architectures per repository
### Fixed
* Each package can now only have one version in the repository at once
(required by Pacman)
* Packages with unknown fields in .PKGINFO are now allowed
* Old packages are now properly removed
## [0.1.0](https://git.rustybever.be/Chewing_Bever/vieter/src/tag/0.1.0)
### Changed

Dockerfile

@@ -1,19 +1,56 @@
FROM chewingbever/vlang:latest AS builder
ARG TARGETPLATFORM
ARG CI_COMMIT_SHA
ARG DI_VER=1.2.5
WORKDIR /app
# Build dumb-init
RUN curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.tar.gz" | tar -xzf - && \
cd "dumb-init-${DI_VER}" && \
make SHELL=/bin/sh && \
mv dumb-init .. && \
cd ..
# Copy over source code & build production binary
COPY src ./src
COPY Makefile ./
ENV LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static'
RUN v -o pvieter -cflags "-O3" src
RUN if [ -n "${CI_COMMIT_SHA}" ]; then \
curl --fail \
-o vieter \
"https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
chmod +x vieter ; \
else \
LDFLAGS='-lz -lbz2 -llzma -lexpat -lzstd -llz4 -static' make prod && \
mv pvieter vieter ; \
fi
FROM alpine:3.15
FROM busybox:1.35.0
ENV REPO_DIR=/data
ENV PATH=/bin \
VIETER_REPOS_DIR=/data/repos \
VIETER_PKG_DIR=/data/pkgs \
VIETER_DOWNLOAD_DIR=/data/downloads \
VIETER_REPOS_FILE=/data/repos.json
COPY --from=builder /app/pvieter /usr/local/bin/vieter
COPY --from=builder /app/dumb-init /app/vieter /bin/
ENTRYPOINT [ "/usr/local/bin/vieter" ]
HEALTHCHECK --interval=30s \
--timeout=3s \
--start-period=5s \
CMD /bin/wget --spider http://localhost:8000/health || exit 1
RUN mkdir /data && \
chown -R www-data:www-data /data && \
mkdir -p '/var/spool/cron/crontabs' && \
echo '0 3 * * * /bin/vieter build' | crontab -
WORKDIR /data
USER www-data:www-data
ENTRYPOINT ["/bin/dumb-init", "--"]
CMD ["/bin/vieter", "server"]

Dockerfile.builder

@@ -1,35 +0,0 @@
FROM alpine:3.12
ARG TARGETPLATFORM
WORKDIR /opt/vlang
ENV VVV /opt/vlang
ENV PATH /opt/vlang:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV VFLAGS -cc gcc
ENV V_PATH /opt/vlang/v
RUN ln -s /opt/vlang/v /usr/bin/v && \
apk --no-cache add \
git make gcc curl openssl \
musl-dev \
openssl-libs-static openssl-dev \
zlib-static bzip2-static xz-dev expat-static zstd-static lz4-static \
sqlite-static sqlite-dev \
libx11-dev glfw-dev freetype-dev \
libarchive-static libarchive-dev \
diffutils
COPY patches ./patches
COPY Makefile ./
RUN make v && \
mv v-*/* /opt/vlang && \
v -version
RUN if [ "$TARGETPLATFORM" = 'linux/amd64' ]; then \
wget -O /usr/local/bin/mc https://dl.min.io/client/mc/release/linux-amd64/mc && \
chmod +x /usr/local/bin/mc ; \
fi
CMD ["v"]

Dockerfile.ci

@@ -1,46 +0,0 @@
# vim: ft=dockerfile
# This image just has the required tools to download the binaries
FROM chewingbever/vlang:latest AS builder
ARG TARGETPLATFORM
ARG CI_COMMIT_SHA
ARG DI_VER=1.2.5
WORKDIR /app
# Build dumb-init
RUN curl -Lo - "https://github.com/Yelp/dumb-init/archive/refs/tags/v${DI_VER}.tar.gz" | tar -xzf - && \
cd "dumb-init-${DI_VER}" && \
make SHELL=/bin/sh && \
mv dumb-init .. && \
cd ..
RUN curl --fail \
-o vieter \
"https://s3.rustybever.be/vieter/commits/${CI_COMMIT_SHA}/vieter-$(echo "${TARGETPLATFORM}" | sed 's:/:-:g')" && \
chmod +x vieter
FROM busybox:1.35.0
ENV PATH=/bin \
REPO_DIR=/data/repo \
PKG_DIR=/data/pkgs \
DOWNLOAD_DIR=/data/downloads
COPY --from=builder /app/dumb-init /app/vieter /bin/
HEALTHCHECK --interval=30s \
--timeout=3s \
--start-period=5s \
CMD /bin/wget --spider http://localhost:8000/health || exit 1
RUN mkdir /data && \
chown -R www-data:www-data /data
WORKDIR /data
USER www-data:www-data
ENTRYPOINT ["/bin/dumb-init", "--"]
CMD ["/bin/vieter"]

Makefile

@@ -2,9 +2,8 @@
SRC_DIR := src
SOURCES != find '$(SRC_DIR)' -iname '*.v'
V_RELEASE := weekly.2022.05
V_PATH ?= v-$(V_RELEASE)/v
V := $(V_PATH) -showcc
V_PATH ?= v/v
V := $(V_PATH) -showcc -gc boehm
all: vieter
@@ -14,10 +13,17 @@ vieter: $(SOURCES)
$(V) -g -o vieter $(SRC_DIR)
# Debug build using gcc
# The debug build can't use the boehm garbage collector, as that is
# multi-threaded and causes issues when running vieter inside gdb.
.PHONY: debug
debug: dvieter
dvieter: $(SOURCES)
$(V) -keepc -cg -cc gcc -o dvieter $(SRC_DIR)
$(V_PATH) -showcc -keepc -cg -o dvieter $(SRC_DIR)
# Run the debug build inside gdb
.PHONY: gdb
gdb: dvieter
gdb --args './dvieter -f vieter.toml server'
# Optimised production build
.PHONY: prod
@@ -30,22 +36,15 @@ pvieter: $(SOURCES)
c:
$(V) -o vieter.c $(SRC_DIR)
# =====EXECUTION=====
# Run the server in the default 'data' directory
.PHONY: run
run: vieter
API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG ./vieter
./vieter -f vieter.toml server
.PHONY: run-prod
run-prod: prod
API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG ./pvieter
# Same as run, but restart when the source code changes
.PHONY: watch
watch:
API_KEY=test DOWNLOAD_DIR=data/downloads REPO_DIR=data/repo PKG_DIR=data/pkgs LOG_LEVEL=DEBUG $(V) watch run vieter
./pvieter -f vieter.toml server
# =====OTHER=====
.PHONY: lint
@@ -63,11 +62,10 @@ vet:
# Build & patch the V compiler
.PHONY: v
v: v-$(V_RELEASE)/v
v-$(V_RELEASE)/v:
curl -Lo - 'https://github.com/vlang/v/archive/refs/tags/$(V_RELEASE).tar.gz' | tar xzf -
cd patches && sh patch.sh '../v-$(V_RELEASE)'
make -C 'v-$(V_RELEASE)'
v: v/v
v/v:
git clone --single-branch --branch patches https://git.rustybever.be/Chewing_Bever/vieter-v v
make -C v
clean:
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'v-$(V_RELEASE)'
rm -rf 'data' 'vieter' 'dvieter' 'pvieter' 'vieter.c' 'dvieterctl' 'vieterctl' 'pkg' 'src/vieter'

PKGBUILD 100644

@@ -0,0 +1,34 @@
# Maintainer: Jef Roosens
pkgbase='vieter'
pkgname='vieter'
pkgver=0.1.0.rc1.r117.gc3ac00f
pkgrel=1
depends=('glibc' 'openssl' 'libarchive' 'gc')
makedepends=('git' 'gcc')
arch=('x86_64' 'aarch64' 'armv7')
url='https://git.rustybever.be/Chewing_Bever/vieter'
license=('AGPL3')
source=($pkgname::git+https://git.rustybever.be/Chewing_Bever/vieter#branch=dev)
md5sums=('SKIP')
pkgver() {
cd "$pkgname"
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
cd "$pkgname"
# Build the compiler
CFLAGS= make v
make prod
}
package() {
pkgdesc="Vieter is a lightweight implementation of an Arch repository server."
install -dm755 "$pkgdir/usr/bin"
install -Dm755 "$pkgbase/pvieter" "$pkgdir/usr/bin/vieter"
}
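
For anyone consuming this PKGBUILD, the usual makepkg flow applies; a minimal sketch, assuming makepkg defaults and that the PKGBUILD sits in the current directory:

    # -s resolves depends/makedepends via pacman, -i installs the built package
    makepkg -si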

README.md

@@ -1,10 +1,17 @@
# Vieter
Vieter is a re-implementation of the Pieter project. The goal is to create a
simple PKGBUILD-based build system, combined with a self-hosted Arch
repository. This would allow me to periodically re-build AUR packages (or
PKGBUILDs I created myself), & make sure I never have to compile anything on my
own systems, making my updates a lot quicker.
## Documentation
I host documentation for Vieter over at https://rustybever.be/docs/vieter/.
## Overview
Vieter is a restart of the Pieter project. The goal is to create a simple,
lightweight self-hostable Arch repository server, paired with a system that
periodically builds & publishes select Arch packages. This would allow me to
build AUR packages (or PKGBUILDs I created myself) "in the cloud" & make sure I
never have to compile anything on my own systems, making my updates a lot
quicker.
## Why V?
@@ -16,31 +23,30 @@ that.
### Custom Compiler
Currently, this program only works with a very slightly modified version of the
V standard library, and therefore the compiler. The changes that are made to
the standard V release can be found in the [patches](/patches) directory. You
can obtain this modified version of the compiler by running `make v`, which
will download, patch & build the compiler. Afterwards, all make commands that
require the V compiler will use this new binary.
V standard library, and therefore the compiler. The source code for this fork
can be found [here](https://git.rustybever.be/Chewing_Bever/vieter-v). You can
obtain this modified version of the compiler by running `make v`, which will
clone & build the compiler. Afterwards, all make commands that require the V
compiler will use this new binary. I try to keep this fork as up to date with
upstream as possible.
## Features
The project will consist of a server-agent model, where one or more builder
nodes can register with the server. These agents communicate with the Docker
daemon to start builds, which are then uploaded to the server's repository. The
server also allows for non-agents to upload packages, as long as they have the
required secrets. This allows me to also develop non-git packages, such as my
terminal, & upload them to the servers using CI.
* Arch repository server
* Support for multiple repositories & multiple architectures
* Endpoints for publishing new packages
* API for managing repositories to build
* Build system
* Periodic rebuilding of packages
* Prevent unnecessary rebuilds
## Directory Structure
## Building
The data directory consists of three main directories:
In order to build Vieter, you'll need a couple of libraries:
* `downloads` - This is where packages are initially downloaded. Because vieter
moves files from this folder to the `pkgs` folder, these two folders should
ideally be on the same drive
* `pkgs` - This is where approved package files are stored.
* `repos` - Each repository gets a subfolder here. The subfolder contains the
uncompressed contents of the db file.
* Each repo subdirectory contains the compressed db & files archive for the
repository, alongside a directory called `files` which contains the
uncompressed contents.
* gc
* libarchive
* openssl
Before building Vieter, you'll have to build the compiler using `make v`.
Afterwards, run `make` to build the debug binary.
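
For reference, a full from-source bootstrap might look like this; the pacman package names are assumptions for an Arch-like host, so check your distro's repositories:

    pacman -S --needed gc libarchive openssl git make gcc
    git clone https://git.rustybever.be/Chewing_Bever/vieter.git && cd vieter
    make v     # clone & build the patched V compiler into v/
    make       # debug build, produces ./vieter
    make prod  # optimised static build, produces ./pvieter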

patches/parse_request_no_body.v

@@ -1,23 +0,0 @@
// Parse the header of a raw HTTP request into a Request object
pub fn parse_request_head(mut reader io.BufferedReader) ?Request {
// request line
mut line := reader.read_line() ?
method, target, version := parse_request_line(line) ?
// headers
mut header := new_header()
line = reader.read_line() ?
for line != '' {
key, value := parse_header(line) ?
header.add_custom(key, value) ?
line = reader.read_line() ?
}
header.coerce(canonicalize: true)
return Request{
method: method
url: target.str()
header: header
version: version
}
}

patches/patch.sh

@@ -1,13 +0,0 @@
#!/usr/bin/env sh
# This file patches the downloaded V version
# Should be run from within the directory it's in, as it uses relative paths to the files used for patching.
# $1 is the path to the downloaded V version
# Add parse_request_no_body
cat parse_request_no_body.v >> "$1"/vlib/net/http/request.v
# Make sha256 functions public
sed -i \
-e 's/\(fn (mut d Digest) checksum(\)/pub \1/' \
-e 's/\(fn (mut d Digest) write(\)/pub \1/' \
"$1"/vlib/crypto/sha256/sha256.v


@@ -15,6 +15,9 @@ fn C.archive_read_support_filter_zstd(&C.archive)
// Configure the archive to work with gzip compression
fn C.archive_read_support_filter_gzip(&C.archive)
// Configure the archive to work with xz compression
fn C.archive_read_support_filter_xz(&C.archive)
// Configure the archive to work with tarball content
fn C.archive_read_support_format_tar(&C.archive)

src/build/build.v 100644

@@ -0,0 +1,133 @@
module build
import docker
import encoding.base64
import time
import git
import os
const container_build_dir = '/build'
const build_image_repo = 'vieter-build'
fn create_build_image(base_image string) ?string {
commands := [
// Update repos & install required packages
'pacman -Syu --needed --noconfirm base-devel git'
// Add a non-root user to run makepkg
'groupadd -g 1000 builder',
'useradd -mg builder builder'
// Make sure they can use sudo without a password
"echo 'builder ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers"
// Create the directory for the builds & make it writeable for the
// build user
'mkdir /build',
'chown -R builder:builder /build',
]
cmds_str := base64.encode_str(commands.join('\n'))
c := docker.NewContainer{
image: base_image
env: ['BUILD_SCRIPT=$cmds_str']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/sh -e']
}
// This check is needed so the user can pass "archlinux" without a tag &
// still have it work
image_parts := base_image.split_nth(':', 2)
image_name := image_parts[0]
image_tag := if image_parts.len > 1 { image_parts[1] } else { 'latest' }
// We pull the provided image
docker.pull_image(image_name, image_tag) ?
id := docker.create_container(c) ?
docker.start_container(id) ?
// This loop waits until the container has stopped, so we can remove it after
for {
data := docker.inspect_container(id) ?
if !data.state.running {
break
}
// Wait for 5 seconds
time.sleep(5000000000)
}
// Finally, we create the image from the container
// As the tag, we use the epoch value
tag := time.sys_mono_now().str()
image := docker.create_image_from_container(id, 'vieter-build', tag) ?
docker.remove_container(id) ?
return image.id
}
fn build(conf Config) ? {
build_arch := os.uname().machine
// We get the repos map from the Vieter instance
repos_map := git.get_repos(conf.address, conf.api_key) ?
// We filter out any repos that aren't allowed to be built on this
// architecture
filtered_repos := repos_map.keys().map(repos_map[it]).filter(it.arch.contains(build_arch))
// No point in doing work if there's no repos present
if filtered_repos.len == 0 {
return
}
// First, we create a base image with the repos updated & the required packages preinstalled
image_id := create_build_image(conf.base_image) ?
for repo in filtered_repos {
// TODO what to do with PKGBUILDs that build multiple packages?
commands := [
'git clone --single-branch --depth 1 --branch $repo.branch $repo.url repo',
'cd repo',
'makepkg --nobuild --nodeps',
'source PKGBUILD',
// The build container checks whether the package is already
// present on the server
'curl --head --fail $conf.address/$repo.repo/$build_arch/\$pkgname-\$pkgver-\$pkgrel && exit 0',
'MAKEFLAGS="-j\$(nproc)" makepkg -s --noconfirm --needed && for pkg in \$(ls -1 *.pkg*); do curl -XPOST -T "\$pkg" -H "X-API-KEY: \$API_KEY" $conf.address/$repo.repo/publish; done',
]
// We convert the list of commands into a base64 string, which then gets
// passed to the container as an env var
cmds_str := base64.encode_str(commands.join('\n'))
c := docker.NewContainer{
image: '$image_id'
env: ['BUILD_SCRIPT=$cmds_str', 'API_KEY=$conf.api_key']
entrypoint: ['/bin/sh', '-c']
cmd: ['echo \$BUILD_SCRIPT | base64 -d | /bin/bash -e']
work_dir: '/build'
user: 'builder:builder'
}
id := docker.create_container(c) ?
docker.start_container(id) ?
// This loop waits until the container has stopped, so we can remove it after
for {
data := docker.inspect_container(id) ?
if !data.state.running {
break
}
// Wait for 5 seconds
time.sleep(5000000000)
}
docker.remove_container(id) ?
}
// Finally, we remove the builder image
docker.remove_image(image_id) ?
}
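
The BUILD_SCRIPT mechanism used in both functions can be reproduced standalone; this is only an illustration of the encoding trick, with example image & commands:

    cmds=$(printf '%s\n' 'pacman -Syu --noconfirm' 'makepkg -s --noconfirm' | base64 -w0)
    docker run --rm -e "BUILD_SCRIPT=$cmds" --entrypoint /bin/sh \
        archlinux:base-devel -c 'echo $BUILD_SCRIPT | base64 -d | /bin/sh -e'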

src/build/cli.v 100644

@@ -0,0 +1,25 @@
module build
import cli
import env
pub struct Config {
pub:
api_key string
address string
base_image string = 'archlinux:base-devel'
}
// cmd returns the cli submodule that handles the build process
pub fn cmd() cli.Command {
return cli.Command{
name: 'build'
description: 'Run the build process.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
build(conf) ?
}
}
}


@@ -0,0 +1,78 @@
module docker
import json
import net.urllib
struct Container {
id string [json: Id]
names []string [json: Names]
}
// containers returns a list of all currently running containers
pub fn containers() ?[]Container {
res := request('GET', urllib.parse('/v1.41/containers/json') ?) ?
return json.decode([]Container, res.text) or {}
}
pub struct NewContainer {
image string [json: Image]
entrypoint []string [json: Entrypoint]
cmd []string [json: Cmd]
env []string [json: Env]
work_dir string [json: WorkingDir]
user string [json: User]
}
struct CreatedContainer {
id string [json: Id]
}
// create_container creates a container defined by the given configuration. If
// successful, it returns the ID of the newly created container.
pub fn create_container(c &NewContainer) ?string {
res := request_with_json('POST', urllib.parse('/v1.41/containers/create') ?, c) ?
if res.status_code != 201 {
return error('Failed to create container.')
}
return json.decode(CreatedContainer, res.text) ?.id
}
// start_container starts a container with a given ID. It returns whether the
// container was started or not.
pub fn start_container(id string) ?bool {
res := request('POST', urllib.parse('/v1.41/containers/$id/start') ?) ?
return res.status_code == 204
}
struct ContainerInspect {
pub:
state ContainerState [json: State]
}
struct ContainerState {
pub:
running bool [json: Running]
}
// inspect_container returns the result of inspecting a container with a given
// ID.
pub fn inspect_container(id string) ?ContainerInspect {
res := request('GET', urllib.parse('/v1.41/containers/$id/json') ?) ?
if res.status_code != 200 {
return error('Failed to inspect container.')
}
return json.decode(ContainerInspect, res.text) or {}
}
// remove_container removes a container with a given ID.
pub fn remove_container(id string) ?bool {
res := request('DELETE', urllib.parse('/v1.41/containers/$id') ?) ?
return res.status_code == 204
}


@@ -0,0 +1,93 @@
module docker
import net.unix
import net.urllib
import net.http
import json
const socket = '/var/run/docker.sock'
const buf_len = 1024
fn send(req &string) ?http.Response {
// Open a connection to the socket
mut s := unix.connect_stream(docker.socket) or {
return error('Failed to connect to socket ${docker.socket}.')
}
defer {
// This or is required because otherwise, the V compiler segfaults for
// some reason
// https://github.com/vlang/v/issues/13534
s.close() or {}
}
// Write the request to the socket
s.write_string(req) or { return error('Failed to write request to socket ${docker.socket}.') }
s.wait_for_write() ?
mut c := 0
mut buf := []byte{len: docker.buf_len}
mut res := []byte{}
for {
c = s.read(mut buf) or { return error('Failed to read data from socket ${docker.socket}.') }
res << buf[..c]
if c < docker.buf_len {
break
}
}
// After reading the first part of the response, we parse it into an HTTP
// response. If it isn't chunked, we return early with the data.
parsed := http.parse_response(res.bytestr()) or {
return error('Failed to parse HTTP response from socket ${docker.socket}.')
}
if parsed.header.get(http.CommonHeader.transfer_encoding) or { '' } != 'chunked' {
return parsed
}
// We loop until we've encountered the end of the chunked response
// A chunked HTTP response always ends with '0\r\n\r\n'.
for res.len < 5 || res#[-5..] != [byte(`0`), `\r`, `\n`, `\r`, `\n`] {
// Wait for the server to respond
s.wait_for_write() ?
for {
c = s.read(mut buf) or {
return error('Failed to read data from socket ${docker.socket}.')
}
res << buf[..c]
if c < docker.buf_len {
break
}
}
}
// Decode chunked response
return http.parse_response(res.bytestr())
}
fn request_with_body(method string, url urllib.URL, content_type string, body string) ?http.Response {
req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
return send(req)
}
fn request(method string, url urllib.URL) ?http.Response {
req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
return send(req)
}
// request_with_json<T> sends a request to the Docker socket with a given JSON
// payload
pub fn request_with_json<T>(method string, url urllib.URL, data &T) ?http.Response {
body := json.encode(data)
return request_with_body(method, url, 'application/json', body)
}
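
Since the module hand-writes its HTTP, a quick way to sanity-check any of these requests is to replay them against the same socket with curl; a rough equivalent of request('GET', ...) for the container list, shown purely as illustration:

    curl --unix-socket /var/run/docker.sock http://localhost/v1.41/containers/json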


@@ -0,0 +1,34 @@
module docker
import net.http
import net.urllib
import json
struct Image {
pub:
id string [json: Id]
}
// pull_image tries to pull the image for the given image name & tag
pub fn pull_image(image string, tag string) ?http.Response {
return request('POST', urllib.parse('/v1.41/images/create?fromImage=$image&tag=$tag') ?)
}
// create_image_from_container creates a new image from a container with the
// given repo & tag, given the container's ID.
pub fn create_image_from_container(id string, repo string, tag string) ?Image {
res := request('POST', urllib.parse('/v1.41/commit?container=$id&repo=$repo&tag=$tag') ?) ?
if res.status_code != 201 {
return error('Failed to create image from container.')
}
return json.decode(Image, res.text) or {}
}
// remove_image removes the image with the given ID.
pub fn remove_image(id string) ?bool {
res := request('DELETE', urllib.parse('/v1.41/images/$id') ?) ?
return res.status_code == 200
}

src/env.v 100644

@@ -0,0 +1,82 @@
module env
import os
import toml
// The prefix that every environment variable should have
const prefix = 'VIETER_'
// The suffix an environment variable should have for its value to be loaded
// from a file instead
const file_suffix = '_FILE'
fn get_env_var(field_name string) ?string {
env_var_name := '$env.prefix$field_name.to_upper()'
env_file_name := '$env.prefix$field_name.to_upper()$env.file_suffix'
env_var := os.getenv(env_var_name)
env_file := os.getenv(env_file_name)
// If both are missing, we return an empty string
if env_var == '' && env_file == '' {
return ''
}
// If they're both set, we report a conflict
if env_var != '' && env_file != '' {
return error('Only one of $env_var_name or $env_file_name can be defined.')
}
// If it's the env var itself, we return it.
// I'm pretty sure this also prevents variables ending in _FILE (e.g.
// VIETER_LOG_FILE) from being mistakenly read as _FILE-suffixed env
// vars.
if env_var != '' {
return env_var
}
// Otherwise, we process the file
return os.read_file(env_file) or {
error('Failed to read file defined in $env_file_name: ${err.msg}.')
}
}
// load<T> attempts to create an object of type T from the given path to a toml
// file & environment variables. For each field, it will select either a value
// given from an environment variable, a value defined in the config file or a
// configured default if present, in that order.
pub fn load<T>(path string) ?T {
mut res := T{}
if os.exists(path) {
// We don't use reflect here because reflect also sets any fields not
// in the toml back to their zero value, which we don't want
doc := toml.parse_file(path) ?
$for field in T.fields {
s := doc.value(field.name)
// We currently only support strings
if s.type_name() == 'string' {
res.$(field.name) = s.string()
}
}
}
$for field in T.fields {
$if field.typ is string {
env_value := get_env_var(field.name) ?
// The value of the env var will always be chosen over the config
// file
if env_value != '' {
res.$(field.name) = env_value
}
// If there's no value from the toml file either, we try to find a
// default value
else if res.$(field.name) == '' {
return error("Missing config variable '$field.name' with no provided default. Either add it to the config file or provide it using an environment variable.")
}
}
}
return res
}
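
Putting the precedence rules together, a hypothetical server invocation could look like this (the path & key are made up); note that setting both VIETER_API_KEY and VIETER_API_KEY_FILE would be rejected as a conflict:

    echo 'test-key' > /tmp/vieter_api_key
    export VIETER_API_KEY_FILE=/tmp/vieter_api_key   # read instead of VIETER_API_KEY
    vieter -f vieter.toml server                     # the env value beats the toml value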

src/git/cli.v 100644

@@ -0,0 +1,147 @@
module git
import cli
import env
struct Config {
address string [required]
api_key string [required]
}
// cmd returns the cli submodule that handles the repos API interaction
pub fn cmd() cli.Command {
return cli.Command{
name: 'repos'
description: 'Interact with the repos API.'
commands: [
cli.Command{
name: 'list'
description: 'List the current repos.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
list(conf) ?
}
},
cli.Command{
name: 'add'
required_args: 4
usage: 'url branch repo arch...'
description: 'Add a new repository.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
add(conf, cmd.args[0], cmd.args[1], cmd.args[2], cmd.args[3..]) ?
}
},
cli.Command{
name: 'remove'
required_args: 1
usage: 'id'
description: 'Remove a repository that matches the given ID prefix.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
remove(conf, cmd.args[0]) ?
}
},
cli.Command{
name: 'edit'
required_args: 1
usage: 'id'
description: 'Edit the repository that matches the given ID prefix.'
flags: [
cli.Flag{
name: 'url'
description: 'URL of the Git repository.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'branch'
description: 'Branch of the Git repository.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'repo'
description: 'Repo to publish builds to.'
flag: cli.FlagType.string
},
cli.Flag{
name: 'arch'
description: 'Comma-separated list of architectures to build on.'
flag: cli.FlagType.string
},
]
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
found := cmd.flags.get_all_found()
mut params := map[string]string{}
for f in found {
if f.name != 'config-file' {
params[f.name] = f.get_string() ?
}
}
patch(conf, cmd.args[0], params) ?
}
},
]
}
}
fn get_repo_id_by_prefix(conf Config, id_prefix string) ?string {
repos := get_repos(conf.address, conf.api_key) ?
mut res := []string{}
for id, _ in repos {
if id.starts_with(id_prefix) {
res << id
}
}
if res.len == 0 {
return error('No repo found for given prefix.')
}
if res.len > 1 {
return error('Multiple repos found for given prefix.')
}
return res[0]
}
fn list(conf Config) ? {
repos := get_repos(conf.address, conf.api_key) ?
for id, details in repos {
println('${id[..8]}\t$details.url\t$details.branch\t$details.repo\t$details.arch')
}
}
fn add(conf Config, url string, branch string, repo string, arch []string) ? {
res := add_repo(conf.address, conf.api_key, url, branch, repo, arch) ?
println(res.message)
}
fn remove(conf Config, id_prefix string) ? {
id := get_repo_id_by_prefix(conf, id_prefix) ?
res := remove_repo(conf.address, conf.api_key, id) ?
println(res.message)
}
fn patch(conf Config, id_prefix string, params map[string]string) ? {
id := get_repo_id_by_prefix(conf, id_prefix) ?
res := patch_repo(conf.address, conf.api_key, id, params) ?
println(res.message)
}
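
A hypothetical session with this subcommand, using the positional args declared in the usage strings above (the URL & ID prefix are examples):

    vieter -f vieter.toml repos add 'https://git.rustybever.be/Chewing_Bever/vieter.git' dev vieter x86_64
    vieter -f vieter.toml repos list
    vieter -f vieter.toml repos remove 1d16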

src/git/client.v 100644

@@ -0,0 +1,61 @@
module git
import json
import response { Response }
import net.http
fn send_request<T>(method http.Method, address string, url string, api_key string, params map[string]string) ?Response<T> {
mut full_url := '$address$url'
if params.len > 0 {
params_str := params.keys().map('$it=${params[it]}').join('&')
full_url = '$full_url?$params_str'
}
mut req := http.new_request(method, full_url, '') ?
req.add_custom_header('X-API-Key', api_key) ?
res := req.do() ?
data := json.decode(Response<T>, res.text) ?
return data
}
// get_repos returns the current list of repos.
pub fn get_repos(address string, api_key string) ?map[string]GitRepo {
data := send_request<map[string]GitRepo>(http.Method.get, address, '/api/repos', api_key,
{}) ?
return data.data
}
// add_repo adds a new repo to the server.
pub fn add_repo(address string, api_key string, url string, branch string, repo string, arch []string) ?Response<string> {
params := {
'url': url
'branch': branch
'repo': repo
'arch': arch.join(',')
}
data := send_request<string>(http.Method.post, address, '/api/repos', api_key, params) ?
return data
}
// remove_repo removes the repo with the given ID from the server.
pub fn remove_repo(address string, api_key string, id string) ?Response<string> {
data := send_request<string>(http.Method.delete, address, '/api/repos/$id', api_key,
{}) ?
return data
}
// patch_repo sends a PATCH request to the given repo with the params as
// payload.
pub fn patch_repo(address string, api_key string, id string, params map[string]string) ?Response<string> {
data := send_request<string>(http.Method.patch, address, '/api/repos/$id', api_key,
params) ?
return data
}

src/git/git.v 100644

@@ -0,0 +1,82 @@
module git
import os
import json
pub struct GitRepo {
pub mut:
// URL of the Git repository
url string
// Branch of the Git repository to use
branch string
// On which architectures the package is allowed to be built. In reality,
// this controls which builders will periodically build the image.
arch []string
// Which repo the builder should publish packages to
repo string
}
// patch_from_params patches a GitRepo from a map[string]string, usually
// provided from a web.App's params
pub fn (mut r GitRepo) patch_from_params(params map[string]string) {
$for field in GitRepo.fields {
if field.name in params {
$if field.typ is string {
r.$(field.name) = params[field.name]
// This specific type check is needed for the compiler to ensure
// our types are correct
} $else $if field.typ is []string {
r.$(field.name) = params[field.name].split(',')
}
}
}
}
// read_repos reads the provided path & parses it into a map of GitRepo's.
pub fn read_repos(path string) ?map[string]GitRepo {
if !os.exists(path) {
mut f := os.create(path) ?
defer {
f.close()
}
f.write_string('{}') ?
return {}
}
content := os.read_file(path) ?
res := json.decode(map[string]GitRepo, content) ?
return res
}
// write_repos writes a map of GitRepo's back to disk given the provided path.
pub fn write_repos(path string, repos &map[string]GitRepo) ? {
mut f := os.create(path) ?
defer {
f.close()
}
value := json.encode(repos)
f.write_string(value) ?
}
// repo_from_params creates a GitRepo from a map[string]string, usually
// provided from a web.App's params
pub fn repo_from_params(params map[string]string) ?GitRepo {
mut repo := GitRepo{}
// If we're creating a new GitRepo, we want all fields to be present before
// "patching".
$for field in GitRepo.fields {
if field.name !in params {
return error('Missing parameter: ${field.name}.')
}
}
repo.patch_from_params(params)
return repo
}

src/main.v

@@ -1,103 +1,33 @@
module main
import web
import os
import log
import io
import repo
const port = 8000
const buf_size = 1_000_000
const db_name = 'pieter.db'
struct App {
web.Context
pub:
api_key string [required; web_global]
dl_dir string [required; web_global]
pub mut:
repo repo.Repo [required; web_global]
}
[noreturn]
fn exit_with_message(code int, msg string) {
eprintln(msg)
exit(code)
}
fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
mut file := os.create(path) ?
defer {
file.close()
}
mut buf := []byte{len: buf_size}
mut bytes_left := length
// Repeat as long as the stream still has data
for bytes_left > 0 {
// TODO check if just breaking here is safe
bytes_read := reader.read(mut buf) or { break }
bytes_left -= bytes_read
mut to_write := bytes_read
for to_write > 0 {
// TODO don't just loop infinitely here
bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }
to_write = to_write - bytes_written
}
}
}
import server
import cli
import build
import git
fn main() {
// Configure logger
log_level_str := os.getenv_opt('LOG_LEVEL') or { 'WARN' }
log_level := log.level_from_tag(log_level_str) or {
exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
log_file := os.getenv_opt('LOG_FILE') or { 'vieter.log' }
mut logger := log.Log{
level: log_level
mut app := cli.Command{
name: 'vieter'
description: 'Vieter is a lightweight implementation of an Arch repository server.'
version: '0.2.0'
flags: [
cli.Flag{
flag: cli.FlagType.string
name: 'config-file'
abbrev: 'f'
description: 'Location of Vieter config file; defaults to ~/.vieterrc.'
global: true
default_value: [os.expand_tilde_to_home('~/.vieterrc')]
},
]
commands: [
server.cmd(),
build.cmd(),
git.cmd(),
]
}
logger.set_full_logpath(log_file)
logger.log_to_console_too()
defer {
logger.info('Flushing log file')
logger.flush()
logger.close()
}
// Configure web server
key := os.getenv_opt('API_KEY') or { exit_with_message(1, 'No API key was provided.') }
repo_dir := os.getenv_opt('REPO_DIR') or {
exit_with_message(1, 'No repo directory was configured.')
}
pkg_dir := os.getenv_opt('PKG_DIR') or {
exit_with_message(1, 'No package directory was configured.')
}
dl_dir := os.getenv_opt('DOWNLOAD_DIR') or {
exit_with_message(1, 'No download directory was configured.')
}
// This also creates the directories if needed
repo := repo.new(repo_dir, pkg_dir) or {
logger.error(err.msg)
exit(1)
}
os.mkdir_all(dl_dir) or { exit_with_message(1, 'Failed to create download directory.') }
web.run(&App{
logger: logger
api_key: key
dl_dir: dl_dir
repo: repo
}, port)
app.setup()
app.parse(os.args)
}


@@ -72,14 +72,11 @@ fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
'pkgbase' { pkg_info.base = value }
'pkgver' { pkg_info.version = value }
'pkgdesc' { pkg_info.description = value }
'csize' { continue }
'size' { pkg_info.size = value.int() }
'url' { pkg_info.url = value }
'arch' { pkg_info.arch = value }
'builddate' { pkg_info.build_date = value.int() }
'packager' { pkg_info.packager = value }
'md5sum' { continue }
'sha256sum' { continue }
'pgpsig' { pkg_info.pgpsig = value }
'pgpsigsize' { pkg_info.pgpsigsize = value.int() }
// Array values
@@ -92,16 +89,19 @@ fn parse_pkg_info_string(pkg_info_str &string) ?PkgInfo {
'optdepend' { pkg_info.optdepends << value }
'makedepend' { pkg_info.makedepends << value }
'checkdepend' { pkg_info.checkdepends << value }
else { return error("Invalid key '$key'.") }
// There's no real point in trying to exactly manage which fields
// are allowed, so we just ignore any we don't explicitly need in
// the db file
else { continue }
}
}
return pkg_info
}
// read_pkg extracts the file list & .PKGINFO contents from an archive
// NOTE: this command currently only supports zstd-compressed tarballs
pub fn read_pkg(pkg_path string) ?Pkg {
// read_pkg_archive extracts the file list & .PKGINFO contents from an archive
// NOTE: this command only supports zstd-, xz- & gzip-compressed tarballs.
pub fn read_pkg_archive(pkg_path string) ?Pkg {
if !os.is_file(pkg_path) {
return error("'$pkg_path' doesn't exist or isn't a file.")
}
@@ -112,6 +112,7 @@ pub fn read_pkg(pkg_path string) ?Pkg {
// Since 2020, all newly built Arch packages use zstd
C.archive_read_support_filter_zstd(a)
C.archive_read_support_filter_gzip(a)
C.archive_read_support_filter_xz(a)
// The content should always be a tarball
C.archive_read_support_format_tar(a)
@@ -190,6 +191,7 @@ pub fn (pkg &Pkg) filename() string {
ext := match pkg.compression {
0 { '.tar' }
1 { '.tar.gz' }
6 { '.tar.xz' }
14 { '.tar.zst' }
else { panic("Another compression code shouldn't be possible. Faulty code: $pkg.compression") }
}


@@ -2,22 +2,23 @@ module repo
import os
import package
import util
// Dummy struct to work around the fact that you can only share structs, maps &
// arrays
pub struct Dummy {
x int
}
// This struct manages a single repository.
pub struct Repo {
// Manages a group of repositories. Each repository contains one or more
// arch-repositories, each of which represent a specific architecture.
pub struct RepoGroupManager {
mut:
mutex shared Dummy
mutex shared util.Dummy
pub:
// Where to store repository files
repo_dir string [required]
// Where to find packages; packages are expected to all be in the same directory
// Where to store repositories' files
repos_dir string [required]
// Where packages are stored; each arch-repository gets its own
// subdirectory
pkg_dir string [required]
// The default architecture to use for a repository. Whenever a package of
// arch "any" is added to a repo, it will also be added to this
// architecture.
default_arch string [required]
}
pub struct RepoAddResult {
@@ -26,55 +27,117 @@ pub:
pkg &package.Pkg [required]
}
// new creates a new Repo & creates the directories as needed
pub fn new(repo_dir string, pkg_dir string) ?Repo {
if !os.is_dir(repo_dir) {
os.mkdir_all(repo_dir) or { return error('Failed to create repo directory: $err.msg') }
// new creates a new RepoGroupManager & creates the directories as needed
pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
if !os.is_dir(repos_dir) {
os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg') }
}
if !os.is_dir(pkg_dir) {
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory: $err.msg') }
}
return Repo{
repo_dir: repo_dir
return RepoGroupManager{
repos_dir: repos_dir
pkg_dir: pkg_dir
default_arch: default_arch
}
}
// add_from_path adds a package from an arbitrary path & moves it into the pkgs
// directory if necessary.
pub fn (r &Repo) add_from_path(pkg_path string) ?RepoAddResult {
pkg := package.read_pkg(pkg_path) or { return error('Failed to read package file: $err.msg') }
added := r.add(pkg) ?
// If the add was successful, we move the file to the packages directory
if added {
dest_path := os.real_path(os.join_path_single(r.pkg_dir, pkg.filename()))
// Only move the file if it's not already in the package directory
if dest_path != os.real_path(pkg_path) {
os.mv(pkg_path, dest_path) ?
}
// add_pkg_from_path adds a package to a given repo, given the file path to the
// pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive
// file, passes the result to add_pkg_in_repo, and hard links the archive to
// the right subdirectories in r.pkg_dir if it was successfully added.
pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
pkg := package.read_pkg_archive(pkg_path) or {
return error('Failed to read package file: $err.msg')
}
added := r.add_pkg_in_repo(repo, pkg) ?
// If the add was successful, we hard-link the file into each arch-repo's
// package directory
for arch in added {
repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
dest_path := os.join_path_single(repo_pkg_path, pkg.filename())
os.mkdir_all(repo_pkg_path) ?
// We create hard links so that "any" arch packages aren't stored
// multiple times
os.link(pkg_path, dest_path) ?
}
// After linking, we can remove the original file
os.rm(pkg_path) ?
return RepoAddResult{
added: added
added: added.len > 0
pkg: &pkg
}
}
// add adds a given Pkg to the repository
fn (r &Repo) add(pkg &package.Pkg) ?bool {
pkg_dir := r.pkg_path(pkg)
// We can't add the same package twice
if os.exists(pkg_dir) {
return false
// add_pkg_in_repo adds a package to a given repo. This function is responsible
// for inspecting the package architecture. If said architecture is 'any', the
// package is added to each arch-repository within the given repo. A package of
// architecture 'any' is always added to the arch-repo defined by
// r.default_arch. If this arch-repo doesn't exist yet, it is created. If the
// architecture isn't 'any', the package is only added to the specific
// architecture.
fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string {
// A package not of arch 'any' can be handled easily by adding it to the
// respective repo
if pkg.info.arch != 'any' {
if r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg) ? {
return [pkg.info.arch]
} else {
return []
}
}
os.mkdir(pkg_dir) or { return error('Failed to create package directory.') }
mut arch_repos := []string{}
// If it is an "any" package, the package gets added to every currently
// present arch-repo. It will always get added to the r.default_arch repo,
// even if no or multiple others are present.
repo_dir := os.join_path_single(r.repos_dir, repo)
// If this is the first package that's added to the repo, the directory
// won't exist yet
if os.exists(repo_dir) {
arch_repos = os.ls(repo_dir) ?
}
// The default_arch should always be updated when a package with arch 'any'
// is added.
if !arch_repos.contains(r.default_arch) {
arch_repos << r.default_arch
}
mut added := []string{}
// We try to add the package to each arch-repo & record the ones it
// was actually added to.
for arch in arch_repos {
if r.add_pkg_in_arch_repo(repo, arch, pkg) ? {
added << arch
}
}
return added
}
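
To illustrate the 'any' handling: publishing a hypothetical hello-1.0.0-1-any package to repo 'vieter' on a fresh server whose default_arch is x86_64 would leave roughly this layout (paths follow the code above, using the default 'data' directory):

    # data/pkgs/vieter/x86_64/hello-1.0.0-1-any.pkg.tar.zst   (hard link)
    # data/repos/vieter/x86_64/hello-1.0.0-1/desc
    # data/repos/vieter/x86_64/hello-1.0.0-1/files
    # data/repos/vieter/x86_64/vieter.db.tar.gz               (written by sync)
    # data/repos/vieter/x86_64/vieter.files.tar.gz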
// add_pkg_in_arch_repo is the function that actually adds a package to a given
// arch-repo. It records the package's data in the arch-repo's desc & files
// files, and afterwards updates the db & files archives to reflect these
// changes. The function returns false if the package was already present in
// the repo, and true otherwise.
fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ?bool {
pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')
// Remove the previous version of the package, if present
r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false) ?
os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }
os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()) or {
os.rmdir_all(pkg_dir) ?
@@ -87,12 +150,55 @@ fn (r &Repo) add(pkg &package.Pkg) ?bool {
return error('Failed to write files file.')
}
r.sync() ?
r.sync(repo, arch) ?
return true
}
// Returns the path where the given package's desc & files files are stored
fn (r &Repo) pkg_path(pkg &package.Pkg) string {
return os.join_path(r.repo_dir, '$pkg.info.name-$pkg.info.version')
// remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
// returns false if the package wasn't present in the database. It also
// optionally re-syncs the repo archives.
fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool {
repo_dir := os.join_path(r.repos_dir, repo, arch)
// If the repository doesn't exist yet, the result is automatically false
if !os.exists(repo_dir) {
return false
}
// We iterate over every directory in the repo dir
// TODO filter so we only check directories
for d in os.ls(repo_dir) ? {
// Because a repository only allows a single version of each package,
// we need only compare whether the name of the package is the same,
// not the version.
name := d.split('-')#[..-2].join('-')
if name == pkg_name {
// We lock the mutex here to prevent other routines from creating a
// new archive while we remove an entry
lock r.mutex {
os.rmdir_all(os.join_path_single(repo_dir, d)) ?
}
// Also remove the package archive
repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)
archives := os.ls(repo_pkg_dir) ?.filter(it.split('-')#[..-3].join('-') == name)
for archive_name in archives {
full_path := os.join_path_single(repo_pkg_dir, archive_name)
os.rm(full_path) ?
}
// Sync the db archives if requested
if sync {
r.sync(repo, arch) ?
}
return true
}
}
return false
}


@@ -30,8 +30,9 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
}
// Re-generate the repo archive files
fn (r &Repo) sync() ? {
// TODO also write files archive
fn (r &RepoGroupManager) sync(repo string, arch string) ? {
subrepo_path := os.join_path(r.repos_dir, repo, arch)
lock r.mutex {
a_db := C.archive_write_new()
a_files := C.archive_write_new()
@@ -44,18 +45,18 @@ fn (r &Repo) sync() ? {
C.archive_write_add_filter_gzip(a_files)
C.archive_write_set_format_pax_restricted(a_files)
db_path := os.join_path_single(r.repo_dir, 'vieter.db.tar.gz')
files_path := os.join_path_single(r.repo_dir, 'vieter.files.tar.gz')
db_path := os.join_path_single(subrepo_path, '${repo}.db.tar.gz')
files_path := os.join_path_single(subrepo_path, '${repo}.files.tar.gz')
C.archive_write_open_filename(a_db, &char(db_path.str))
C.archive_write_open_filename(a_files, &char(files_path.str))
// Iterate over each directory
for d in os.ls(r.repo_dir) ?.filter(os.is_dir(os.join_path_single(r.repo_dir,
for d in os.ls(subrepo_path) ?.filter(os.is_dir(os.join_path_single(subrepo_path,
it))) {
// desc
mut inner_path := os.join_path_single(d, 'desc')
mut actual_path := os.join_path_single(r.repo_dir, inner_path)
mut actual_path := os.join_path_single(subrepo_path, inner_path)
archive_add_entry(a_db, entry, actual_path, inner_path)
archive_add_entry(a_files, entry, actual_path, inner_path)
@@ -64,7 +65,7 @@ fn (r &Repo) sync() ? {
// files
inner_path = os.join_path_single(d, 'files')
actual_path = os.join_path_single(r.repo_dir, inner_path)
actual_path = os.join_path_single(subrepo_path, inner_path)
archive_add_entry(a_files, entry, actual_path, inner_path)

src/response.v 100644

@@ -0,0 +1,34 @@
module response
pub struct Response<T> {
pub:
message string
data T
}
// new_response constructs a new Response<string> object with the given message
// & an empty data field.
pub fn new_response(message string) Response<string> {
return Response<string>{
message: message
data: ''
}
}
// new_data_response<T> constructs a new Response<T> object with the given data
// & an empty message field.
pub fn new_data_response<T>(data T) Response<T> {
return Response<T>{
message: ''
data: data
}
}
// new_full_response<T> constructs a new Response<T> object with the given
// message & data.
pub fn new_full_response<T>(message string, data T) Response<T> {
return Response<T>{
message: message
data: data
}
}
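
Once JSON-encoded, these helpers yield bodies like the following (all values are examples):

    # new_response('Repo added successfully.') encodes to:
    #   {"message":"Repo added successfully.","data":""}
    # new_data_response(repos) wraps the payload the same way:
    #   {"message":"","data":{"some-uuid":{"url":"https://example.com/repo.git","branch":"dev","arch":["x86_64"],"repo":"vieter"}}}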


@@ -1,103 +0,0 @@
module main
import web
import os
import repo
import time
import rand
const prefixes = ['B', 'KB', 'MB', 'GB']
// pretty_bytes converts a byte count to human-readable version
fn pretty_bytes(bytes int) string {
mut i := 0
mut n := f32(bytes)
for n >= 1024 {
i++
n /= 1024
}
return '${n:.2}${prefixes[i]}'
}
fn is_pkg_name(s string) bool {
return s.contains('.pkg')
}
// healthcheck just returns a string, but can be used to quickly check if the
// server is still responsive.
['/health'; get]
pub fn (mut app App) healthcheck() web.Result {
return app.text('Healthy')
}
// get_root handles a GET request for a file on the root
['/:filename'; get]
fn (mut app App) get_root(filename string) web.Result {
mut full_path := ''
if filename.ends_with('.db') || filename.ends_with('.files') {
full_path = os.join_path_single(app.repo.repo_dir, '${filename}.tar.gz')
} else if filename.ends_with('.db.tar.gz') || filename.ends_with('.files.tar.gz') {
full_path = os.join_path_single(app.repo.repo_dir, '$filename')
} else {
full_path = os.join_path_single(app.repo.pkg_dir, filename)
}
return app.file(full_path)
}
['/publish'; post]
fn (mut app App) put_package() web.Result {
if !app.is_authorized() {
return app.text('Unauthorized.')
}
mut pkg_path := ''
if length := app.req.header.get(.content_length) {
// Generate a random filename for the temp file
pkg_path = os.join_path_single(app.dl_dir, rand.uuid_v4())
for os.exists(pkg_path) {
pkg_path = os.join_path_single(app.dl_dir, rand.uuid_v4())
}
app.ldebug("Uploading $length bytes (${pretty_bytes(length.int())}) to '$pkg_path'.")
// This is used to time how long it takes to upload a file
mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
reader_to_file(mut app.reader, length.int(), pkg_path) or {
app.lwarn("Failed to upload '$pkg_path'")
return app.text('Failed to upload file.')
}
sw.stop()
app.ldebug("Upload of '$pkg_path' completed in ${sw.elapsed().seconds():.3}s.")
} else {
app.lwarn('Tried to upload package without specifying a Content-Length.')
return app.text("Content-Type header isn't set.")
}
res := app.repo.add_from_path(pkg_path) or {
app.lerror('Error while adding package: $err.msg')
os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
return app.text('Failed to add package.')
}
if !res.added {
os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
app.lwarn("Duplicate package '$res.pkg.full_name()'.")
return app.text('File already exists.')
}
app.linfo("Added '$res.pkg.full_name()' to repository.")
return app.text('Package added successfully.')
}


@@ -1,4 +1,4 @@
module main
module server
import net.http
@@ -7,5 +7,5 @@ fn (mut app App) is_authorized() bool {
return false
}
return x_header.trim_space() == app.api_key
return x_header.trim_space() == app.conf.api_key
}

src/server/cli.v 100644

@@ -0,0 +1,30 @@
module server
import cli
import env
struct Config {
pub:
log_level string = 'WARN'
log_file string = 'vieter.log'
pkg_dir string
download_dir string
api_key string
repos_dir string
repos_file string
default_arch string
}
// cmd returns the cli submodule that handles starting the server
pub fn cmd() cli.Command {
return cli.Command{
name: 'server'
description: 'Start the Vieter server.'
execute: fn (cmd cli.Command) ? {
config_file := cmd.flags.get_string('config-file') ?
conf := env.load<Config>(config_file) ?
server(conf) ?
}
}
}
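
For completeness, a minimal vieter.toml matching the Config struct above might look like this (all values are examples; each of them can also come from a VIETER_* environment variable, which takes precedence):

    cat > vieter.toml <<'EOF'
    log_level = "DEBUG"
    api_key = "test"
    download_dir = "data/downloads"
    repos_dir = "data/repos"
    pkg_dir = "data/pkgs"
    repos_file = "data/repos.json"
    default_arch = "x86_64"
    EOF
    vieter -f vieter.toml server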

src/server/git.v 100644

@@ -0,0 +1,141 @@
module server
import web
import git
import net.http
import rand
import response { new_data_response, new_response }
const repos_file = 'repos.json'
['/api/repos'; get]
fn (mut app App) get_repos() web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
repos := rlock app.git_mutex {
git.read_repos(app.conf.repos_file) or {
app.lerror('Failed to read repos file: $err.msg')
return app.status(http.Status.internal_server_error)
}
}
return app.json(http.Status.ok, new_data_response(repos))
}
['/api/repos/:id'; get]
fn (mut app App) get_single_repo(id string) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
repos := rlock app.git_mutex {
git.read_repos(app.conf.repos_file) or {
app.lerror('Failed to read repos file.')
return app.status(http.Status.internal_server_error)
}
}
if id !in repos {
return app.not_found()
}
repo := repos[id]
return app.json(http.Status.ok, new_data_response(repo))
}
['/api/repos'; post]
fn (mut app App) post_repo() web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
new_repo := git.repo_from_params(app.query) or {
return app.json(http.Status.bad_request, new_response(err.msg))
}
id := rand.uuid_v4()
mut repos := rlock app.git_mutex {
git.read_repos(app.conf.repos_file) or {
app.lerror('Failed to read repos file.')
return app.status(http.Status.internal_server_error)
}
}
// We need to check for duplicates
for _, repo in repos {
if repo == new_repo {
return app.json(http.Status.bad_request, new_response('Duplicate repository.'))
}
}
repos[id] = new_repo
lock app.git_mutex {
git.write_repos(app.conf.repos_file, &repos) or {
return app.status(http.Status.internal_server_error)
}
}
return app.json(http.Status.ok, new_response('Repo added successfully.'))
}
['/api/repos/:id'; delete]
fn (mut app App) delete_repo(id string) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
mut repos := rlock app.git_mutex {
git.read_repos(app.conf.repos_file) or {
app.lerror('Failed to read repos file.')
return app.status(http.Status.internal_server_error)
}
}
if id !in repos {
return app.not_found()
}
repos.delete(id)
lock app.git_mutex {
git.write_repos(app.conf.repos_file, &repos) or { return app.server_error(500) }
}
return app.json(http.Status.ok, new_response('Repo removed successfully.'))
}
['/api/repos/:id'; patch]
fn (mut app App) patch_repo(id string) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
mut repos := rlock app.git_mutex {
git.read_repos(app.conf.repos_file) or {
app.lerror('Failed to read repos file.')
return app.status(http.Status.internal_server_error)
}
}
if id !in repos {
return app.not_found()
}
repos[id].patch_from_params(app.query)
lock app.git_mutex {
git.write_repos(app.conf.repos_file, &repos) or { return app.server_error(500) }
}
return app.json(http.Status.ok, new_response('Repo updated successfully.'))
}

src/server/routes.v 100644

@@ -0,0 +1,108 @@
module server
import web
import os
import repo
import time
import rand
import util
import net.http
import response { new_response }
// healthcheck just returns a string, but can be used to quickly check if the
// server is still responsive.
['/health'; get]
pub fn (mut app App) healthcheck() web.Result {
return app.json(http.Status.ok, new_response('Healthy.'))
}
['/:repo/:arch/:filename'; get; head]
fn (mut app App) get_repo_file(repo string, arch string, filename string) web.Result {
mut full_path := ''
db_exts := ['.db', '.files', '.db.tar.gz', '.files.tar.gz']
// There's no point in having the ability to serve db archives with wrong
// filenames
if db_exts.any(filename == '$repo$it') {
full_path = os.join_path(app.repo.repos_dir, repo, arch, filename)
// repo-add does this using symlinks, but we just change the requested
// path
if !full_path.ends_with('.tar.gz') {
full_path += '.tar.gz'
}
} else if filename.contains('.pkg') {
full_path = os.join_path(app.repo.pkg_dir, repo, arch, filename)
}
// Default behavior is to return the desc file for the package, if present.
// This can then also be used by the build system to properly check whether
// a package is present in an arch-repo.
else {
full_path = os.join_path(app.repo.repos_dir, repo, arch, filename, 'desc')
}
// Scuffed way to respond to HEAD requests
if app.req.method == http.Method.head {
if os.exists(full_path) {
return app.status(http.Status.ok)
}
return app.not_found()
}
return app.file(full_path)
}
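The filename check only accepts db archives named after the repo itself; the same any expression, as a standalone sketch:

fn main() {
	repo := 'vieter'
	db_exts := ['.db', '.files', '.db.tar.gz', '.files.tar.gz']
	for filename in ['vieter.db', 'vieter.files.tar.gz', 'core.db', 'foo-1.0-1-x86_64.pkg.tar.zst'] {
		// only archives of the form '<repo><ext>' hit the repos_dir branch
		println("$filename -> ${db_exts.any(filename == '$repo$it')}")
	}
}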
['/:repo/publish'; post]
fn (mut app App) put_package(repo string) web.Result {
if !app.is_authorized() {
return app.json(http.Status.unauthorized, new_response('Unauthorized.'))
}
mut pkg_path := ''
if length := app.req.header.get(.content_length) {
// Generate a random filename for the temp file
pkg_path = os.join_path_single(app.conf.download_dir, rand.uuid_v4())
app.ldebug("Uploading $length bytes (${util.pretty_bytes(length.int())}) to '$pkg_path'.")
// This is used to time how long it takes to upload a file
mut sw := time.new_stopwatch(time.StopWatchOptions{ auto_start: true })
util.reader_to_file(mut app.reader, length.int(), pkg_path) or {
app.lwarn("Failed to upload '$pkg_path'")
return app.json(http.Status.internal_server_error, new_response('Failed to upload file.'))
}
sw.stop()
app.ldebug("Upload of '$pkg_path' completed in ${sw.elapsed().seconds():.3}s.")
} else {
app.lwarn('Tried to upload a package without specifying a Content-Length.')
// length required
return app.status(http.Status.length_required)
}
res := app.repo.add_pkg_from_path(repo, pkg_path) or {
app.lerror('Error while adding package: $err.msg')
os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
return app.json(http.Status.internal_server_error, new_response('Failed to add package.'))
}
if !res.added {
os.rm(pkg_path) or { app.lerror("Failed to remove download '$pkg_path': $err.msg") }
app.lwarn("Duplicate package '$res.pkg.full_name()' in repo '$repo'.")
return app.json(http.Status.bad_request, new_response('File already exists.'))
}
app.linfo("Added '$res.pkg.full_name()' to repo '$repo ($res.pkg.info.arch)'.")
return app.json(http.Status.ok, new_response('Package added successfully.'))
}
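The publish flow is exercised by the Python test client at the bottom of this diff; the equivalent upload in V, as a sketch (hypothetical package filename, dev API key):

import net.http
import os

fn main() {
	// hypothetical package archive built with makepkg
	data := os.read_file('foo-1.0.0-3-x86_64.pkg.tar.zst') or { panic(err) }
	mut req := http.new_request(.post, 'http://localhost:8000/vieter/publish', data) or { panic(err) }
	req.header = http.new_custom_header_from_map({
		'x-api-key': 'test'
	}) or { panic(err) }
	resp := req.do() or { panic(err) }
	// the server answers with a JSON message, e.g. 'Package added successfully.'
	println(resp.text)
}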


@ -0,0 +1,61 @@
module server
import web
import os
import log
import repo
import util
const port = 8000
struct App {
web.Context
pub:
conf Config [required; web_global]
pub mut:
repo repo.RepoGroupManager [required; web_global]
// This is used to claim the file lock on the repos file
git_mutex shared util.Dummy
}
// server starts the web server & starts listening for requests
pub fn server(conf Config) ? {
// Prevent using 'any' as the default arch
if conf.default_arch == 'any' {
util.exit_with_message(1, "'any' is not allowed as the value for default_arch.")
}
// Configure logger
log_level := log.level_from_tag(conf.log_level) or {
util.exit_with_message(1, 'Invalid log level. The allowed values are FATAL, ERROR, WARN, INFO & DEBUG.')
}
mut logger := log.Log{
level: log_level
}
logger.set_full_logpath(conf.log_file)
logger.log_to_console_too()
defer {
logger.info('Flushing log file')
logger.flush()
logger.close()
}
// This also creates the directories if needed
repo := repo.new(conf.repos_dir, conf.pkg_dir, conf.default_arch) or {
logger.error(err.msg)
exit(1)
}
os.mkdir_all(conf.download_dir) or {
util.exit_with_message(1, 'Failed to create download directory.')
}
web.run(&App{
logger: logger
conf: conf
repo: repo
}, server.port)
}


@ -1,9 +1,55 @@
module util
import os
import io
import crypto.md5
import crypto.sha256
const reader_buf_size = 1_000_000
const prefixes = ['B', 'KB', 'MB', 'GB']
// Dummy struct to work around the fact that you can only share structs, maps &
// arrays
pub struct Dummy {
x int
}
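Since only structs, maps & arrays can be declared shared, this empty struct acts purely as a lock token; in miniature, the same rlock/lock pattern the routes above apply to git_mutex:

struct Dummy {
	x int
}

fn main() {
	shared token := Dummy{}
	rlock token {
		// multiple readers may hold the lock at once
		println('reading')
	}
	lock token {
		// a writer gets exclusive access
		println('writing')
	}
}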
// exit_with_message exits the program with a given status code after having
// first printed a specific message to STDERR
[noreturn]
pub fn exit_with_message(code int, msg string) {
eprintln(msg)
exit(code)
}
// reader_to_file writes the contents of a BufferedReader to a file
pub fn reader_to_file(mut reader io.BufferedReader, length int, path string) ? {
mut file := os.create(path) ?
defer {
file.close()
}
mut buf := []byte{len: util.reader_buf_size}
mut bytes_left := length
// Repeat as long as the stream still has data
for bytes_left > 0 {
// TODO check if just breaking here is safe
bytes_read := reader.read(mut buf) or { break }
bytes_left -= bytes_read
mut to_write := bytes_read
for to_write > 0 {
// TODO don't just loop infinitely here
bytes_written := file.write(buf[bytes_read - to_write..bytes_read]) or { continue }
to_write = to_write - bytes_written
}
}
}
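This is the function put_package uses to stream an upload to disk. A local sketch of the same call, copying one file to another (hypothetical paths; it assumes the file can import util, and the BufferedReader constructor has changed between V releases):

import io
import os
import util

fn main() {
	mut f := os.open('input.bin') or { panic(err) }
	defer {
		f.close()
	}
	mut reader := io.new_buffered_reader(reader: f)
	util.reader_to_file(mut reader, int(os.file_size('input.bin')), 'copy.bin') or { panic(err) }
}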
// hash_file returns the md5 & sha256 hash of a given file
// TODO actually implement sha256
pub fn hash_file(path &string) ?(string, string) {
@ -32,3 +78,16 @@ pub fn hash_file(path &string) ?(string, string) {
return md5sum.checksum().hex(), sha256sum.checksum().hex()
}
// pretty_bytes converts a byte count to a human-readable version
pub fn pretty_bytes(bytes int) string {
mut i := 0
mut n := f32(bytes)
for n >= 1024 {
i++
n /= 1024
}
return '${n:.2}${util.prefixes[i]}'
}
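A quick check of the scaling and the :.2 formatting (again assuming util is importable):

import util

fn main() {
	println(util.pretty_bytes(512))        // 512.00B
	println(util.pretty_bytes(1536))       // 1.50KB
	println(util.pretty_bytes(10_485_760)) // 10.00MB
}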


@ -34,7 +34,7 @@ fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
}
if x.len > 0 {
return IError(http.UnexpectedExtraAttributeError{
msg: 'Encountered unexpected extra attributes: $x'
attributes: x
})
}
if methods.len == 0 {
@ -49,8 +49,8 @@ fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
fn parse_query_from_url(url urllib.URL) map[string]string {
mut query := map[string]string{}
for k, v in url.query().data {
query[k] = v.data[0]
for v in url.query().data {
query[v.key] = v.value
}
return query
}
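The fix above switches from indexing data as a map to iterating its QueryValue entries, matching the newer urllib API; standalone:

import net.urllib

fn main() {
	u := urllib.parse('http://localhost:8000/api/repos?arch=x86_64&branch=main') or { panic(err) }
	for v in u.query().data {
		// prints 'arch = x86_64' and 'branch = main'
		println('$v.key = $v.value')
	}
}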


@ -12,9 +12,6 @@ import time
import json
import log
// A type which doesn't get filtered inside templates
pub type RawHtml = string
// A dummy structure returned from routes to indicate that a response was already sent to the user
[noinit]
pub struct Result {}
@ -22,7 +19,7 @@ pub struct Result {}
pub const (
methods_with_form = [http.Method.post, .put, .patch]
headers_close = http.new_custom_header_from_map({
'Server': 'VWeb'
'Server': 'VWeb'
http.CommonHeader.connection.str(): 'close'
}) or { panic('should never fail') }
@ -141,8 +138,8 @@ pub const (
// It has fields for the query, form, files.
pub struct Context {
mut:
content_type string = 'text/plain'
status string = '200 OK'
content_type string = 'text/plain'
status http.Status = http.Status.ok
pub:
// HTTP Request
req http.Request
@ -186,24 +183,14 @@ struct Route {
path string
}
// Defining this method is optional.
// init_server is called at server start.
// You can use it for initializing globals.
pub fn (ctx Context) init_server() {
eprintln('init_server() has been deprecated, please init your web app in `fn main()`')
}
// Defining this method is optional.
// before_request is called before every request (aka middleware).
// Probably you can use it for check user session cookie or add header.
pub fn (ctx Context) before_request() {}
pub struct Cookie {
name string
value string
expires time.Time
secure bool
http_only bool
// send_string
fn send_string(mut conn net.TcpConn, s string) ? {
conn.write(s.bytes()) ?
}
// send_response_to_client sends a response to the client
@ -225,34 +212,27 @@ pub fn (mut ctx Context) send_response_to_client(mimetype string, res string) bool {
text: res
}
resp.set_version(.v1_1)
resp.set_status(http.status_from_int(ctx.status.int()))
resp.set_status(ctx.status)
send_string(mut ctx.conn, resp.bytestr()) or { return false }
return true
}
// html HTTP_OK with s as payload with content-type `text/html`
pub fn (mut ctx Context) html(s string) Result {
ctx.send_response_to_client('text/html', s)
return Result{}
}
// text responds to a request with some plaintext.
pub fn (mut ctx Context) text(status http.Status, s string) Result {
ctx.status = status
// text HTTP_OK with s as payload with content-type `text/plain`
pub fn (mut ctx Context) text(s string) Result {
ctx.send_response_to_client('text/plain', s)
return Result{}
}
// json<T> HTTP_OK with json_s as payload with content-type `application/json`
pub fn (mut ctx Context) json<T>(j T) Result {
pub fn (mut ctx Context) json<T>(status http.Status, j T) Result {
ctx.status = status
json_s := json.encode(j)
ctx.send_response_to_client('application/json', json_s)
return Result{}
}
// json_pretty<T> Response HTTP_OK with a pretty-printed JSON result
pub fn (mut ctx Context) json_pretty<T>(j T) Result {
json_s := json.encode_pretty(j)
ctx.send_response_to_client('application/json', json_s)
return Result{}
}
@ -302,7 +282,7 @@ pub fn (mut ctx Context) file(f_path string) Result {
header: header.join(web.headers_close)
}
resp.set_version(.v1_1)
resp.set_status(http.status_from_int(ctx.status.int()))
resp.set_status(ctx.status)
send_string(mut ctx.conn, resp.bytestr()) or { return Result{} }
mut buf := []byte{len: 1_000_000}
@ -328,10 +308,10 @@ pub fn (mut ctx Context) file(f_path string) Result {
return Result{}
}
// ok Response HTTP_OK with s as payload
pub fn (mut ctx Context) ok(s string) Result {
ctx.send_response_to_client(ctx.content_type, s)
return Result{}
// status responds with an empty textual response, essentially only returning
// the given status code.
pub fn (mut ctx Context) status(status http.Status) Result {
return ctx.text(status, '')
}
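After this refactor every textual responder takes the status code explicitly, and status() is just an empty text(); a minimal route using the new json signature (a sketch, with the App struct trimmed to its web.Context embedding):

module main

import web
import net.http

struct App {
	web.Context
}

['/ping'; get]
pub fn (mut app App) ping() web.Result {
	// explicit status instead of the old set_status(int, string)
	return app.json(http.Status.ok, {
		'msg': 'pong'
	})
}

fn main() {
	web.run(&App{}, 8000)
}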
// server_error Responds with a server error
@ -361,64 +341,7 @@ pub fn (mut ctx Context) redirect(url string) Result {
// not_found Send a not_found response
pub fn (mut ctx Context) not_found() Result {
if ctx.done {
return Result{}
}
ctx.done = true
send_string(mut ctx.conn, web.http_404.bytestr()) or {}
return Result{}
}
// set_cookie Sets a cookie
pub fn (mut ctx Context) set_cookie(cookie Cookie) {
mut cookie_data := []string{}
mut secure := if cookie.secure { 'Secure;' } else { '' }
secure += if cookie.http_only { ' HttpOnly' } else { ' ' }
cookie_data << secure
if cookie.expires.unix > 0 {
cookie_data << 'expires=$cookie.expires.utc_string()'
}
data := cookie_data.join(' ')
ctx.add_header('Set-Cookie', '$cookie.name=$cookie.value; $data')
}
// set_content_type Sets the response content type
pub fn (mut ctx Context) set_content_type(typ string) {
ctx.content_type = typ
}
// set_cookie_with_expire_date Sets a cookie with a `expire_data`
pub fn (mut ctx Context) set_cookie_with_expire_date(key string, val string, expire_date time.Time) {
ctx.add_header('Set-Cookie', '$key=$val; Secure; HttpOnly; expires=$expire_date.utc_string()')
}
// get_cookie Gets a cookie by a key
pub fn (ctx &Context) get_cookie(key string) ?string { // TODO refactor
mut cookie_header := ctx.get_header('cookie')
if cookie_header == '' {
cookie_header = ctx.get_header('Cookie')
}
cookie_header = ' ' + cookie_header
// println('cookie_header="$cookie_header"')
// println(ctx.req.header)
cookie := if cookie_header.contains(';') {
cookie_header.find_between(' $key=', ';')
} else {
cookie_header.find_between(' $key=', '\r')
}
if cookie != '' {
return cookie.trim_space()
}
return error('Cookie not found')
}
// set_status Sets the response status
pub fn (mut ctx Context) set_status(code int, desc string) {
if code < 100 || code > 599 {
ctx.status = '500 Internal Server Error'
} else {
ctx.status = '$code $desc'
}
return ctx.status(http.Status.not_found)
}
// add_header Adds a header to the response with key and val
@ -560,12 +483,6 @@ fn handle_conn<T>(mut conn net.TcpConn, mut app T, routes map[string]Route) {
// Calling middleware...
app.before_request()
// Static handling
if serve_if_static<T>(mut app, url) {
// successfully served a static file
return
}
// Route matching
$for method in T.methods {
$if method.return_type is Result {
@ -661,83 +578,6 @@ fn route_matches(url_words []string, route_words []string) ?[]string {
return params
}
// serve_if_static<T> checks if request is for a static file and serves it
// returns true if we served a static file, false otherwise
[manualfree]
fn serve_if_static<T>(mut app T, url urllib.URL) bool {
// TODO: handle url parameters properly - for now, ignore them
static_file := app.static_files[url.path]
mime_type := app.static_mime_types[url.path]
if static_file == '' || mime_type == '' {
return false
}
data := os.read_file(static_file) or {
send_string(mut app.conn, web.http_404.bytestr()) or {}
return true
}
app.send_response_to_client(mime_type, data)
unsafe { data.free() }
return true
}
// scan_static_directory makes a static route for each file in a directory
fn (mut ctx Context) scan_static_directory(directory_path string, mount_path string) {
files := os.ls(directory_path) or { panic(err) }
if files.len > 0 {
for file in files {
full_path := os.join_path(directory_path, file)
if os.is_dir(full_path) {
ctx.scan_static_directory(full_path, mount_path + '/' + file)
} else if file.contains('.') && !file.starts_with('.') && !file.ends_with('.') {
ext := os.file_ext(file)
// Rudimentary guard against adding files not in mime_types.
// Use serve_static directly to add non-standard mime types.
if ext in web.mime_types {
ctx.serve_static(mount_path + '/' + file, full_path)
}
}
}
}
}
// handle_static Handles a directory static
// If `root` is set the mount path for the dir will be in '/'
pub fn (mut ctx Context) handle_static(directory_path string, root bool) bool {
if ctx.done || !os.exists(directory_path) {
return false
}
dir_path := directory_path.trim_space().trim_right('/')
mut mount_path := ''
if dir_path != '.' && os.is_dir(dir_path) && !root {
// Mount point hygiene, "./assets" => "/assets".
mount_path = '/' + dir_path.trim_left('.').trim('/')
}
ctx.scan_static_directory(dir_path, mount_path)
return true
}
// mount_static_folder_at - makes all static files in `directory_path` and inside it, available at http://server/mount_path
// For example: suppose you have called .mount_static_folder_at('/var/share/myassets', '/assets'),
// and you have a file /var/share/myassets/main.css .
// => That file will be available at URL: http://server/assets/main.css .
pub fn (mut ctx Context) mount_static_folder_at(directory_path string, mount_path string) bool {
if ctx.done || mount_path.len < 1 || mount_path[0] != `/` || !os.exists(directory_path) {
return false
}
dir_path := directory_path.trim_right('/')
ctx.scan_static_directory(dir_path, mount_path[1..])
return true
}
// serve_static Serves a file static
// `url` is the access path on the site, `file_path` is the real path to the file, `mime_type` is the file type
pub fn (mut ctx Context) serve_static(url string, file_path string) {
ctx.static_files[url] = file_path
// ctx.static_mime_types[url] = mime_type
ext := os.file_ext(file_path)
ctx.static_mime_types[url] = web.mime_types[ext]
}
// ip Returns the IP address of the current user
pub fn (ctx &Context) ip() string {
mut ip := ctx.req.header.get(.x_forwarded_for) or { '' }
@ -760,16 +600,6 @@ pub fn (mut ctx Context) error(s string) {
ctx.form_error = s
}
// not_found Returns an empty result
pub fn not_found() Result {
return Result{}
}
// send_string
fn send_string(mut conn net.TcpConn, s string) ? {
conn.write(s.bytes()) ?
}
// filter Do not delete.
// It is used by `vlib/v/gen/c/str_intp.v:130` for string interpolation inside web templates
// TODO: move it to template render


@ -38,7 +38,7 @@ def create_random_pkginfo(words, name_min_len, name_max_len):
Generates a random .PKGINFO
"""
name = "-".join(random_words(words, name_min_len, name_max_len))
ver = "0.1.0-1" # doesn't matter what it is anyways
ver = "0.1.0-3" # doesn't matter what it is anyways
# TODO add random dependencies (all types)
@ -97,7 +97,7 @@ async def upload_random_package(tar_path, sem):
async with sem:
with open(tar_path, 'rb') as f:
async with aiohttp.ClientSession() as s:
async with s.post("http://localhost:8000/publish", data=f.read(), headers={"x-api-key": "test"}) as r:
async with s.post("http://localhost:8000/vieter/publish", data=f.read(), headers={"x-api-key": "test"}) as r:
return await check_output(r)

vieter.toml 100644

@ -0,0 +1,10 @@
# This file contains settings used during development
api_key = "test"
download_dir = "data/downloads"
repos_dir = "data/repos"
pkg_dir = "data/pkgs"
log_level = "DEBUG"
repos_file = "data/repos.json"
default_arch = "x86_64"
address = "http://localhost:8000"