diff --git a/src/build/build.v b/src/build/build.v
index 8048ec4..6688296 100644
--- a/src/build/build.v
+++ b/src/build/build.v
@@ -1,6 +1,6 @@
 module build
 
-import docker
+import vieter.vdocker as docker
 import encoding.base64
 import time
 import os
diff --git a/src/console/targets/build.v b/src/console/targets/build.v
index 1f522a6..7403e2e 100644
--- a/src/console/targets/build.v
+++ b/src/console/targets/build.v
@@ -1,7 +1,7 @@
 module targets
 
 import client
-import docker
+import vieter.vdocker as docker
 import os
 import build
 
diff --git a/src/cron/daemon/daemon.v b/src/cron/daemon/daemon.v
index 9a06dcd..0ab792a 100644
--- a/src/cron/daemon/daemon.v
+++ b/src/cron/daemon/daemon.v
@@ -6,7 +6,7 @@ import datatypes { MinHeap }
 import cron.expression { CronExpression, parse_expression }
 import math
 import build
-import docker
+import vieter.vdocker as docker
 import os
 import client
 import models { Target }
diff --git a/src/docker/README.md b/src/docker/README.md
deleted file mode 100644
index 4cc8971..0000000
--- a/src/docker/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-This module implements part of the Docker Engine API v1.41
-([documentation](https://docs.docker.com/engine/api/v1.41/)) using socket-based
-HTTP communication.
diff --git a/src/docker/containers.v b/src/docker/containers.v
deleted file mode 100644
index 8fbf027..0000000
--- a/src/docker/containers.v
+++ /dev/null
@@ -1,123 +0,0 @@
-module docker
-
-import json
-import net.urllib
-import time
-import net.http { Method }
-
-struct DockerError {
-	message string
-}
-
-pub struct NewContainer {
-	image      string   [json: Image]
-	entrypoint []string [json: Entrypoint]
-	cmd        []string [json: Cmd]
-	env        []string [json: Env]
-	work_dir   string   [json: WorkingDir]
-	user       string   [json: User]
-}
-
-struct CreatedContainer {
-pub:
-	id       string   [json: Id]
-	warnings []string [json: Warnings]
-}
-
-// create_container creates a new container with the given config.
-pub fn (mut d DockerConn) create_container(c NewContainer) ?CreatedContainer {
-	d.send_request_with_json(Method.post, urllib.parse('/v1.41/containers/create')?, c)?
-	head, res := d.read_response()?
-
-	if head.status_code != 201 {
-		data := json.decode(DockerError, res)?
-
-		return error(data.message)
-	}
-
-	data := json.decode(CreatedContainer, res)?
-
-	return data
-}
-
-// start_container starts the container with the given id.
-pub fn (mut d DockerConn) start_container(id string) ? {
-	d.send_request(Method.post, urllib.parse('/v1.41/containers/$id/start')?)?
-	head, body := d.read_response()?
-
-	if head.status_code != 204 {
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-}
-
-struct ContainerInspect {
-pub mut:
-	state ContainerState [json: State]
-}
-
-struct ContainerState {
-pub:
-	running   bool   [json: Running]
-	status    string [json: Status]
-	exit_code int    [json: ExitCode]
-	// These use a rather specific format so they have to be parsed later
-	start_time_str string [json: StartedAt]
-	end_time_str   string [json: FinishedAt]
-pub mut:
-	start_time time.Time [skip]
-	end_time   time.Time [skip]
-}
-
-// inspect_container returns detailed information for a given container.
-pub fn (mut d DockerConn) inspect_container(id string) ?ContainerInspect {
-	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/json')?)?
-	head, body := d.read_response()?
-
-	if head.status_code != 200 {
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-
-	mut data := json.decode(ContainerInspect, body)?
-
-	// The Docker engine API *should* always return UTC time.
-	data.state.start_time = time.parse_rfc3339(data.state.start_time_str)?
-
-	if data.state.status == 'exited' {
-		data.state.end_time = time.parse_rfc3339(data.state.end_time_str)?
-	}
-
-	return data
-}
-
-// remove_container removes the container with the given id.
-pub fn (mut d DockerConn) remove_container(id string) ? {
-	d.send_request(Method.delete, urllib.parse('/v1.41/containers/$id')?)?
-	head, body := d.read_response()?
-
-	if head.status_code != 204 {
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-}
-
-// get_container_logs returns a reader object allowing access to the
-// container's logs.
-pub fn (mut d DockerConn) get_container_logs(id string) ?&StreamFormatReader {
-	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/logs?stdout=true&stderr=true')?)?
-	head := d.read_response_head()?
-
-	if head.status_code != 200 {
-		content_length := head.header.get(http.CommonHeader.content_length)?.int()
-		body := d.read_response_body(content_length)?
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-
-	return d.get_stream_format_reader()
-}
diff --git a/src/docker/docker.v b/src/docker/docker.v
deleted file mode 100644
index ccc6bed..0000000
--- a/src/docker/docker.v
+++ /dev/null
@@ -1,137 +0,0 @@
-module docker
-
-import net.unix
-import io
-import net.http
-import strings
-import net.urllib
-import json
-import util
-
-const (
-	socket               = '/var/run/docker.sock'
-	buf_len              = 10 * 1024
-	http_separator       = [u8(`\r`), `\n`, `\r`, `\n`]
-	http_chunk_separator = [u8(`\r`), `\n`]
-)
-
-pub struct DockerConn {
-mut:
-	socket &unix.StreamConn
-	reader &io.BufferedReader
-}
-
-// new_conn creates a new connection to the Docker daemon.
-pub fn new_conn() ?&DockerConn {
-	s := unix.connect_stream(docker.socket)?
-
-	d := &DockerConn{
-		socket: s
-		reader: io.new_buffered_reader(reader: s)
-	}
-
-	return d
-}
-
-// close closes the underlying socket connection.
-pub fn (mut d DockerConn) close() ? {
-	d.socket.close()?
-}
-
-// send_request sends an HTTP request without body.
-pub fn (mut d DockerConn) send_request(method http.Method, url urllib.URL) ? {
-	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
-
-	d.socket.write_string(req)?
-
-	// When starting a new request, the reader needs to be reset.
-	d.reader = io.new_buffered_reader(reader: d.socket)
-}
-
-// send_request_with_body sends an HTTP request with the given body.
-pub fn (mut d DockerConn) send_request_with_body(method http.Method, url urllib.URL, content_type string, body string) ? {
-	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
-
-	d.socket.write_string(req)?
-
-	// When starting a new request, the reader needs to be reset.
-	d.reader = io.new_buffered_reader(reader: d.socket)
-}
-
-// send_request_with_json is a convenience wrapper around
-// send_request_with_body that encodes the input as JSON.
-pub fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url urllib.URL, data &T) ? {
-	body := json.encode(data)
-
-	return d.send_request_with_body(method, url, 'application/json', body)
-}
-
-// read_response_head consumes the socket's contents until it encounters
-// '\r\n\r\n', after which it parses the response as an HTTP response.
-// Importantly, this function never consumes the reader past the HTTP
-// separator, so the body can be read fully later on.
-pub fn (mut d DockerConn) read_response_head() ?http.Response {
-	mut res := []u8{}
-
-	util.read_until_separator(mut d.reader, mut res, docker.http_separator)?
-
-	return http.parse_response(res.bytestr())
-}
-
-// read_response_body reads `length` bytes from the stream. It can be used when
-// the response encoding isn't chunked to fully read it.
-pub fn (mut d DockerConn) read_response_body(length int) ?string {
-	if length == 0 {
-		return ''
-	}
-
-	mut buf := []u8{len: docker.buf_len}
-	mut c := 0
-	mut builder := strings.new_builder(docker.buf_len)
-
-	for builder.len < length {
-		c = d.reader.read(mut buf) or { break }
-
-		builder.write(buf[..c])?
-	}
-
-	return builder.str()
-}
-
-// read_response is a convenience function which always consumes the entire
-// response & returns it. It should only be used when we're certain that the
-// result isn't too large.
-pub fn (mut d DockerConn) read_response() ?(http.Response, string) {
-	head := d.read_response_head()?
-
-	if head.header.get(http.CommonHeader.transfer_encoding) or { '' } == 'chunked' {
-		mut builder := strings.new_builder(1024)
-		mut body := d.get_chunked_response_reader()
-
-		util.reader_to_writer(mut body, mut builder)?
-
-		return head, builder.str()
-	}
-
-	content_length := head.header.get(http.CommonHeader.content_length)?.int()
-	res := d.read_response_body(content_length)?
-
-	return head, res
-}
-
-// get_chunked_response_reader returns a ChunkedResponseReader using the socket
-// as reader.
-pub fn (mut d DockerConn) get_chunked_response_reader() &ChunkedResponseReader {
-	r := new_chunked_response_reader(d.reader)
-
-	return r
-}
-
-// get_stream_format_reader returns a StreamFormatReader using the socket as
-// reader.
-pub fn (mut d DockerConn) get_stream_format_reader() &StreamFormatReader {
-	r := new_chunked_response_reader(d.reader)
-	r2 := new_stream_format_reader(r)
-
-	return r2
-}
diff --git a/src/docker/images.v b/src/docker/images.v
deleted file mode 100644
index 6161565..0000000
--- a/src/docker/images.v
+++ /dev/null
@@ -1,61 +0,0 @@
-module docker
-
-import net.http { Method }
-import net.urllib
-import json
-
-struct Image {
-pub:
-	id string [json: Id]
-}
-
-// pull_image pulls the given image:tag.
-pub fn (mut d DockerConn) pull_image(image string, tag string) ? {
-	d.send_request(Method.post, urllib.parse('/v1.41/images/create?fromImage=$image&tag=$tag')?)?
-	head := d.read_response_head()?
-
-	if head.status_code != 200 {
-		content_length := head.header.get(http.CommonHeader.content_length)?.int()
-		body := d.read_response_body(content_length)?
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-
-	// Keep reading the body until the pull has completed
-	mut body := d.get_chunked_response_reader()
-
-	mut buf := []u8{len: 1024}
-
-	for {
-		body.read(mut buf) or { break }
-	}
-}
-
-// create_image_from_container creates a new image from a container.
-pub fn (mut d DockerConn) create_image_from_container(id string, repo string, tag string) ?Image {
-	d.send_request(Method.post, urllib.parse('/v1.41/commit?container=$id&repo=$repo&tag=$tag')?)?
-	head, body := d.read_response()?
-
-	if head.status_code != 201 {
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-
-	data := json.decode(Image, body)?
-
-	return data
-}
-
-// remove_image removes the image with the given id.
-pub fn (mut d DockerConn) remove_image(id string) ? {
-	d.send_request(Method.delete, urllib.parse('/v1.41/images/$id')?)?
-	head, body := d.read_response()?
-
-	if head.status_code != 200 {
-		data := json.decode(DockerError, body)?
-
-		return error(data.message)
-	}
-}
diff --git a/src/docker/stream.v b/src/docker/stream.v
deleted file mode 100644
index 001f4b3..0000000
--- a/src/docker/stream.v
+++ /dev/null
@@ -1,135 +0,0 @@
-module docker
-
-import io
-import util
-import encoding.binary
-import encoding.hex
-
-// ChunkedResponseReader parses an underlying HTTP chunked response, exposing
-// it as if it was a continuous stream of data.
-struct ChunkedResponseReader {
-mut:
-	reader              io.BufferedReader
-	bytes_left_in_chunk u64
-	started             bool
-}
-
-// new_chunked_response_reader creates a new ChunkedResponseReader on the heap
-// with the provided reader.
-pub fn new_chunked_response_reader(reader io.BufferedReader) &ChunkedResponseReader {
-	r := &ChunkedResponseReader{
-		reader: reader
-	}
-
-	return r
-}
-
-// read satisfies the io.Reader interface.
-pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
-	if r.bytes_left_in_chunk == 0 {
-		// An io.BufferedReader always returns none if its stream has
-		// ended.
-		r.bytes_left_in_chunk = r.read_chunk_size()?
-	}
-
-	mut c := 0
-
-	// Make sure we don't read more than we can safely read. This is to avoid
-	// the underlying reader from becoming out of sync with our parsing:
-	if buf.len > r.bytes_left_in_chunk {
-		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
-	} else {
-		c = r.reader.read(mut buf)?
-	}
-
-	r.bytes_left_in_chunk -= u64(c)
-
-	return c
-}
-
-// read_chunk_size advances the reader & reads the size of the next HTTP chunk.
-// This function should only be called if the previous chunk has been
-// completely consumed.
-fn (mut r ChunkedResponseReader) read_chunk_size() ?u64 {
-	if r.started {
-		mut buf := []u8{len: 2}
-
-		// Each chunk ends with a `\r\n` which we want to skip first
-		r.reader.read(mut buf)?
-	}
-
-	r.started = true
-
-	mut res := []u8{}
-	util.read_until_separator(mut r.reader, mut res, http_chunk_separator)?
-
-	// The length of the next chunk is provided as a hexadecimal
-	mut num_data := hex.decode(res#[..-2].bytestr())?
-
-	for num_data.len < 8 {
-		num_data.insert(0, 0)
-	}
-
-	num := binary.big_endian_u64(num_data)
-
-	// This only occurs for the very last chunk, which always reports a size of
-	// 0.
-	if num == 0 {
-		return none
-	}
-
-	return num
-}
-
-// StreamFormatReader parses an underlying stream of Docker logs, removing the
-// header bytes.
-struct StreamFormatReader {
-mut:
-	reader              ChunkedResponseReader
-	bytes_left_in_chunk u32
-}
-
-// new_stream_format_reader creates a new StreamFormatReader using the given
-// reader.
-pub fn new_stream_format_reader(reader ChunkedResponseReader) &StreamFormatReader {
-	r := &StreamFormatReader{
-		reader: reader
-	}
-
-	return r
-}
-
-// read satisfies the io.Reader interface.
-pub fn (mut r StreamFormatReader) read(mut buf []u8) ?int {
-	if r.bytes_left_in_chunk == 0 {
-		r.bytes_left_in_chunk = r.read_chunk_size()?
-	}
-
-	mut c := 0
-
-	if buf.len > r.bytes_left_in_chunk {
-		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
-	} else {
-		c = r.reader.read(mut buf)?
-	}
-
-	r.bytes_left_in_chunk -= u32(c)
-
-	return c
-}
-
-// read_chunk_size advances the reader & reads the header bytes for the length
-// of the next chunk.
-fn (mut r StreamFormatReader) read_chunk_size() ?u32 {
-	mut buf := []u8{len: 8}
-
-	r.reader.read(mut buf)?
-
-	num := binary.big_endian_u32(buf[4..])
-
-	if num == 0 {
-		return none
-	}
-
-	return num
-}
diff --git a/src/v.mod b/src/v.mod
index 7f45917..15e3518 100644
--- a/src/v.mod
+++ b/src/v.mod
@@ -1,5 +1,6 @@
 Module {
 	dependencies: [
-		'https://git.rustybever.be/vieter/vconf'
+		'https://git.rustybever.be/vieter/vconf',
+		'https://git.rustybever.be/vieter/vdocker'
 	]
 }
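Note (not part of the diff): a minimal sketch of how call sites keep compiling after this change, assuming the external vieter/vdocker module exposes the same API as the removed in-tree files (new_conn, pull_image, close). The helper name and the image/tag values are made-up examples, not taken from the diff.

module build

import vieter.vdocker as docker

// pull_base_image is a hypothetical helper illustrating that aliasing the
// module back to `docker` leaves the existing `docker.` call sites unchanged.
fn pull_base_image() ? {
	mut dd := docker.new_conn()?

	defer {
		dd.close() or {}
	}

	// Example image and tag only.
	dd.pull_image('archlinux', 'base-devel')?
}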