commit 21db1050fafe456cc81766a075a54a9124cbfbef
Author: Chewing_Bever
Date:   Sat Jun 18 17:59:22 2022 +0200

    Moved over code from Vieter

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..140f8cf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.so
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..5db18f6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# Docker
+
+V library for interacting with the Docker HTTP API.
diff --git a/containers.v b/containers.v
new file mode 100644
index 0000000..8fbf027
--- /dev/null
+++ b/containers.v
@@ -0,0 +1,123 @@
+module docker
+
+import json
+import net.urllib
+import time
+import net.http { Method }
+
+struct DockerError {
+	message string
+}
+
+pub struct NewContainer {
+	image      string   [json: Image]
+	entrypoint []string [json: Entrypoint]
+	cmd        []string [json: Cmd]
+	env        []string [json: Env]
+	work_dir   string   [json: WorkingDir]
+	user       string   [json: User]
+}
+
+struct CreatedContainer {
+pub:
+	id       string   [json: Id]
+	warnings []string [json: Warnings]
+}
+
+// create_container creates a new container with the given config.
+pub fn (mut d DockerConn) create_container(c NewContainer) ?CreatedContainer {
+	d.send_request_with_json(Method.post, urllib.parse('/v1.41/containers/create')?, c)?
+	head, res := d.read_response()?
+
+	if head.status_code != 201 {
+		data := json.decode(DockerError, res)?
+
+		return error(data.message)
+	}
+
+	data := json.decode(CreatedContainer, res)?
+
+	return data
+}
+
+// start_container starts the container with the given id.
+pub fn (mut d DockerConn) start_container(id string) ? {
+	d.send_request(Method.post, urllib.parse('/v1.41/containers/$id/start')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 204 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+}
+
+struct ContainerInspect {
+pub mut:
+	state ContainerState [json: State]
+}
+
+struct ContainerState {
+pub:
+	running   bool   [json: Running]
+	status    string [json: Status]
+	exit_code int    [json: ExitCode]
+	// These use a rather specific format so they have to be parsed later
+	start_time_str string [json: StartedAt]
+	end_time_str   string [json: FinishedAt]
+pub mut:
+	start_time time.Time [skip]
+	end_time   time.Time [skip]
+}
+
+// inspect_container returns detailed information for a given container.
+pub fn (mut d DockerConn) inspect_container(id string) ?ContainerInspect {
+	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/json')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 200 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	mut data := json.decode(ContainerInspect, body)?
+
+	// The Docker engine API *should* always return UTC time.
+	data.state.start_time = time.parse_rfc3339(data.state.start_time_str)?
+
+	if data.state.status == 'exited' {
+		data.state.end_time = time.parse_rfc3339(data.state.end_time_str)?
+	}
+
+	return data
+}
+
+// remove_container removes the container with the given id.
+pub fn (mut d DockerConn) remove_container(id string) ? {
+	d.send_request(Method.delete, urllib.parse('/v1.41/containers/$id')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 204 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+}
+
+// get_container_logs returns a reader object allowing access to the
+// container's logs.
+pub fn (mut d DockerConn) get_container_logs(id string) ?&StreamFormatReader {
+	d.send_request(Method.get, urllib.parse('/v1.41/containers/$id/logs?stdout=true&stderr=true')?)?
+	head := d.read_response_head()?
+
+	if head.status_code != 200 {
+		content_length := head.header.get(http.CommonHeader.content_length)?.int()
+		body := d.read_response_body(content_length)?
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	return d.get_stream_format_reader()
+}
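The functions above cover the basic container lifecycle. As a rough usage sketch (not part of this commit; it assumes the module is importable as `docker`, that a daemon is listening on the default socket, and it glosses over the fact that the NewContainer fields may need to be marked pub before outside callers can set them):

import docker
import time

fn run() ? {
	mut conn := docker.new_conn()?
	defer {
		conn.close() or {}
	}

	// Image and command are placeholders.
	created := conn.create_container(docker.NewContainer{
		image: 'alpine:3.16'
		cmd: ['echo', 'hello']
	})?
	conn.start_container(created.id)?

	// Poll until the container has exited, then remove it again.
	for {
		inspect := conn.inspect_container(created.id)?

		if !inspect.state.running {
			break
		}

		time.sleep(time.second)
	}

	conn.remove_container(created.id)?
}

fn main() {
	run() or { panic(err) }
}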
diff --git a/docker.v b/docker.v
new file mode 100644
index 0000000..ccc6bed
--- /dev/null
+++ b/docker.v
@@ -0,0 +1,137 @@
+module docker
+
+import net.unix
+import io
+import net.http
+import strings
+import net.urllib
+import json
+import util
+
+const (
+	socket               = '/var/run/docker.sock'
+	buf_len              = 10 * 1024
+	http_separator       = [u8(`\r`), `\n`, `\r`, `\n`]
+	http_chunk_separator = [u8(`\r`), `\n`]
+)
+
+pub struct DockerConn {
+mut:
+	socket &unix.StreamConn
+	reader &io.BufferedReader
+}
+
+// new_conn creates a new connection to the Docker daemon.
+pub fn new_conn() ?&DockerConn {
+	s := unix.connect_stream(docker.socket)?
+
+	d := &DockerConn{
+		socket: s
+		reader: io.new_buffered_reader(reader: s)
+	}
+
+	return d
+}
+
+// close closes the underlying socket connection.
+pub fn (mut d DockerConn) close() ? {
+	d.socket.close()?
+}
+
+// send_request sends an HTTP request without body.
+pub fn (mut d DockerConn) send_request(method http.Method, url urllib.URL) ? {
+	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
+
+	d.socket.write_string(req)?
+
+	// When starting a new request, the reader needs to be reset.
+	d.reader = io.new_buffered_reader(reader: d.socket)
+}
+
+// send_request_with_body sends an HTTP request with the given body.
+pub fn (mut d DockerConn) send_request_with_body(method http.Method, url urllib.URL, content_type string, body string) ? {
+	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
+
+	d.socket.write_string(req)?
+
+	// When starting a new request, the reader needs to be reset.
+	d.reader = io.new_buffered_reader(reader: d.socket)
+}
+
+// send_request_with_json is a convenience wrapper around
+// send_request_with_body that encodes the input as JSON.
+pub fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url urllib.URL, data &T) ? {
+	body := json.encode(data)
+
+	return d.send_request_with_body(method, url, 'application/json', body)
+}
+
+// read_response_head consumes the socket's contents until it encounters
+// '\r\n\r\n', after which it parses the response as an HTTP response.
+// Importantly, this function never consumes the reader past the HTTP
+// separator, so the body can be read fully later on.
+pub fn (mut d DockerConn) read_response_head() ?http.Response {
+	mut res := []u8{}
+
+	util.read_until_separator(mut d.reader, mut res, docker.http_separator)?
+
+	return http.parse_response(res.bytestr())
+}
+
+// read_response_body reads `length` bytes from the stream. It can be used when
+// the response encoding isn't chunked to fully read it.
+pub fn (mut d DockerConn) read_response_body(length int) ?string {
+	if length == 0 {
+		return ''
+	}
+
+	mut buf := []u8{len: docker.buf_len}
+	mut c := 0
+	mut builder := strings.new_builder(docker.buf_len)
+
+	for builder.len < length {
+		c = d.reader.read(mut buf) or { break }
+
+		builder.write(buf[..c])?
+	}
+
+	return builder.str()
+}
+
+// read_response is a convenience function which always consumes the entire
+// response & returns it. It should only be used when we're certain that the
+// result isn't too large.
+pub fn (mut d DockerConn) read_response() ?(http.Response, string) {
+	head := d.read_response_head()?
+
+	if head.header.get(http.CommonHeader.transfer_encoding) or { '' } == 'chunked' {
+		mut builder := strings.new_builder(1024)
+		mut body := d.get_chunked_response_reader()
+
+		util.reader_to_writer(mut body, mut builder)?
+
+		return head, builder.str()
+	}
+
+	content_length := head.header.get(http.CommonHeader.content_length)?.int()
+	res := d.read_response_body(content_length)?
+
+	return head, res
+}
+
+// get_chunked_response_reader returns a ChunkedResponseReader using the socket
+// as reader.
+pub fn (mut d DockerConn) get_chunked_response_reader() &ChunkedResponseReader {
+	r := new_chunked_response_reader(d.reader)
+
+	return r
+}
+
+// get_stream_format_reader returns a StreamFormatReader using the socket as
+// reader.
+pub fn (mut d DockerConn) get_stream_format_reader() &StreamFormatReader {
+	r := new_chunked_response_reader(d.reader)
+	r2 := new_stream_format_reader(r)
+
+	return r2
+}
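Since send_request, read_response and the reader getters are public, endpoints that don't have a dedicated wrapper yet can be called directly. A minimal sketch, not part of this commit, assuming the daemon's `/v1.41/info` endpoint and leaving detailed error handling to the caller:

import docker
import net.http { Method }
import net.urllib

fn info() ?string {
	mut conn := docker.new_conn()?
	defer {
		conn.close() or {}
	}

	// Raw GET against an endpoint without a wrapper function.
	conn.send_request(Method.get, urllib.parse('/v1.41/info')?)?
	head, body := conn.read_response()?

	if head.status_code != 200 {
		return error('info failed with status $head.status_code')
	}

	return body
}

fn main() {
	println(info() or { panic(err) })
}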
diff --git a/images.v b/images.v
new file mode 100644
index 0000000..6161565
--- /dev/null
+++ b/images.v
@@ -0,0 +1,61 @@
+module docker
+
+import net.http { Method }
+import net.urllib
+import json
+
+struct Image {
+pub:
+	id string [json: Id]
+}
+
+// pull_image pulls the given image:tag.
+pub fn (mut d DockerConn) pull_image(image string, tag string) ? {
+	d.send_request(Method.post, urllib.parse('/v1.41/images/create?fromImage=$image&tag=$tag')?)?
+	head := d.read_response_head()?
+
+	if head.status_code != 200 {
+		content_length := head.header.get(http.CommonHeader.content_length)?.int()
+		body := d.read_response_body(content_length)?
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	// Keep reading the body until the pull has completed
+	mut body := d.get_chunked_response_reader()
+
+	mut buf := []u8{len: 1024}
+
+	for {
+		body.read(mut buf) or { break }
+	}
+}
+
+// create_image_from_container creates a new image from a container.
+pub fn (mut d DockerConn) create_image_from_container(id string, repo string, tag string) ?Image {
+	d.send_request(Method.post, urllib.parse('/v1.41/commit?container=$id&repo=$repo&tag=$tag')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 201 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+
+	data := json.decode(Image, body)?
+
+	return data
+}
+
+// remove_image removes the image with the given id.
+pub fn (mut d DockerConn) remove_image(id string) ? {
+	d.send_request(Method.delete, urllib.parse('/v1.41/images/$id')?)?
+	head, body := d.read_response()?
+
+	if head.status_code != 200 {
+		data := json.decode(DockerError, body)?
+
+		return error(data.message)
+	}
+}
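A hypothetical combination of these image calls with the container API, for example committing an existing container's filesystem as a new image (the container id, repository name and tag below are placeholders, and the snippet is not part of the commit):

import docker

fn snapshot(container_id string) ?string {
	mut conn := docker.new_conn()?
	defer {
		conn.close() or {}
	}

	// Make sure the base image exists locally, then commit the container's
	// current state as a new image and return its id.
	conn.pull_image('alpine', '3.16')?
	img := conn.create_image_from_container(container_id, 'my-snapshots', 'latest')?

	return img.id
}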
diff --git a/stream.v b/stream.v
new file mode 100644
index 0000000..001f4b3
--- /dev/null
+++ b/stream.v
@@ -0,0 +1,135 @@
+module docker
+
+import io
+import util
+import encoding.binary
+import encoding.hex
+
+// ChunkedResponseReader parses an underlying HTTP chunked response, exposing
+// it as if it was a continuous stream of data.
+struct ChunkedResponseReader {
+mut:
+	reader              io.BufferedReader
+	bytes_left_in_chunk u64
+	started             bool
+}
+
+// new_chunked_response_reader creates a new ChunkedResponseReader on the heap
+// with the provided reader.
+pub fn new_chunked_response_reader(reader io.BufferedReader) &ChunkedResponseReader {
+	r := &ChunkedResponseReader{
+		reader: reader
+	}
+
+	return r
+}
+
+// read satisfies the io.Reader interface.
+pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
+	if r.bytes_left_in_chunk == 0 {
+		// An io.BufferedReader always returns none if its stream has
+		// ended.
+		r.bytes_left_in_chunk = r.read_chunk_size()?
+	}
+
+	mut c := 0
+
+	// Make sure we don't read more than we can safely read. This is to avoid
+	// the underlying reader from becoming out of sync with our parsing:
+	if buf.len > r.bytes_left_in_chunk {
+		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
+	} else {
+		c = r.reader.read(mut buf)?
+	}
+
+	r.bytes_left_in_chunk -= u64(c)
+
+	return c
+}
+
+// read_chunk_size advances the reader & reads the size of the next HTTP chunk.
+// This function should only be called if the previous chunk has been
+// completely consumed.
+fn (mut r ChunkedResponseReader) read_chunk_size() ?u64 {
+	if r.started {
+		mut buf := []u8{len: 2}
+
+		// Each chunk ends with a `\r\n` which we want to skip first
+		r.reader.read(mut buf)?
+	}
+
+	r.started = true
+
+	mut res := []u8{}
+	util.read_until_separator(mut r.reader, mut res, http_chunk_separator)?
+
+	// The length of the next chunk is provided as a hexadecimal
+	mut num_data := hex.decode(res#[..-2].bytestr())?
+
+	for num_data.len < 8 {
+		num_data.insert(0, 0)
+	}
+
+	num := binary.big_endian_u64(num_data)
+
+	// This only occurs for the very last chunk, which always reports a size of
+	// 0.
+	if num == 0 {
+		return none
+	}
+
+	return num
+}
+
+// StreamFormatReader parses an underlying stream of Docker logs, removing the
+// header bytes.
+struct StreamFormatReader {
+mut:
+	reader              ChunkedResponseReader
+	bytes_left_in_chunk u32
+}
+
+// new_stream_format_reader creates a new StreamFormatReader using the given
+// reader.
+pub fn new_stream_format_reader(reader ChunkedResponseReader) &StreamFormatReader {
+	r := &StreamFormatReader{
+		reader: reader
+	}
+
+	return r
+}
+
+// read satisfies the io.Reader interface.
+pub fn (mut r StreamFormatReader) read(mut buf []u8) ?int {
+	if r.bytes_left_in_chunk == 0 {
+		r.bytes_left_in_chunk = r.read_chunk_size()?
+	}
+
+	mut c := 0
+
+	if buf.len > r.bytes_left_in_chunk {
+		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
+	} else {
+		c = r.reader.read(mut buf)?
+	}
+
+	r.bytes_left_in_chunk -= u32(c)
+
+	return c
+}
+
+// read_chunk_size advances the reader & reads the header bytes for the length
+// of the next chunk.
+fn (mut r StreamFormatReader) read_chunk_size() ?u32 {
+	mut buf := []u8{len: 8}
+
+	r.reader.read(mut buf)?
+
+	num := binary.big_endian_u32(buf[4..])
+
+	if num == 0 {
+		return none
+	}
+
+	return num
+}
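The 8-byte header that StreamFormatReader.read_chunk_size strips follows Docker's multiplexed stream format: one byte for the stream type (0 = stdin, 1 = stdout, 2 = stderr), three padding bytes, and the payload length as a big-endian u32, which is exactly what big_endian_u32(buf[4..]) extracts. A small standalone illustration of that decoding step (not part of the commit):

import encoding.binary

fn main() {
	// One frame header from a multiplexed log stream: stream type 1 (stdout),
	// three padding bytes, then the payload length as a big-endian u32.
	header := [u8(1), 0, 0, 0, 0, 0, 0, 12]

	length := binary.big_endian_u32(header[4..])

	println('stdout frame carrying $length bytes') // 12
}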
diff --git a/util/util.v b/util/util.v
new file mode 100644
index 0000000..412e39c
--- /dev/null
+++ b/util/util.v
@@ -0,0 +1,65 @@
+module util
+
+import io
+
+// reader_to_writer tries to consume the entire reader & write it to the writer.
+pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? {
+	mut buf := []u8{len: 10 * 1024}
+
+	for {
+		bytes_read := reader.read(mut buf) or { break }
+		mut bytes_written := 0
+
+		for bytes_written < bytes_read {
+			c := writer.write(buf[bytes_written..bytes_read]) or { break }
+
+			bytes_written += c
+		}
+	}
+}
+
+// match_array_in_array returns how many elements of a2 overlap with a1. For
+// example, if a1 = "abcd" & a2 = "cd", the result will be 2. If the match is
+// not at the end of a1, the result is 0.
+pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
+	mut i := 0
+	mut match_len := 0
+
+	for i + match_len < a1.len {
+		if a1[i + match_len] == a2[match_len] {
+			match_len += 1
+		} else {
+			i += match_len + 1
+			match_len = 0
+		}
+	}
+
+	return match_len
+}
+
+// read_until_separator consumes an io.Reader until it encounters some
+// separator array. The data read is stored inside the provided res array.
+pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
+	mut buf := []u8{len: sep.len}
+
+	for {
+		c := reader.read(mut buf)?
+		res << buf[..c]
+
+		match_len := match_array_in_array(buf[..c], sep)
+
+		if match_len == sep.len {
+			break
+		}
+
+		if match_len > 0 {
+			match_left := sep.len - match_len
+			c2 := reader.read(mut buf[..match_left])?
+			res << buf[..c2]
+
+			if buf[..c2] == sep[match_len..] {
+				break
+			}
+		}
+	}
+}
diff --git a/v.mod b/v.mod
new file mode 100644
index 0000000..bc42cd7
--- /dev/null
+++ b/v.mod
@@ -0,0 +1,7 @@
+Module {
+	name: 'vdocker'
+	description: 'Library for interacting with the Docker HTTP API'
+	version: '0.0.0'
+	license: 'MIT'
+	dependencies: []
+}
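To illustrate the semantics described in match_array_in_array's doc comment, a small sketch (not part of the commit; it assumes compilation inside this repository so `import util` resolves, and note the helper is tailored to read_until_separator, which never hands it a window longer than the separator):

import util

fn main() {
	// The separator fully matches at the end of the haystack: overlap of 2.
	println(util.match_array_in_array('abcd'.bytes(), 'cd'.bytes())) // 2
	// Only the first byte of the separator appears at the end: overlap of 1.
	println(util.match_array_in_array('abc'.bytes(), 'cd'.bytes())) // 1
	// No overlap at the end at all.
	println(util.match_array_in_array('abcd'.bytes(), 'xy'.bytes())) // 0
}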