Merge branch 'v-0.3.2' into dev
ci/woodpecker/push/lint Pipeline failed

dev
Jef Roosens 2022-11-01 20:47:28 +01:00
commit db0818024e
Signed by: Jef Roosens
GPG Key ID: B75D4F293C7052DB
9 changed files with 113 additions and 107 deletions
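
The diffs below track two changes: the Woodpecker CI config now pins the 0.3.2 toolchain image, and the Docker client code moves from V's old `?` error returns to the `!` result syntax used by newer V releases. A minimal, hypothetical sketch of the caller-side pattern (the function name and timestamp are illustrative, not from this repo):

import time

// `!time.Time` declares a result type; `!` after a call propagates its error.
fn parse_created(raw string) !time.Time {
	return time.parse_rfc3339(raw)!
}

fn main() {
	// Callers either propagate with `!` or handle the error with `or {}`.
	created := parse_created('2022-11-01T20:47:28Z') or {
		eprintln('invalid timestamp: $err')
		return
	}
	println(created)
}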

View File

@@ -1,13 +0,0 @@
-branches:
-  exclude: [ main ]
-
-platform: 'linux/amd64'
-
-pipeline:
-  lint:
-    image: 'chewingbever/vlang:latest'
-    pull: true
-    commands:
-      - make lint
-    when:
-      event: [ push ]

View File

@@ -0,0 +1,19 @@
+branches:
+  exclude: [ main ]
+
+platform: 'linux/amd64'
+
+pipeline:
+  build:
+    image: 'git.rustybever.be/chewing_bever/vlang:0.3.2'
+    commands:
+      - make
+    when:
+      event: [ push ]
+
+  lint:
+    image: 'git.rustybever.be/chewing_bever/vlang:0.3.2'
+    commands:
+      - make lint
+    when:
+      event: [ push ]

View File

@@ -13,10 +13,10 @@ pub struct ContainerListConfig {
 	filters map[string][]string
 }
 
-pub fn (mut d DockerConn) container_list(c ContainerListConfig) ?[]ContainerListItem {
+pub fn (mut d DockerConn) container_list(c ContainerListConfig) ![]ContainerListItem {
 	d.get('/containers/json')
 	d.params(c)
-	d.send()?
+	d.send()!
 
 	return d.read_json_response<[]ContainerListItem>()
 }
@@ -36,28 +36,28 @@ pub:
 	warnings []string [json: Warnings]
 }
 
-pub fn (mut d DockerConn) container_create(c NewContainer) ?CreatedContainer {
-	d.send_request_with_json(Method.post, '/containers/create', c)?
-	head, res := d.read_response()?
+pub fn (mut d DockerConn) container_create(c NewContainer) !CreatedContainer {
+	d.send_request_with_json(Method.post, '/containers/create', c)!
+	head, res := d.read_response()!
 
 	if head.status_code != 201 {
-		data := json.decode(DockerError, res)?
+		data := json.decode(DockerError, res)!
 
 		return error(data.message)
 	}
 
-	data := json.decode(CreatedContainer, res)?
+	data := json.decode(CreatedContainer, res)!
 
 	return data
 }
 
 // start_container starts the container with the given id.
-pub fn (mut d DockerConn) container_start(id string) ? {
-	d.send_request(Method.post, '/containers/$id/start')?
-	head, body := d.read_response()?
+pub fn (mut d DockerConn) container_start(id string) ! {
+	d.send_request(Method.post, '/containers/$id/start')!
+	head, body := d.read_response()!
 
 	if head.status_code != 204 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}
@@ -81,47 +81,47 @@ pub mut:
 	end_time time.Time [skip]
 }
 
-pub fn (mut d DockerConn) container_inspect(id string) ?ContainerInspect {
-	d.send_request(Method.get, '/containers/$id/json')?
-	head, body := d.read_response()?
+pub fn (mut d DockerConn) container_inspect(id string) !ContainerInspect {
+	d.send_request(Method.get, '/containers/$id/json')!
+	head, body := d.read_response()!
 
 	if head.status_code != 200 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}
 
-	mut data := json.decode(ContainerInspect, body)?
+	mut data := json.decode(ContainerInspect, body)!
 
 	// The Docker engine API *should* always return UTC time.
-	data.state.start_time = time.parse_rfc3339(data.state.start_time_str)?
+	data.state.start_time = time.parse_rfc3339(data.state.start_time_str)!
 
 	if data.state.status == 'exited' {
-		data.state.end_time = time.parse_rfc3339(data.state.end_time_str)?
+		data.state.end_time = time.parse_rfc3339(data.state.end_time_str)!
 	}
 
 	return data
 }
 
-pub fn (mut d DockerConn) container_remove(id string) ? {
-	d.send_request(Method.delete, '/containers/$id')?
-	head, body := d.read_response()?
+pub fn (mut d DockerConn) container_remove(id string) ! {
+	d.send_request(Method.delete, '/containers/$id')!
+	head, body := d.read_response()!
 
 	if head.status_code != 204 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}
 }
 
-pub fn (mut d DockerConn) container_get_logs(id string) ?&StreamFormatReader {
-	d.send_request(Method.get, '/containers/$id/logs?stdout=true&stderr=true')?
-	head := d.read_response_head()?
+pub fn (mut d DockerConn) container_get_logs(id string) !&StreamFormatReader {
+	d.send_request(Method.get, '/containers/$id/logs?stdout=true&stderr=true')!
+	head := d.read_response_head()!
 
 	if head.status_code != 200 {
-		content_length := head.header.get(http.CommonHeader.content_length)?.int()
-		body := d.read_response_body(content_length)?
-		data := json.decode(DockerError, body)?
+		content_length := head.header.get(http.CommonHeader.content_length)!.int()
+		body := d.read_response_body(content_length)!
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}

View File

@@ -31,8 +31,8 @@ mut:
 }
 
 // new_conn creates a new connection to the Docker daemon.
-pub fn new_conn() ?&DockerConn {
-	s := unix.connect_stream(docker.socket)?
+pub fn new_conn() !&DockerConn {
+	s := unix.connect_stream(docker.socket)!
 
 	d := &DockerConn{
 		socket: s
@@ -43,27 +43,27 @@ pub fn new_conn() ?&DockerConn {
 }
 
 // close closes the underlying socket connection.
-pub fn (mut d DockerConn) close() ? {
-	d.socket.close()?
+pub fn (mut d DockerConn) close() ! {
+	d.socket.close()!
 }
 
 // send_request sends an HTTP request without body.
-fn (mut d DockerConn) send_request(method http.Method, url_str string) ? {
-	url := urllib.parse('/$docker.api_version$url_str')?
+fn (mut d DockerConn) send_request(method http.Method, url_str string) ! {
+	url := urllib.parse('/$docker.api_version$url_str')!
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\n\n'
 
-	d.socket.write_string(req)?
+	d.socket.write_string(req)!
 
 	// When starting a new request, the reader needs to be reset.
 	d.reader = io.new_buffered_reader(reader: d.socket)
 }
 
 // send_request_with_body sends an HTTP request with the given body.
-fn (mut d DockerConn) send_request_with_body(method http.Method, url_str string, content_type string, body string) ? {
-	url := urllib.parse('/$docker.api_version$url_str')?
+fn (mut d DockerConn) send_request_with_body(method http.Method, url_str string, content_type string, body string) ! {
+	url := urllib.parse('/$docker.api_version$url_str')!
 	req := '$method $url.request_uri() HTTP/1.1\nHost: localhost\nContent-Type: $content_type\nContent-Length: $body.len\n\n$body\n\n'
 
-	d.socket.write_string(req)?
+	d.socket.write_string(req)!
 
 	// When starting a new request, the reader needs to be reset.
 	d.reader = io.new_buffered_reader(reader: d.socket)
@@ -71,7 +71,7 @@ fn (mut d DockerConn) send_request_with_body(method http.Method, url_str string,
 
 // send_request_with_json<T> is a convenience wrapper around
 // send_request_with_body that encodes the input as JSON.
-fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url_str string, data &T) ? {
+fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url_str string, data &T) ! {
 	body := json.encode(data)
 
 	return d.send_request_with_body(method, url_str, 'application/json', body)
@@ -81,17 +81,17 @@ fn (mut d DockerConn) send_request_with_json<T>(method http.Method, url_str stri
 // '\r\n\r\n', after which it parses the response as an HTTP response.
 // Importantly, this function never consumes the reader past the HTTP
 // separator, so the body can be read fully later on.
-fn (mut d DockerConn) read_response_head() ?http.Response {
+fn (mut d DockerConn) read_response_head() !http.Response {
 	mut res := []u8{}
 
-	util.read_until_separator(mut d.reader, mut res, docker.http_separator)?
+	util.read_until_separator(mut d.reader, mut res, docker.http_separator)!
 
 	return http.parse_response(res.bytestr())
 }
 
 // read_response_body reads `length` bytes from the stream. It can be used when
 // the response encoding isn't chunked to fully read it.
-fn (mut d DockerConn) read_response_body(length int) ?string {
+fn (mut d DockerConn) read_response_body(length int) !string {
 	if length == 0 {
 		return ''
 	}
@@ -103,7 +103,7 @@ fn (mut d DockerConn) read_response_body(length int) ?string {
 	for builder.len < length {
 		c = d.reader.read(mut buf) or { break }
 
-		builder.write(buf[..c])?
+		builder.write(buf[..c])!
 	}
 
 	return builder.str()
@@ -112,34 +112,34 @@ fn (mut d DockerConn) read_response_body(length int) ?string {
 // read_response is a convenience function which always consumes the entire
 // response & returns it. It should only be used when we're certain that the
 // result isn't too large.
-fn (mut d DockerConn) read_response() ?(http.Response, string) {
-	head := d.read_response_head()?
+fn (mut d DockerConn) read_response() !(http.Response, string) {
+	head := d.read_response_head()!
 
 	if head.header.get(http.CommonHeader.transfer_encoding) or { '' } == 'chunked' {
 		mut builder := strings.new_builder(1024)
 		mut body := d.get_chunked_response_reader()
 
-		util.reader_to_writer(mut body, mut builder)?
+		util.reader_to_writer(mut body, mut builder)!
 
 		return head, builder.str()
 	}
 
-	content_length := head.header.get(http.CommonHeader.content_length)?.int()
-	res := d.read_response_body(content_length)?
+	content_length := head.header.get(http.CommonHeader.content_length)!.int()
+	res := d.read_response_body(content_length)!
 
 	return head, res
 }
 
-fn (mut d DockerConn) read_json_response<T>() ?T {
-	head, body := d.read_response()?
+fn (mut d DockerConn) read_json_response<T>() !T {
+	head, body := d.read_response()!
 
 	if head.status_code < 200 || head.status_code > 300 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return docker_error(head.status_code, data.message)
 	}
 
-	mut data := json.decode(T, body)?
+	mut data := json.decode(T, body)!
 
 	//$for field in T.fields {
 	//$if field.typ is time.Time {
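
The generic helpers above are what the per-endpoint files build on. A hypothetical wrapper sketch (the /version endpoint and the VersionInfo struct are illustrative, not part of this diff):

struct VersionInfo {
	version     string [json: Version]
	api_version string [json: ApiVersion]
}

// Hypothetical endpoint wrapper built on send_request + read_json_response.
fn (mut d DockerConn) version() !VersionInfo {
	d.send_request(Method.get, '/version')!

	return d.read_json_response<VersionInfo>()
}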

View File

@@ -9,14 +9,14 @@ pub:
 }
 
 // pull_image pulls the given image:tag.
-pub fn (mut d DockerConn) pull_image(image string, tag string) ? {
-	d.send_request(Method.post, '/images/create?fromImage=$image&tag=$tag')?
-	head := d.read_response_head()?
+pub fn (mut d DockerConn) pull_image(image string, tag string) ! {
+	d.send_request(Method.post, '/images/create?fromImage=$image&tag=$tag')!
+	head := d.read_response_head()!
 
 	if head.status_code != 200 {
-		content_length := head.header.get(http.CommonHeader.content_length)?.int()
-		body := d.read_response_body(content_length)?
-		data := json.decode(DockerError, body)?
+		content_length := head.header.get(http.CommonHeader.content_length)!.int()
+		body := d.read_response_body(content_length)!
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}
@@ -32,28 +32,28 @@ pub fn (mut d DockerConn) pull_image(image string, tag string) ? {
 }
 
 // create_image_from_container creates a new image from a container.
-pub fn (mut d DockerConn) create_image_from_container(id string, repo string, tag string) ?Image {
-	d.send_request(Method.post, '/commit?container=$id&repo=$repo&tag=$tag')?
-	head, body := d.read_response()?
+pub fn (mut d DockerConn) create_image_from_container(id string, repo string, tag string) !Image {
+	d.send_request(Method.post, '/commit?container=$id&repo=$repo&tag=$tag')!
+	head, body := d.read_response()!
 
 	if head.status_code != 201 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}
 
-	data := json.decode(Image, body)?
+	data := json.decode(Image, body)!
 
 	return data
 }
 
 // remove_image removes the image with the given id.
-pub fn (mut d DockerConn) remove_image(id string) ? {
-	d.send_request(Method.delete, '/images/$id')?
-	head, body := d.read_response()?
+pub fn (mut d DockerConn) remove_image(id string) ! {
+	d.send_request(Method.delete, '/images/$id')!
+	head, body := d.read_response()!
 
 	if head.status_code != 200 {
-		data := json.decode(DockerError, body)?
+		data := json.decode(DockerError, body)!
 
 		return error(data.message)
 	}

View File

@@ -26,7 +26,7 @@ fn (mut d DockerConn) params<T>(o T) {
 	}
 }
 
-fn (mut d DockerConn) send() ? {
+fn (mut d DockerConn) send() ! {
 	mut full_url := d.url
 
 	if d.params.len > 0 {
@@ -35,7 +35,7 @@ fn (mut d DockerConn) send() ? {
 	}
 
 	// This is to make sure we actually created a valid URL
-	parsed_url := urllib.parse(full_url)?
+	parsed_url := urllib.parse(full_url)!
 	final_url := parsed_url.request_uri()
 
 	req := if d.body == '' {
@@ -44,7 +44,7 @@ fn (mut d DockerConn) send() ? {
 		'$d.method $final_url HTTP/1.1\nHost: localhost\nContent-Type: $d.content_type\nContent-Length: $d.body.len\n\n$d.body\n\n'
 	}
 
-	d.socket.write_string(req)?
+	d.socket.write_string(req)!
 
 	// When starting a new request, the reader needs to be reset.
 	d.reader = io.new_buffered_reader(reader: d.socket)
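
For reference, send() splices any collected parameters into the URL before writing a raw HTTP/1.1 request over the socket. A standalone sketch of that assembly step, assuming the parameters live in a map[string]string (the real field type is not visible in this diff):

fn build_full_url(base string, params map[string]string) string {
	if params.len == 0 {
		return base
	}

	// Join key=value pairs with '&' and append them as a query string.
	query := params.keys().map('$it=${params[it]}').join('&')
	return '$base?$query'
}

fn main() {
	// Prints: /containers/json?all=true
	println(build_full_url('/containers/json', {
		'all': 'true'
	}))
}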

View File

@@ -25,11 +25,11 @@ pub fn new_chunked_response_reader(reader io.BufferedReader) &ChunkedResponseRea
 }
 
 // read satisfies the io.Reader interface.
-pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
+pub fn (mut r ChunkedResponseReader) read(mut buf []u8) !int {
 	if r.bytes_left_in_chunk == 0 {
 		// An io.BufferedReader always returns none if its stream has
 		// ended.
-		r.bytes_left_in_chunk = r.read_chunk_size()?
+		r.bytes_left_in_chunk = r.read_chunk_size()!
 	}
 
 	mut c := 0
@@ -37,9 +37,9 @@ pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
 	// Make sure we don't read more than we can safely read. This is to avoid
 	// the underlying reader from becoming out of sync with our parsing:
 	if buf.len > r.bytes_left_in_chunk {
-		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
+		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])!
 	} else {
-		c = r.reader.read(mut buf)?
+		c = r.reader.read(mut buf)!
 	}
 
 	r.bytes_left_in_chunk -= u64(c)
@@ -50,21 +50,21 @@ pub fn (mut r ChunkedResponseReader) read(mut buf []u8) ?int {
 // read_chunk_size advances the reader & reads the size of the next HTTP chunk.
 // This function should only be called if the previous chunk has been
 // completely consumed.
-fn (mut r ChunkedResponseReader) read_chunk_size() ?u64 {
+fn (mut r ChunkedResponseReader) read_chunk_size() !u64 {
 	if r.started {
 		mut buf := []u8{len: 2}
 
 		// Each chunk ends with a `\r\n` which we want to skip first
-		r.reader.read(mut buf)?
+		r.reader.read(mut buf)!
 	}
 
 	r.started = true
 	mut res := []u8{}
 
-	util.read_until_separator(mut r.reader, mut res, http_chunk_separator)?
+	util.read_until_separator(mut r.reader, mut res, http_chunk_separator)!
 
 	// The length of the next chunk is provided as a hexadecimal
-	mut num_data := hex.decode(res#[..-2].bytestr())?
+	mut num_data := hex.decode(res#[..-2].bytestr())!
 
 	for num_data.len < 8 {
 		num_data.insert(0, 0)
@@ -75,7 +75,7 @@ fn (mut r ChunkedResponseReader) read_chunk_size() ?u64 {
 	// This only occurs for the very last chunk, which always reports a size of
 	// 0.
 	if num == 0 {
-		return none
+		return error('end of stream')
 	}
 
 	return num
@@ -100,17 +100,17 @@ pub fn new_stream_format_reader(reader ChunkedResponseReader) &StreamFormatReade
 }
 
 // read satisfies the io.Reader interface.
-pub fn (mut r StreamFormatReader) read(mut buf []u8) ?int {
+pub fn (mut r StreamFormatReader) read(mut buf []u8) !int {
 	if r.bytes_left_in_chunk == 0 {
-		r.bytes_left_in_chunk = r.read_chunk_size()?
+		r.bytes_left_in_chunk = r.read_chunk_size()!
 	}
 
 	mut c := 0
 
 	if buf.len > r.bytes_left_in_chunk {
-		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])?
+		c = r.reader.read(mut buf[..r.bytes_left_in_chunk])!
 	} else {
-		c = r.reader.read(mut buf)?
+		c = r.reader.read(mut buf)!
 	}
 
 	r.bytes_left_in_chunk -= u32(c)
@@ -120,15 +120,15 @@ pub fn (mut r StreamFormatReader) read(mut buf []u8) ?int {
 // read_chunk_size advances the reader & reads the header bytes for the length
 // of the next chunk.
-fn (mut r StreamFormatReader) read_chunk_size() ?u32 {
+fn (mut r StreamFormatReader) read_chunk_size() !u32 {
 	mut buf := []u8{len: 8}
 
-	r.reader.read(mut buf)?
+	r.reader.read(mut buf)!
 
 	num := binary.big_endian_u32(buf[4..])
 
 	if num == 0 {
-		return none
+		return error('end of stream')
	}
 
 	return num
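
StreamFormatReader.read_chunk_size() relies on Docker's multiplexed stream format, where each frame starts with an 8-byte header: one byte for the stream type, three padding bytes, and a big-endian u32 payload size. A small self-contained sketch of that decoding step:

import encoding.binary

fn main() {
	// 8-byte frame header: stream type 1 (stdout), 12-byte payload.
	header := [u8(1), 0, 0, 0, 0, 0, 0, 12]

	size := binary.big_endian_u32(header[4..])
	println('stream type: ${header[0]}, payload size: $size')
}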

View File

@@ -3,7 +3,7 @@ module util
 import io
 
 // reader_to_writer tries to consume the entire reader & write it to the writer.
-pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ? {
+pub fn reader_to_writer(mut reader io.Reader, mut writer io.Writer) ! {
 	mut buf := []u8{len: 10 * 1024}
 
 	for {
@@ -39,11 +39,11 @@ pub fn match_array_in_array<T>(a1 []T, a2 []T) int {
 
 // read_until_separator consumes an io.Reader until it encounters some
 // separator array. The data read is stored inside the provided res array.
-pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
+pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ! {
 	mut buf := []u8{len: sep.len}
 
 	for {
-		c := reader.read(mut buf)?
+		c := reader.read(mut buf)!
 		res << buf[..c]
 
 		match_len := match_array_in_array(buf[..c], sep)
@@ -54,7 +54,7 @@ pub fn read_until_separator(mut reader io.Reader, mut res []u8, sep []u8) ? {
 
 		if match_len > 0 {
 			match_left := sep.len - match_len
-			c2 := reader.read(mut buf[..match_left])?
+			c2 := reader.read(mut buf[..match_left])!
 			res << buf[..c2]
 
 			if buf[..c2] == sep[match_len..] {
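
A hedged usage example of read_until_separator as it would be called from inside this module: collect bytes from a reader up to the blank line that ends an HTTP header block (the read_head helper name is made up for illustration):

import io

fn read_head(mut reader io.Reader) ![]u8 {
	mut res := []u8{}
	read_until_separator(mut reader, mut res, '\r\n\r\n'.bytes())!

	return res
}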

View File

@@ -35,13 +35,13 @@ struct VolumeListResponse {
 	warnings []string [json: Warnings]
 }
 
-pub fn (mut d DockerConn) volume_list() ?VolumeListResponse {
-	d.send_request(Method.get, '/volumes')?
-	mut data := d.read_json_response<VolumeListResponse>()?
+pub fn (mut d DockerConn) volume_list() !VolumeListResponse {
+	d.send_request(Method.get, '/volumes')!
+	mut data := d.read_json_response<VolumeListResponse>()!
 
 	for mut vol in data.volumes {
-		vol.created_at = time.parse_rfc3339(vol.created_at_str)?
+		vol.created_at = time.parse_rfc3339(vol.created_at_str)!
 	}
 
 	return data
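
Finally, a hypothetical caller of volume_list(); the Volume struct's name field is an assumption, while created_at is populated by the loop above:

fn print_volumes(mut d DockerConn) ! {
	data := d.volume_list()!

	for vol in data.volumes {
		// `vol.name` is assumed for illustration; `vol.created_at` is set by volume_list.
		println('$vol.name (created $vol.created_at)')
	}
}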