all: replace []byte with []u8
parent 0527ac633e
commit fb192d949b
|
@ -14,7 +14,7 @@ fn main() {
|
||||||
mut checksum := u64(0)
|
mut checksum := u64(0)
|
||||||
mut start_pos := 0
|
mut start_pos := 0
|
||||||
mut bgenerating := benchmark.start()
|
mut bgenerating := benchmark.start()
|
||||||
mut bytepile := []byte{}
|
mut bytepile := []u8{}
|
||||||
for _ in 0 .. sample_size * max_str_len {
|
for _ in 0 .. sample_size * max_str_len {
|
||||||
bytepile << u8(rand.int_in_range(40, 125) or { 40 })
|
bytepile << u8(rand.int_in_range(40, 125) or { 40 })
|
||||||
}
|
}
|
||||||
|
|
|
@ -2209,7 +2209,7 @@ fn (t Tree) array_node_int(nodes []int) &Node {
|
||||||
return arr
|
return arr
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (t Tree) array_node_u8(nodes []byte) &Node {
|
fn (t Tree) array_node_u8(nodes []u8) &Node {
|
||||||
mut arr := new_array()
|
mut arr := new_array()
|
||||||
for node in nodes {
|
for node in nodes {
|
||||||
arr.add_item(t.number_node(node))
|
arr.add_item(t.number_node(node))
|
||||||
|
|
|
@ -46,7 +46,7 @@ fn (context Context) footer() string {
|
||||||
return ')\n'
|
return ')\n'
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (context Context) file2v(bname string, fbytes []byte, bn_max int) string {
|
fn (context Context) file2v(bname string, fbytes []u8, bn_max int) string {
|
||||||
mut sb := strings.new_builder(1000)
|
mut sb := strings.new_builder(1000)
|
||||||
bn_diff_len := bn_max - bname.len
|
bn_diff_len := bn_max - bname.len
|
||||||
sb.write_string('\t${bname}_len' + ' '.repeat(bn_diff_len - 4) + ' = $fbytes.len\n')
|
sb.write_string('\t${bname}_len' + ' '.repeat(bn_diff_len - 4) + ' = $fbytes.len\n')
|
||||||
|
@ -73,7 +73,7 @@ fn (context Context) file2v(bname string, fbytes []byte, bn_max int) string {
|
||||||
return sb.str()
|
return sb.str()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (context Context) bname_and_bytes(file string) ?(string, []byte) {
|
fn (context Context) bname_and_bytes(file string) ?(string, []u8) {
|
||||||
fname := os.file_name(file)
|
fname := os.file_name(file)
|
||||||
fname_escaped := fname.replace_each(['.', '_', '-', '_'])
|
fname_escaped := fname.replace_each(['.', '_', '-', '_'])
|
||||||
byte_name := '$context.prefix$fname_escaped'.to_lower()
|
byte_name := '$context.prefix$fname_escaped'.to_lower()
|
||||||
|
@ -120,7 +120,7 @@ fn main() {
|
||||||
if context.write_file != '' && os.file_ext(context.write_file) !in ['.vv', '.v'] {
|
if context.write_file != '' && os.file_ext(context.write_file) !in ['.vv', '.v'] {
|
||||||
context.write_file += '.v'
|
context.write_file += '.v'
|
||||||
}
|
}
|
||||||
mut file_byte_map := map[string][]byte{}
|
mut file_byte_map := map[string][]u8{}
|
||||||
for file in real_files {
|
for file in real_files {
|
||||||
bname, fbytes := context.bname_and_bytes(file) or {
|
bname, fbytes := context.bname_and_bytes(file) or {
|
||||||
eprintln(err.msg())
|
eprintln(err.msg())
|
||||||
|
|
|
@ -9,7 +9,7 @@ fn main() {
|
||||||
println('------------------------------------------')
|
println('------------------------------------------')
|
||||||
is_server := '-l' in os.args
|
is_server := '-l' in os.args
|
||||||
port := cmdline.option(os.args, '-p', '40001').int()
|
port := cmdline.option(os.args, '-p', '40001').int()
|
||||||
mut buf := []byte{len: 100}
|
mut buf := []u8{len: 100}
|
||||||
if is_server {
|
if is_server {
|
||||||
println('UDP echo server, listening for udp packets on port: $port')
|
println('UDP echo server, listening for udp packets on port: $port')
|
||||||
mut c := net.listen_udp(':$port') ?
|
mut c := net.listen_udp(':$port') ?
|
||||||
|
|
|
@ -35,14 +35,14 @@ pub fn (s ImageSettings) to_grid_settings() sim.GridSettings {
|
||||||
pub struct PPMWriter {
|
pub struct PPMWriter {
|
||||||
mut:
|
mut:
|
||||||
file os.File
|
file os.File
|
||||||
cache []byte
|
cache []u8
|
||||||
cache_size int
|
cache_size int
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn ppm_writer_for_fname(fname string, settings ImageSettings) ?&PPMWriter {
|
pub fn ppm_writer_for_fname(fname string, settings ImageSettings) ?&PPMWriter {
|
||||||
mut writer := &PPMWriter{
|
mut writer := &PPMWriter{
|
||||||
cache_size: settings.cache_size
|
cache_size: settings.cache_size
|
||||||
cache: []byte{cap: settings.cache_size}
|
cache: []u8{cap: settings.cache_size}
|
||||||
}
|
}
|
||||||
writer.start_for_file(fname, settings) ?
|
writer.start_for_file(fname, settings) ?
|
||||||
return writer
|
return writer
|
||||||
|
|
|
@ -23,10 +23,10 @@ pub fn read_lines_from_file(file_path string) []string {
|
||||||
return rows
|
return rows
|
||||||
}
|
}
|
||||||
|
|
||||||
// read a file as []byte
|
// read a file as []u8
|
||||||
pub fn read_bytes_from_file(file_path string) []byte {
|
pub fn read_bytes_from_file(file_path string) []u8 {
|
||||||
mut path := ''
|
mut path := ''
|
||||||
mut buffer := []byte{}
|
mut buffer := []u8{}
|
||||||
$if android {
|
$if android {
|
||||||
path = 'models/' + file_path
|
path = 'models/' + file_path
|
||||||
buffer = os.read_apk_asset(path) or {
|
buffer = os.read_apk_asset(path) or {
|
||||||
|
|
|
@ -215,9 +215,9 @@ fn (mut app App) read_bytes(path string) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// read a file as []byte
|
// read a file as []u8
|
||||||
pub fn read_bytes_from_file(file_path string) []byte {
|
pub fn read_bytes_from_file(file_path string) []u8 {
|
||||||
mut buffer := []byte{}
|
mut buffer := []u8{}
|
||||||
buffer = os.read_bytes(file_path) or {
|
buffer = os.read_bytes(file_path) or {
|
||||||
eprintln('ERROR: Texure file: [$file_path] NOT FOUND.')
|
eprintln('ERROR: Texure file: [$file_path] NOT FOUND.')
|
||||||
exit(0)
|
exit(0)
|
||||||
|
|
|
@ -26,7 +26,7 @@ const (
|
||||||
|
|
||||||
// from_bytes converts a byte array into a bitfield.
|
// from_bytes converts a byte array into a bitfield.
|
||||||
// [0x0F, 0x01] => 0000 1111 0000 0001
|
// [0x0F, 0x01] => 0000 1111 0000 0001
|
||||||
pub fn from_bytes(input []byte) BitField {
|
pub fn from_bytes(input []u8) BitField {
|
||||||
mut output := new(input.len * 8)
|
mut output := new(input.len * 8)
|
||||||
for i, b in input {
|
for i, b in input {
|
||||||
mut ob := u8(0)
|
mut ob := u8(0)
|
||||||
|
@ -61,7 +61,7 @@ pub fn from_bytes(input []byte) BitField {
|
||||||
|
|
||||||
// from_bytes_lowest_bits_first converts a byte array into a bitfield
|
// from_bytes_lowest_bits_first converts a byte array into a bitfield
|
||||||
// [0x0F, 0x01] => 1111 0000 1000 0000
|
// [0x0F, 0x01] => 1111 0000 1000 0000
|
||||||
pub fn from_bytes_lowest_bits_first(input []byte) BitField {
|
pub fn from_bytes_lowest_bits_first(input []u8) BitField {
|
||||||
mut output := new(input.len * 8)
|
mut output := new(input.len * 8)
|
||||||
for i, b in input {
|
for i, b in input {
|
||||||
output.field[i / 4] |= u32(b) << ((i % 4) * 8)
|
output.field[i / 4] |= u32(b) << ((i % 4) * 8)
|
||||||
|
|
|
@ -839,7 +839,7 @@ pub fn (a []string) str() string {
|
||||||
|
|
||||||
// hex returns a string with the hexadecimal representation
|
// hex returns a string with the hexadecimal representation
|
||||||
// of the byte elements of the array.
|
// of the byte elements of the array.
|
||||||
pub fn (b []byte) hex() string {
|
pub fn (b []u8) hex() string {
|
||||||
mut hex := unsafe { malloc_noscan(b.len * 2 + 1) }
|
mut hex := unsafe { malloc_noscan(b.len * 2 + 1) }
|
||||||
mut dst_i := 0
|
mut dst_i := 0
|
||||||
for i in b {
|
for i in b {
|
||||||
|
@ -865,7 +865,7 @@ pub fn (b []byte) hex() string {
|
||||||
// Returns the number of elements copied.
|
// Returns the number of elements copied.
|
||||||
// NOTE: This is not an `array` method. It is a function that takes two arrays of bytes.
|
// NOTE: This is not an `array` method. It is a function that takes two arrays of bytes.
|
||||||
// See also: `arrays.copy`.
|
// See also: `arrays.copy`.
|
||||||
pub fn copy(mut dst []byte, src []byte) int {
|
pub fn copy(mut dst []u8, src []u8) int {
|
||||||
min := if dst.len < src.len { dst.len } else { src.len }
|
min := if dst.len < src.len { dst.len } else { src.len }
|
||||||
if min > 0 {
|
if min > 0 {
|
||||||
unsafe { vmemmove(&u8(dst.data), src.data, min) }
|
unsafe { vmemmove(&u8(dst.data), src.data, min) }
|
||||||
|
@ -913,10 +913,10 @@ pub fn (a array) pointers() []voidptr {
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
// vbytes on`voidptr` makes a V []byte structure from a C style memory buffer.
|
// vbytes on`voidptr` makes a V []u8 structure from a C style memory buffer.
|
||||||
// NOTE: the data is reused, NOT copied!
|
// NOTE: the data is reused, NOT copied!
|
||||||
[unsafe]
|
[unsafe]
|
||||||
pub fn (data voidptr) vbytes(len int) []byte {
|
pub fn (data voidptr) vbytes(len int) []u8 {
|
||||||
res := array{
|
res := array{
|
||||||
element_size: 1
|
element_size: 1
|
||||||
data: data
|
data: data
|
||||||
|
@ -926,9 +926,9 @@ pub fn (data voidptr) vbytes(len int) []byte {
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
// vbytes on `&byte` makes a V []byte structure from a C style memory buffer.
|
// vbytes on `&byte` makes a V []u8 structure from a C style memory buffer.
|
||||||
// NOTE: the data is reused, NOT copied!
|
// NOTE: the data is reused, NOT copied!
|
||||||
[unsafe]
|
[unsafe]
|
||||||
pub fn (data &byte) vbytes(len int) []byte {
|
pub fn (data &byte) vbytes(len int) []u8 {
|
||||||
return unsafe { voidptr(data).vbytes(len) }
|
return unsafe { voidptr(data).vbytes(len) }
|
||||||
}
|
}
|
||||||
|
|
|
@ -525,9 +525,9 @@ pub fn (c u8) is_capital() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// clone clones the byte array, and returns the newly created copy.
|
// clone clones the byte array, and returns the newly created copy.
|
||||||
pub fn (b []u8) clone() []byte {
|
pub fn (b []u8) clone() []u8 {
|
||||||
mut res := []byte{len: b.len}
|
mut res := []u8{len: b.len}
|
||||||
// mut res := make([]byte, {repeat:b.len})
|
// mut res := make([]u8, {repeat:b.len})
|
||||||
for i in 0 .. b.len {
|
for i in 0 .. b.len {
|
||||||
res[i] = b[i]
|
res[i] = b[i]
|
||||||
}
|
}
|
||||||
|
|
|
@ -373,7 +373,7 @@ pub fn (mut a array) delete_last() {
|
||||||
pub fn (a &array) free() {
|
pub fn (a &array) free() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: once (a []byte) will work rewrite this
|
// todo: once (a []u8) will work rewrite this
|
||||||
pub fn (a array) bytestr() string {
|
pub fn (a array) bytestr() string {
|
||||||
res := ''
|
res := ''
|
||||||
#for (let i = 0;i < a.arr.len.valueOf();i++) res.str += String.fromCharCode(a.arr.get(new int(i)))
|
#for (let i = 0;i < a.arr.len.valueOf();i++) res.str += String.fromCharCode(a.arr.get(new int(i)))
|
||||||
|
@ -487,7 +487,7 @@ pub interface JS.Float64Array {
|
||||||
every(JS.EveryFn) JS.Boolean
|
every(JS.EveryFn) JS.Boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn uint8_array(arr []byte) JS.Uint8Array {
|
pub fn uint8_array(arr []u8) JS.Uint8Array {
|
||||||
#let tmp = new Array();
|
#let tmp = new Array();
|
||||||
|
|
||||||
for elem in arr {
|
for elem in arr {
|
||||||
|
|
|
@ -142,7 +142,7 @@ pub fn (x byte) hex() string {
|
||||||
|
|
||||||
// hex returns a string with the hexadecimal representation
|
// hex returns a string with the hexadecimal representation
|
||||||
// of the byte elements of the array.
|
// of the byte elements of the array.
|
||||||
pub fn (b []byte) hex() string {
|
pub fn (b []u8) hex() string {
|
||||||
mut hex := ''
|
mut hex := ''
|
||||||
for i in b {
|
for i in b {
|
||||||
mut z := i
|
mut z := i
|
||||||
|
|
|
@ -85,13 +85,13 @@ pub fn (s string) split(dot string) []string {
|
||||||
return arr
|
return arr
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (s string) bytes() []byte {
|
pub fn (s string) bytes() []u8 {
|
||||||
sep := ''
|
sep := ''
|
||||||
tmparr := s.str.split(sep.str).map(fn (it JS.Any) JS.Any {
|
tmparr := s.str.split(sep.str).map(fn (it JS.Any) JS.Any {
|
||||||
return JS.Any(u8(JS.String(it).charCodeAt(JS.Number(0))))
|
return JS.Any(u8(JS.String(it).charCodeAt(JS.Number(0))))
|
||||||
})
|
})
|
||||||
_ := tmparr
|
_ := tmparr
|
||||||
mut arr := []byte{}
|
mut arr := []u8{}
|
||||||
#arr = new array(new array_buffer({arr: tmparr,index_start: new int(0),len: new int(tmparr.length)}))
|
#arr = new array(new array_buffer({arr: tmparr,index_start: new int(0),len: new int(tmparr.length)}))
|
||||||
|
|
||||||
return arr
|
return arr
|
||||||
|
@ -500,7 +500,7 @@ pub fn (s string) strip_margin_custom(del byte) string {
|
||||||
}
|
}
|
||||||
// don't know how much space the resulting string will be, but the max it
|
// don't know how much space the resulting string will be, but the max it
|
||||||
// can be is this big
|
// can be is this big
|
||||||
mut ret := []byte{}
|
mut ret := []u8{}
|
||||||
#ret = new array()
|
#ret = new array()
|
||||||
|
|
||||||
mut count := 0
|
mut count := 0
|
||||||
|
|
|
@ -404,7 +404,7 @@ pub fn sys_dup2(oldfd int, newfd int) (i64, Errno) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// 59 sys_execve const char *filename const char *const argv[] const char *const envp[]
|
// 59 sys_execve const char *filename const char *const argv[] const char *const envp[]
|
||||||
// pub fn sys_execve(filename byteptr, argv []byteptr, envp []byteptr) int {
|
// pub fn sys_execve(filename byteptr, argv []u8ptr, envp []u8ptr) int {
|
||||||
// return sys_call3(59, filename, argv, envp)
|
// return sys_call3(59, filename, argv, envp)
|
||||||
//}
|
//}
|
||||||
|
|
||||||
|
|
|
@ -2,9 +2,9 @@ module builtin
|
||||||
|
|
||||||
// Note: this file will be removed soon
|
// Note: this file will be removed soon
|
||||||
|
|
||||||
// byteptr.vbytes() - makes a V []byte structure from a C style memory buffer. Note: the data is reused, NOT copied!
|
// byteptr.vbytes() - makes a V []u8 structure from a C style memory buffer. Note: the data is reused, NOT copied!
|
||||||
[unsafe]
|
[unsafe]
|
||||||
pub fn (data byteptr) vbytes(len int) []byte {
|
pub fn (data byteptr) vbytes(len int) []u8 {
|
||||||
return unsafe { voidptr(data).vbytes(len) }
|
return unsafe { voidptr(data).vbytes(len) }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ fn C.tinfl_decompress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_
|
||||||
// compresses an array of bytes using zlib and returns the compressed bytes in a new array
|
// compresses an array of bytes using zlib and returns the compressed bytes in a new array
|
||||||
// Example: compressed := zlib.compress(b) ?
|
// Example: compressed := zlib.compress(b) ?
|
||||||
[manualfree]
|
[manualfree]
|
||||||
pub fn compress(data []byte) ?[]byte {
|
pub fn compress(data []u8) ?[]u8 {
|
||||||
if u64(data.len) > zlib.max_size {
|
if u64(data.len) > zlib.max_size {
|
||||||
return error('data too large ($data.len > $zlib.max_size)')
|
return error('data too large ($data.len > $zlib.max_size)')
|
||||||
}
|
}
|
||||||
|
@ -38,7 +38,7 @@ pub fn compress(data []byte) ?[]byte {
|
||||||
// decompresses an array of bytes using zlib and returns the decompressed bytes in a new array
|
// decompresses an array of bytes using zlib and returns the decompressed bytes in a new array
|
||||||
// Example: decompressed := zlib.decompress(b) ?
|
// Example: decompressed := zlib.decompress(b) ?
|
||||||
[manualfree]
|
[manualfree]
|
||||||
pub fn decompress(data []byte) ?[]byte {
|
pub fn decompress(data []u8) ?[]u8 {
|
||||||
mut out_len := usize(0)
|
mut out_len := usize(0)
|
||||||
|
|
||||||
// flags = TINFL_FLAG_PARSE_ZLIB_HEADER (0x1)
|
// flags = TINFL_FLAG_PARSE_ZLIB_HEADER (0x1)
|
||||||
|
|
|
@ -28,7 +28,7 @@ mut:
|
||||||
// The key argument should be the AES key,
|
// The key argument should be the AES key,
|
||||||
// either 16, 24, or 32 bytes to select
|
// either 16, 24, or 32 bytes to select
|
||||||
// AES-128, AES-192, or AES-256.
|
// AES-128, AES-192, or AES-256.
|
||||||
pub fn new_cipher(key []byte) cipher.Block {
|
pub fn new_cipher(key []u8) cipher.Block {
|
||||||
k := key.len
|
k := key.len
|
||||||
match k {
|
match k {
|
||||||
16, 24, 32 {
|
16, 24, 32 {
|
||||||
|
@ -52,7 +52,7 @@ pub fn (c &AesCipher) block_size() int {
|
||||||
// NOTE: `dst` and `src` are both mutable for performance reasons.
|
// NOTE: `dst` and `src` are both mutable for performance reasons.
|
||||||
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
|
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
|
||||||
// NOTE: `dst` and `src` may be the same (overlapping entirely).
|
// NOTE: `dst` and `src` may be the same (overlapping entirely).
|
||||||
pub fn (c &AesCipher) encrypt(mut dst []byte, src []byte) {
|
pub fn (c &AesCipher) encrypt(mut dst []u8, src []u8) {
|
||||||
if src.len < aes.block_size {
|
if src.len < aes.block_size {
|
||||||
panic('crypto.aes: input not full block')
|
panic('crypto.aes: input not full block')
|
||||||
}
|
}
|
||||||
|
@ -71,7 +71,7 @@ pub fn (c &AesCipher) encrypt(mut dst []byte, src []byte) {
|
||||||
// NOTE: `dst` and `src` are both mutable for performance reasons.
|
// NOTE: `dst` and `src` are both mutable for performance reasons.
|
||||||
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
|
// NOTE: `dst` and `src` must both be pre-allocated to the correct length.
|
||||||
// NOTE: `dst` and `src` may be the same (overlapping entirely).
|
// NOTE: `dst` and `src` may be the same (overlapping entirely).
|
||||||
pub fn (c &AesCipher) decrypt(mut dst []byte, src []byte) {
|
pub fn (c &AesCipher) decrypt(mut dst []u8, src []u8) {
|
||||||
if src.len < aes.block_size {
|
if src.len < aes.block_size {
|
||||||
panic('crypto.aes: input not full block')
|
panic('crypto.aes: input not full block')
|
||||||
}
|
}
|
||||||
|
|
|
@ -38,7 +38,7 @@ module aes
|
||||||
import encoding.binary
|
import encoding.binary
|
||||||
|
|
||||||
// Encrypt one block from src into dst, using the expanded key xk.
|
// Encrypt one block from src into dst, using the expanded key xk.
|
||||||
fn encrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
|
fn encrypt_block_generic(xk []u32, mut dst []u8, src []u8) {
|
||||||
_ = src[15] // early bounds check
|
_ = src[15] // early bounds check
|
||||||
mut s0 := binary.big_endian_u32(src[..4])
|
mut s0 := binary.big_endian_u32(src[..4])
|
||||||
mut s1 := binary.big_endian_u32(src[4..8])
|
mut s1 := binary.big_endian_u32(src[4..8])
|
||||||
|
@ -85,7 +85,7 @@ fn encrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decrypt one block from src into dst, using the expanded key xk.
|
// Decrypt one block from src into dst, using the expanded key xk.
|
||||||
fn decrypt_block_generic(xk []u32, mut dst []byte, src []byte) {
|
fn decrypt_block_generic(xk []u32, mut dst []u8, src []u8) {
|
||||||
_ = src[15] // early bounds check
|
_ = src[15] // early bounds check
|
||||||
mut s0 := binary.big_endian_u32(src[0..4])
|
mut s0 := binary.big_endian_u32(src[0..4])
|
||||||
mut s1 := binary.big_endian_u32(src[4..8])
|
mut s1 := binary.big_endian_u32(src[4..8])
|
||||||
|
@ -143,7 +143,7 @@ fn rotw(w u32) u32 {
|
||||||
|
|
||||||
// Key expansion algorithm. See FIPS-197, Figure 11.
|
// Key expansion algorithm. See FIPS-197, Figure 11.
|
||||||
// Their rcon[i] is our powx[i-1] << 24.
|
// Their rcon[i] is our powx[i-1] << 24.
|
||||||
fn expand_key_generic(key []byte, mut enc []u32, mut dec []u32) {
|
fn expand_key_generic(key []u8, mut enc []u32, mut dec []u32) {
|
||||||
// Encryption key setup.
|
// Encryption key setup.
|
||||||
mut i := 0
|
mut i := 0
|
||||||
nk := key.len / 4
|
nk := key.len / 4
|
||||||
|
|
|
@ -7,7 +7,7 @@ import crypto.cipher
|
||||||
|
|
||||||
// new_cipher_generic creates and returns a new cipher.Block
|
// new_cipher_generic creates and returns a new cipher.Block
|
||||||
// this is the generiv v version, no arch optimisations
|
// this is the generiv v version, no arch optimisations
|
||||||
fn new_cipher_generic(key []byte) cipher.Block {
|
fn new_cipher_generic(key []u8) cipher.Block {
|
||||||
n := key.len + 28
|
n := key.len + 28
|
||||||
mut c := AesCipher{
|
mut c := AesCipher{
|
||||||
enc: []u32{len: n}
|
enc: []u32{len: n}
|
||||||
|
|
|
@ -20,8 +20,8 @@ pub const (
|
||||||
|
|
||||||
pub struct Hashed {
|
pub struct Hashed {
|
||||||
mut:
|
mut:
|
||||||
hash []byte
|
hash []u8
|
||||||
salt []byte
|
salt []u8
|
||||||
cost int
|
cost int
|
||||||
major string
|
major string
|
||||||
minor string
|
minor string
|
||||||
|
@ -31,14 +31,14 @@ const magic_cipher_data = [u8(0x4f), 0x72, 0x70, 0x68, 0x65, 0x61, 0x6e, 0x42, 0
|
||||||
0x6c, 0x64, 0x65, 0x72, 0x53, 0x63, 0x72, 0x79, 0x44, 0x6f, 0x75, 0x62, 0x74]
|
0x6c, 0x64, 0x65, 0x72, 0x53, 0x63, 0x72, 0x79, 0x44, 0x6f, 0x75, 0x62, 0x74]
|
||||||
|
|
||||||
// generate_from_password return a bcrypt string from Hashed struct.
|
// generate_from_password return a bcrypt string from Hashed struct.
|
||||||
pub fn generate_from_password(password []byte, cost int) ?string {
|
pub fn generate_from_password(password []u8, cost int) ?string {
|
||||||
mut p := new_from_password(password, cost) or { return error('Error: $err') }
|
mut p := new_from_password(password, cost) or { return error('Error: $err') }
|
||||||
x := p.hash_u8()
|
x := p.hash_u8()
|
||||||
return x.bytestr()
|
return x.bytestr()
|
||||||
}
|
}
|
||||||
|
|
||||||
// compare_hash_and_password compares a bcrypt hashed password with its possible hashed version.
|
// compare_hash_and_password compares a bcrypt hashed password with its possible hashed version.
|
||||||
pub fn compare_hash_and_password(password []byte, hashed_password []byte) ? {
|
pub fn compare_hash_and_password(password []u8, hashed_password []u8) ? {
|
||||||
mut p := new_from_hash(hashed_password) or { return error('Error: $err') }
|
mut p := new_from_hash(hashed_password) or { return error('Error: $err') }
|
||||||
p.salt << `=`
|
p.salt << `=`
|
||||||
p.salt << `=`
|
p.salt << `=`
|
||||||
|
@ -64,7 +64,7 @@ pub fn generate_salt() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_from_password converting from password to a Hashed struct with bcrypt.
|
// new_from_password converting from password to a Hashed struct with bcrypt.
|
||||||
fn new_from_password(password []byte, cost int) ?&Hashed {
|
fn new_from_password(password []u8, cost int) ?&Hashed {
|
||||||
mut cost_ := cost
|
mut cost_ := cost
|
||||||
if cost < bcrypt.min_cost {
|
if cost < bcrypt.min_cost {
|
||||||
cost_ = bcrypt.default_cost
|
cost_ = bcrypt.default_cost
|
||||||
|
@ -86,7 +86,7 @@ fn new_from_password(password []byte, cost int) ?&Hashed {
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_from_hash converting from hashed data to a Hashed struct.
|
// new_from_hash converting from hashed data to a Hashed struct.
|
||||||
fn new_from_hash(hashed_secret []byte) ?&Hashed {
|
fn new_from_hash(hashed_secret []u8) ?&Hashed {
|
||||||
mut tmp := hashed_secret.clone()
|
mut tmp := hashed_secret.clone()
|
||||||
if tmp.len < bcrypt.min_hash_size {
|
if tmp.len < bcrypt.min_hash_size {
|
||||||
return error('hash to short')
|
return error('hash to short')
|
||||||
|
@ -106,8 +106,8 @@ fn new_from_hash(hashed_secret []byte) ?&Hashed {
|
||||||
}
|
}
|
||||||
|
|
||||||
// bcrypt hashing passwords.
|
// bcrypt hashing passwords.
|
||||||
fn bcrypt(password []byte, cost int, salt []byte) ?[]byte {
|
fn bcrypt(password []u8, cost int, salt []u8) ?[]u8 {
|
||||||
mut cipher_data := []byte{len: 72 - bcrypt.magic_cipher_data.len, init: 0}
|
mut cipher_data := []u8{len: 72 - bcrypt.magic_cipher_data.len, init: 0}
|
||||||
cipher_data << bcrypt.magic_cipher_data
|
cipher_data << bcrypt.magic_cipher_data
|
||||||
|
|
||||||
mut bf := expensive_blowfish_setup(password, u32(cost), salt) or { return err }
|
mut bf := expensive_blowfish_setup(password, u32(cost), salt) or { return err }
|
||||||
|
@ -123,7 +123,7 @@ fn bcrypt(password []byte, cost int, salt []byte) ?[]byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
// expensive_blowfish_setup generate a Blowfish cipher, given key, cost and salt.
|
// expensive_blowfish_setup generate a Blowfish cipher, given key, cost and salt.
|
||||||
fn expensive_blowfish_setup(key []byte, cost u32, salt []byte) ?&blowfish.Blowfish {
|
fn expensive_blowfish_setup(key []u8, cost u32, salt []u8) ?&blowfish.Blowfish {
|
||||||
csalt := base64.decode(salt.bytestr())
|
csalt := base64.decode(salt.bytestr())
|
||||||
|
|
||||||
mut bf := blowfish.new_salted_cipher(key, csalt) or { return err }
|
mut bf := blowfish.new_salted_cipher(key, csalt) or { return err }
|
||||||
|
@ -140,8 +140,8 @@ fn expensive_blowfish_setup(key []byte, cost u32, salt []byte) ?&blowfish.Blowfi
|
||||||
}
|
}
|
||||||
|
|
||||||
// hash_byte converts the hash value to a byte array.
|
// hash_byte converts the hash value to a byte array.
|
||||||
fn (mut h Hashed) hash_u8() []byte {
|
fn (mut h Hashed) hash_u8() []u8 {
|
||||||
mut arr := []byte{len: 65, init: 0}
|
mut arr := []u8{len: 65, init: 0}
|
||||||
arr[0] = `$`
|
arr[0] = `$`
|
||||||
arr[1] = h.major[0]
|
arr[1] = h.major[0]
|
||||||
mut n := 2
|
mut n := 2
|
||||||
|
@ -164,7 +164,7 @@ fn (mut h Hashed) hash_u8() []byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode_version decode bcrypt version.
|
// decode_version decode bcrypt version.
|
||||||
fn (mut h Hashed) decode_version(sbytes []byte) ?int {
|
fn (mut h Hashed) decode_version(sbytes []u8) ?int {
|
||||||
if sbytes[0] != `$` {
|
if sbytes[0] != `$` {
|
||||||
return error("bcrypt hashes must start with '$'")
|
return error("bcrypt hashes must start with '$'")
|
||||||
}
|
}
|
||||||
|
@ -181,7 +181,7 @@ fn (mut h Hashed) decode_version(sbytes []byte) ?int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode_cost extracts the value of cost and returns the next index in the array.
|
// decode_cost extracts the value of cost and returns the next index in the array.
|
||||||
fn (mut h Hashed) decode_cost(sbytes []byte) ?int {
|
fn (mut h Hashed) decode_cost(sbytes []u8) ?int {
|
||||||
cost := sbytes[0..2].bytestr().int()
|
cost := sbytes[0..2].bytestr().int()
|
||||||
check_cost(cost) or { return err }
|
check_cost(cost) or { return err }
|
||||||
h.cost = cost
|
h.cost = cost
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
module blowfish
|
module blowfish
|
||||||
|
|
||||||
// expand_key performs a key expansion on the given Blowfish cipher.
|
// expand_key performs a key expansion on the given Blowfish cipher.
|
||||||
pub fn expand_key(key []byte, mut bf Blowfish) {
|
pub fn expand_key(key []u8, mut bf Blowfish) {
|
||||||
mut j := 0
|
mut j := 0
|
||||||
for i := 0; i < 18; i++ {
|
for i := 0; i < 18; i++ {
|
||||||
mut d := u32(0)
|
mut d := u32(0)
|
||||||
|
@ -41,7 +41,7 @@ pub fn expand_key(key []byte, mut bf Blowfish) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// expand_key_with_salt using salt to expand the key.
|
// expand_key_with_salt using salt to expand the key.
|
||||||
pub fn expand_key_with_salt(key []byte, salt []byte, mut bf Blowfish) {
|
pub fn expand_key_with_salt(key []u8, salt []u8, mut bf Blowfish) {
|
||||||
mut j := 0
|
mut j := 0
|
||||||
for i := 0; i < 18; i++ {
|
for i := 0; i < 18; i++ {
|
||||||
bf.p[i] ^= get_next_word(key, &j)
|
bf.p[i] ^= get_next_word(key, &j)
|
||||||
|
@ -128,7 +128,7 @@ fn setup_tables(l u32, r u32, mut bf Blowfish) []u32 {
|
||||||
|
|
||||||
// get_next_word returns the next big-endian u32 value from the byte
|
// get_next_word returns the next big-endian u32 value from the byte
|
||||||
// slice at the given position in a circular manner, updating the position.
|
// slice at the given position in a circular manner, updating the position.
|
||||||
fn get_next_word(b []byte, pos &int) u32 {
|
fn get_next_word(b []u8, pos &int) u32 {
|
||||||
mut w := u32(0)
|
mut w := u32(0)
|
||||||
mut j := 0
|
mut j := 0
|
||||||
unsafe {
|
unsafe {
|
||||||
|
|
|
@ -8,7 +8,7 @@ pub mut:
|
||||||
|
|
||||||
// new_cipher creates and returns a new Blowfish cipher.
|
// new_cipher creates and returns a new Blowfish cipher.
|
||||||
// The key argument should be the Blowfish key, from 1 to 56 bytes.
|
// The key argument should be the Blowfish key, from 1 to 56 bytes.
|
||||||
pub fn new_cipher(key []byte) ?Blowfish {
|
pub fn new_cipher(key []u8) ?Blowfish {
|
||||||
mut bf := Blowfish{}
|
mut bf := Blowfish{}
|
||||||
unsafe { vmemcpy(&bf.p[0], &p[0], int(sizeof(bf.p))) }
|
unsafe { vmemcpy(&bf.p[0], &p[0], int(sizeof(bf.p))) }
|
||||||
unsafe { vmemcpy(&bf.s[0], &s[0], int(sizeof(bf.s))) }
|
unsafe { vmemcpy(&bf.s[0], &s[0], int(sizeof(bf.s))) }
|
||||||
|
@ -21,7 +21,7 @@ pub fn new_cipher(key []byte) ?Blowfish {
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_salted_cipher returns a new Blowfish cipher that folds a salt into its key schedule.
|
// new_salted_cipher returns a new Blowfish cipher that folds a salt into its key schedule.
|
||||||
pub fn new_salted_cipher(key []byte, salt []byte) ?Blowfish {
|
pub fn new_salted_cipher(key []u8, salt []u8) ?Blowfish {
|
||||||
if salt.len == 0 {
|
if salt.len == 0 {
|
||||||
return new_cipher(key)
|
return new_cipher(key)
|
||||||
}
|
}
|
||||||
|
@ -36,7 +36,7 @@ pub fn new_salted_cipher(key []byte, salt []byte) ?Blowfish {
|
||||||
}
|
}
|
||||||
|
|
||||||
// encrypt encrypts the 8-byte buffer src using the key k and stores the result in dst.
|
// encrypt encrypts the 8-byte buffer src using the key k and stores the result in dst.
|
||||||
pub fn (mut bf Blowfish) encrypt(mut dst []byte, src []byte) {
|
pub fn (mut bf Blowfish) encrypt(mut dst []u8, src []u8) {
|
||||||
l := u32(src[0]) << 24 | u32(src[1]) << 16 | u32(src[2]) << 8 | u32(src[3])
|
l := u32(src[0]) << 24 | u32(src[1]) << 16 | u32(src[2]) << 8 | u32(src[3])
|
||||||
r := u32(src[4]) << 24 | u32(src[5]) << 16 | u32(src[6]) << 8 | u32(src[7])
|
r := u32(src[4]) << 24 | u32(src[5]) << 16 | u32(src[6]) << 8 | u32(src[7])
|
||||||
arr := setup_tables(l, r, mut bf)
|
arr := setup_tables(l, r, mut bf)
|
||||||
|
|
|
@ -19,13 +19,13 @@ fn test_aes_cbc() {
|
||||||
println('test_aes_cbc ok')
|
println('test_aes_cbc ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_cbc_en(mut src []byte, key []byte, iv []byte) {
|
fn aes_cbc_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.encrypt_blocks(mut src, src.clone())
|
mode.encrypt_blocks(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_cbc_de(mut src []byte, key []byte, iv []byte) {
|
fn aes_cbc_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.decrypt_blocks(mut src, src.clone())
|
mode.decrypt_blocks(mut src, src.clone())
|
||||||
|
|
|
@ -16,13 +16,13 @@ fn test_aes_cfb() {
|
||||||
println('test_aes_cfb ok')
|
println('test_aes_cfb ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_cfb_en(mut src []byte, key []byte, iv []byte) {
|
fn aes_cfb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_cfb_encrypter(block, iv)
|
mut mode := cipher.new_cfb_encrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_cfb_de(mut src []byte, key []byte, iv []byte) {
|
fn aes_cfb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_cfb_decrypter(block, iv)
|
mut mode := cipher.new_cfb_decrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -16,13 +16,13 @@ fn test_aes_ctr() {
|
||||||
println('test_aes_ctr ok')
|
println('test_aes_ctr ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_ctr_en(mut src []byte, key []byte, iv []byte) {
|
fn aes_ctr_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_ctr_de(mut src []byte, key []byte, iv []byte) {
|
fn aes_ctr_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -18,13 +18,13 @@ fn test_aes_ofb() {
|
||||||
println('test_aes_ofb ok')
|
println('test_aes_ofb ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_ofb_en(mut src []byte, key []byte, iv []byte) {
|
fn aes_ofb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aes_ofb_de(mut src []byte, key []byte, iv []byte) {
|
fn aes_ofb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := aes.new_cipher(key)
|
block := aes.new_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -15,24 +15,24 @@ struct Cbc {
|
||||||
mut:
|
mut:
|
||||||
b Block
|
b Block
|
||||||
block_size int
|
block_size int
|
||||||
iv []byte
|
iv []u8
|
||||||
tmp []byte
|
tmp []u8
|
||||||
}
|
}
|
||||||
|
|
||||||
// internal
|
// internal
|
||||||
fn new_des_cbc(b Block, iv []byte) Cbc {
|
fn new_des_cbc(b Block, iv []u8) Cbc {
|
||||||
return Cbc{
|
return Cbc{
|
||||||
b: b
|
b: b
|
||||||
block_size: b.block_size
|
block_size: b.block_size
|
||||||
iv: iv.clone()
|
iv: iv.clone()
|
||||||
tmp: []byte{len: b.block_size}
|
tmp: []u8{len: b.block_size}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_cbc returns a `DesCbc` which encrypts in cipher block chaining
|
// new_cbc returns a `DesCbc` which encrypts in cipher block chaining
|
||||||
// mode, using the given Block. The length of iv must be the same as the
|
// mode, using the given Block. The length of iv must be the same as the
|
||||||
// Block's block size.
|
// Block's block size.
|
||||||
pub fn new_cbc(b Block, iv []byte) Cbc {
|
pub fn new_cbc(b Block, iv []u8) Cbc {
|
||||||
if iv.len != b.block_size {
|
if iv.len != b.block_size {
|
||||||
panic('crypto.cipher.new_cbc_encrypter: IV length must equal block size')
|
panic('crypto.cipher.new_cbc_encrypter: IV length must equal block size')
|
||||||
}
|
}
|
||||||
|
@ -41,7 +41,7 @@ pub fn new_cbc(b Block, iv []byte) Cbc {
|
||||||
|
|
||||||
// encrypt_blocks encrypts the blocks in `src_` to `dst_`.
|
// encrypt_blocks encrypts the blocks in `src_` to `dst_`.
|
||||||
// Please note: `dst_` is mutable for performance reasons.
|
// Please note: `dst_` is mutable for performance reasons.
|
||||||
pub fn (mut x Cbc) encrypt_blocks(mut dst_ []byte, src_ []byte) {
|
pub fn (mut x Cbc) encrypt_blocks(mut dst_ []u8, src_ []u8) {
|
||||||
unsafe {
|
unsafe {
|
||||||
mut dst := *dst_
|
mut dst := *dst_
|
||||||
mut src := src_
|
mut src := src_
|
||||||
|
@ -75,7 +75,7 @@ pub fn (mut x Cbc) encrypt_blocks(mut dst_ []byte, src_ []byte) {
|
||||||
|
|
||||||
// decrypt_blocks decrypts the blocks in `src` to `dst`.
|
// decrypt_blocks decrypts the blocks in `src` to `dst`.
|
||||||
// Please note: `dst` is mutable for performance reasons.
|
// Please note: `dst` is mutable for performance reasons.
|
||||||
pub fn (mut x Cbc) decrypt_blocks(mut dst []byte, src []byte) {
|
pub fn (mut x Cbc) decrypt_blocks(mut dst []u8, src []u8) {
|
||||||
if src.len % x.block_size != 0 {
|
if src.len % x.block_size != 0 {
|
||||||
panic('crypto.cipher: input not full blocks')
|
panic('crypto.cipher: input not full blocks')
|
||||||
}
|
}
|
||||||
|
@ -113,7 +113,7 @@ pub fn (mut x Cbc) decrypt_blocks(mut dst []byte, src []byte) {
|
||||||
x.tmp = x.iv
|
x.tmp = x.iv
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut x Cbc) set_iv(iv []byte) {
|
fn (mut x Cbc) set_iv(iv []u8) {
|
||||||
if iv.len != x.iv.len {
|
if iv.len != x.iv.len {
|
||||||
panic('cipher: incorrect length IV')
|
panic('cipher: incorrect length IV')
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,8 +13,8 @@ import crypto.internal.subtle
|
||||||
struct Cfb {
|
struct Cfb {
|
||||||
mut:
|
mut:
|
||||||
b Block
|
b Block
|
||||||
next []byte
|
next []u8
|
||||||
out []byte
|
out []u8
|
||||||
out_used int
|
out_used int
|
||||||
|
|
||||||
decrypt bool
|
decrypt bool
|
||||||
|
@ -23,26 +23,26 @@ mut:
|
||||||
// new_cfb_encrypter returns a `Cfb` which encrypts with cipher feedback mode,
|
// new_cfb_encrypter returns a `Cfb` which encrypts with cipher feedback mode,
|
||||||
// using the given Block. The iv must be the same length as the Block's block
|
// using the given Block. The iv must be the same length as the Block's block
|
||||||
// size
|
// size
|
||||||
pub fn new_cfb_encrypter(b Block, iv []byte) Cfb {
|
pub fn new_cfb_encrypter(b Block, iv []u8) Cfb {
|
||||||
return new_cfb(b, iv, false)
|
return new_cfb(b, iv, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_cfb_decrypter returns a `Cfb` which decrypts with cipher feedback mode,
|
// new_cfb_decrypter returns a `Cfb` which decrypts with cipher feedback mode,
|
||||||
// using the given Block. The iv must be the same length as the Block's block
|
// using the given Block. The iv must be the same length as the Block's block
|
||||||
// size
|
// size
|
||||||
pub fn new_cfb_decrypter(b Block, iv []byte) Cfb {
|
pub fn new_cfb_decrypter(b Block, iv []u8) Cfb {
|
||||||
return new_cfb(b, iv, true)
|
return new_cfb(b, iv, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_cfb(b Block, iv []byte, decrypt bool) Cfb {
|
fn new_cfb(b Block, iv []u8, decrypt bool) Cfb {
|
||||||
block_size := b.block_size
|
block_size := b.block_size
|
||||||
if iv.len != block_size {
|
if iv.len != block_size {
|
||||||
panic('cipher.new_cfb: IV length must be equal block size')
|
panic('cipher.new_cfb: IV length must be equal block size')
|
||||||
}
|
}
|
||||||
mut x := Cfb{
|
mut x := Cfb{
|
||||||
b: b
|
b: b
|
||||||
out: []byte{len: b.block_size}
|
out: []u8{len: b.block_size}
|
||||||
next: []byte{len: b.block_size}
|
next: []u8{len: b.block_size}
|
||||||
out_used: block_size
|
out_used: block_size
|
||||||
decrypt: decrypt
|
decrypt: decrypt
|
||||||
}
|
}
|
||||||
|
@ -50,7 +50,7 @@ fn new_cfb(b Block, iv []byte, decrypt bool) Cfb {
|
||||||
return x
|
return x
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (mut x Cfb) xor_key_stream(mut dst_ []byte, src_ []byte) {
|
pub fn (mut x Cfb) xor_key_stream(mut dst_ []u8, src_ []u8) {
|
||||||
unsafe {
|
unsafe {
|
||||||
mut dst := *dst_
|
mut dst := *dst_
|
||||||
mut src := src_
|
mut src := src_
|
||||||
|
|
|
@ -8,9 +8,9 @@ module cipher
|
||||||
// extend that capability to streams of blocks.
|
// extend that capability to streams of blocks.
|
||||||
interface Block {
|
interface Block {
|
||||||
block_size int // block_size returns the cipher's block size.
|
block_size int // block_size returns the cipher's block size.
|
||||||
encrypt(mut dst []byte, src []byte) // Encrypt encrypts the first block in src into dst.
|
encrypt(mut dst []u8, src []u8) // Encrypt encrypts the first block in src into dst.
|
||||||
// Dst and src must overlap entirely or not at all.
|
// Dst and src must overlap entirely or not at all.
|
||||||
decrypt(mut dst []byte, src []byte) // Decrypt decrypts the first block in src into dst.
|
decrypt(mut dst []u8, src []u8) // Decrypt decrypts the first block in src into dst.
|
||||||
// Dst and src must overlap entirely or not at all.
|
// Dst and src must overlap entirely or not at all.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -26,14 +26,14 @@ interface Stream {
|
||||||
// Multiple calls to xor_key_stream behave as if the concatenation of
|
// Multiple calls to xor_key_stream behave as if the concatenation of
|
||||||
// the src buffers was passed in a single run. That is, Stream
|
// the src buffers was passed in a single run. That is, Stream
|
||||||
// maintains state and does not reset at each xor_key_stream call.
|
// maintains state and does not reset at each xor_key_stream call.
|
||||||
xor_key_stream(mut dst []byte, src []byte)
|
xor_key_stream(mut dst []u8, src []u8)
|
||||||
}
|
}
|
||||||
|
|
||||||
// A BlockMode represents a block cipher running in a block-based mode (CBC,
|
// A BlockMode represents a block cipher running in a block-based mode (CBC,
|
||||||
// ECB etc).
|
// ECB etc).
|
||||||
interface BlockMode {
|
interface BlockMode {
|
||||||
block_size int // block_size returns the mode's block size.
|
block_size int // block_size returns the mode's block size.
|
||||||
crypt_blocks(mut dst []byte, src []byte) // crypt_blocks encrypts or decrypts a number of blocks. The length of
|
crypt_blocks(mut dst []u8, src []u8) // crypt_blocks encrypts or decrypts a number of blocks. The length of
|
||||||
// src must be a multiple of the block size. Dst and src must overlap
|
// src must be a multiple of the block size. Dst and src must overlap
|
||||||
// entirely or not at all.
|
// entirely or not at all.
|
||||||
//
|
//
|
||||||
|
@ -48,8 +48,8 @@ interface BlockMode {
|
||||||
|
|
||||||
// Utility routines
|
// Utility routines
|
||||||
|
|
||||||
// fn dup(p []byte) []byte {
|
// fn dup(p []u8) []u8 {
|
||||||
// q := make([]byte, p.len)
|
// q := make([]u8, p.len)
|
||||||
// copy(mut q, p)
|
// copy(mut q, p)
|
||||||
// return q
|
// return q
|
||||||
// }
|
// }
|
||||||
|
|
|
@ -16,27 +16,27 @@ import crypto.internal.subtle
|
||||||
struct Ctr {
|
struct Ctr {
|
||||||
mut:
|
mut:
|
||||||
b Block
|
b Block
|
||||||
next []byte
|
next []u8
|
||||||
out []byte
|
out []u8
|
||||||
out_used int
|
out_used int
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_ctr returns a Ctr which encrypts/decrypts using the given Block in
|
// new_ctr returns a Ctr which encrypts/decrypts using the given Block in
|
||||||
// counter mode. The length of iv must be the same as the Block's block size.
|
// counter mode. The length of iv must be the same as the Block's block size.
|
||||||
pub fn new_ctr(b Block, iv []byte) Ctr {
|
pub fn new_ctr(b Block, iv []u8) Ctr {
|
||||||
block_size := b.block_size
|
block_size := b.block_size
|
||||||
if iv.len != block_size {
|
if iv.len != block_size {
|
||||||
panic('cipher.new_cfb: IV length must be equal block size')
|
panic('cipher.new_cfb: IV length must be equal block size')
|
||||||
}
|
}
|
||||||
return Ctr{
|
return Ctr{
|
||||||
b: b
|
b: b
|
||||||
out: []byte{len: b.block_size}
|
out: []u8{len: b.block_size}
|
||||||
next: iv.clone()
|
next: iv.clone()
|
||||||
out_used: block_size
|
out_used: block_size
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (x &Ctr) xor_key_stream(mut dst_ []byte, src_ []byte) {
|
pub fn (x &Ctr) xor_key_stream(mut dst_ []u8, src_ []u8) {
|
||||||
unsafe {
|
unsafe {
|
||||||
mut dst := *dst_
|
mut dst := *dst_
|
||||||
mut src := src_
|
mut src := src_
|
||||||
|
|
|
@ -29,25 +29,25 @@ fn test_des_cbc() {
|
||||||
println('test_des_cbc ok')
|
println('test_des_cbc ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_cbc_en(mut src []byte, key []byte, iv []byte) {
|
fn des_cbc_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.encrypt_blocks(mut src, src.clone())
|
mode.encrypt_blocks(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_cbc_de(mut src []byte, key []byte, iv []byte) {
|
fn des_cbc_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.decrypt_blocks(mut src, src.clone())
|
mode.decrypt_blocks(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_cbc_en(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_cbc_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.encrypt_blocks(mut src, src.clone())
|
mode.encrypt_blocks(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_cbc_de(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_cbc_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_cbc(block, iv)
|
mut mode := cipher.new_cbc(block, iv)
|
||||||
mode.decrypt_blocks(mut src, src.clone())
|
mode.decrypt_blocks(mut src, src.clone())
|
||||||
|
|
|
@ -29,25 +29,25 @@ fn test_des_cfb() {
|
||||||
println('test_des_cfb ok')
|
println('test_des_cfb ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_cfb_en(mut src []byte, key []byte, iv []byte) {
|
fn des_cfb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_cfb_encrypter(block, iv)
|
mut mode := cipher.new_cfb_encrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_cfb_de(mut src []byte, key []byte, iv []byte) {
|
fn des_cfb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_cfb_decrypter(block, iv)
|
mut mode := cipher.new_cfb_decrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_cfb_en(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_cfb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_cfb_encrypter(block, iv)
|
mut mode := cipher.new_cfb_encrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_cfb_de(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_cfb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_cfb_decrypter(block, iv)
|
mut mode := cipher.new_cfb_decrypter(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -29,25 +29,25 @@ fn test_des_ctr() {
|
||||||
println('test_des_ctr ok')
|
println('test_des_ctr ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_ctr_en(mut src []byte, key []byte, iv []byte) {
|
fn des_ctr_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_ctr_de(mut src []byte, key []byte, iv []byte) {
|
fn des_ctr_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_ctr_en(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_ctr_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_ctr_de(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_ctr_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mode := cipher.new_ctr(block, iv)
|
mode := cipher.new_ctr(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -29,25 +29,25 @@ fn test_des_ofb() {
|
||||||
println('test_des_ofb ok')
|
println('test_des_ofb ok')
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_ofb_en(mut src []byte, key []byte, iv []byte) {
|
fn des_ofb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn des_ofb_de(mut src []byte, key []byte, iv []byte) {
|
fn des_ofb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_cipher(key)
|
block := des.new_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_ofb_en(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_ofb_en(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn triple_des_ofb_de(mut src []byte, key []byte, iv []byte) {
|
fn triple_des_ofb_de(mut src []u8, key []u8, iv []u8) {
|
||||||
block := des.new_triple_des_cipher(key)
|
block := des.new_triple_des_cipher(key)
|
||||||
mut mode := cipher.new_ofb(block, iv)
|
mut mode := cipher.new_ofb(block, iv)
|
||||||
mode.xor_key_stream(mut src, src.clone())
|
mode.xor_key_stream(mut src, src.clone())
|
||||||
|
|
|
@ -12,30 +12,30 @@ import crypto.internal.subtle
|
||||||
struct Ofb {
|
struct Ofb {
|
||||||
mut:
|
mut:
|
||||||
b Block
|
b Block
|
||||||
next []byte
|
next []u8
|
||||||
out []byte
|
out []u8
|
||||||
out_used int
|
out_used int
|
||||||
}
|
}
|
||||||
|
|
||||||
// new_ofb returns a Ofb that encrypts or decrypts using the block cipher b
|
// new_ofb returns a Ofb that encrypts or decrypts using the block cipher b
|
||||||
// in output feedback mode. The initialization vector iv's length must be equal
|
// in output feedback mode. The initialization vector iv's length must be equal
|
||||||
// to b's block size.
|
// to b's block size.
|
||||||
pub fn new_ofb(b Block, iv []byte) Ofb {
|
pub fn new_ofb(b Block, iv []u8) Ofb {
|
||||||
block_size := b.block_size
|
block_size := b.block_size
|
||||||
if iv.len != block_size {
|
if iv.len != block_size {
|
||||||
panic('cipher.new_ofb: IV length must be equal block size')
|
panic('cipher.new_ofb: IV length must be equal block size')
|
||||||
}
|
}
|
||||||
mut x := Ofb{
|
mut x := Ofb{
|
||||||
b: b
|
b: b
|
||||||
out: []byte{len: b.block_size}
|
out: []u8{len: b.block_size}
|
||||||
next: []byte{len: b.block_size}
|
next: []u8{len: b.block_size}
|
||||||
out_used: block_size
|
out_used: block_size
|
||||||
}
|
}
|
||||||
copy(mut x.next, iv)
|
copy(mut x.next, iv)
|
||||||
return x
|
return x
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (mut x Ofb) xor_key_stream(mut dst_ []byte, src_ []byte) {
|
pub fn (mut x Ofb) xor_key_stream(mut dst_ []u8, src_ []u8) {
|
||||||
unsafe {
|
unsafe {
|
||||||
mut dst := *dst_
|
mut dst := *dst_
|
||||||
mut src := src_
|
mut src := src_
|
||||||
|
|
|
@ -6,7 +6,7 @@ module cipher
|
||||||
// NOTE: Implement other versions (joe-c)
|
// NOTE: Implement other versions (joe-c)
|
||||||
// xor_bytes xors the bytes in a and b. The destination should have enough
|
// xor_bytes xors the bytes in a and b. The destination should have enough
|
||||||
// space, otherwise xor_bytes will panic. Returns the number of bytes xor'd.
|
// space, otherwise xor_bytes will panic. Returns the number of bytes xor'd.
|
||||||
pub fn xor_bytes(mut dst []byte, a []byte, b []byte) int {
|
pub fn xor_bytes(mut dst []u8, a []u8, b []u8) int {
|
||||||
mut n := a.len
|
mut n := a.len
|
||||||
if b.len < n {
|
if b.len < n {
|
||||||
n = b.len
|
n = b.len
|
||||||
|
@ -20,7 +20,7 @@ pub fn xor_bytes(mut dst []byte, a []byte, b []byte) int {
|
||||||
|
|
||||||
// safe_xor_bytes XORs the bytes in `a` and `b` into `dst` it does so `n` times.
|
// safe_xor_bytes XORs the bytes in `a` and `b` into `dst` it does so `n` times.
|
||||||
// Please note: `n` needs to be smaller or equal than the length of `a` and `b`.
|
// Please note: `n` needs to be smaller or equal than the length of `a` and `b`.
|
||||||
pub fn safe_xor_bytes(mut dst []byte, a []byte, b []byte, n int) {
|
pub fn safe_xor_bytes(mut dst []u8, a []u8, b []u8, n int) {
|
||||||
for i in 0 .. n {
|
for i in 0 .. n {
|
||||||
dst[i] = a[i] ^ b[i]
|
dst[i] = a[i] ^ b[i]
|
||||||
}
|
}
|
||||||
|
@ -28,6 +28,6 @@ pub fn safe_xor_bytes(mut dst []byte, a []byte, b []byte, n int) {
|
||||||
|
|
||||||
// xor_words XORs multiples of 4 or 8 bytes (depending on architecture.)
|
// xor_words XORs multiples of 4 or 8 bytes (depending on architecture.)
|
||||||
// The slice arguments `a` and `b` are assumed to be of equal length.
|
// The slice arguments `a` and `b` are assumed to be of equal length.
|
||||||
pub fn xor_words(mut dst []byte, a []byte, b []byte) {
|
pub fn xor_words(mut dst []u8, a []u8, b []u8) {
|
||||||
safe_xor_bytes(mut dst, a, b, b.len)
|
safe_xor_bytes(mut dst, a, b, b.len)
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,7 +23,7 @@ fn feistel(ll u32, rr u32, k0 u64, k1 u64) (u32, u32) {
	return l, r
}

-fn crypt_block(subkeys []u64, mut dst []byte, src []byte, decrypt bool) {
+fn crypt_block(subkeys []u64, mut dst []u8, src []u8, decrypt bool) {
	mut b := binary.big_endian_u64(src)
	b = permute_initial_block(b)

@ -51,17 +51,17 @@ fn crypt_block(subkeys []u64, mut dst []byte, src []byte, decrypt bool) {
}

// Encrypt one block from src into dst, using the subkeys.
-pub fn encrypt_block(subkeys []u64, mut dst []byte, src []byte) {
+pub fn encrypt_block(subkeys []u64, mut dst []u8, src []u8) {
	crypt_block(subkeys, mut dst, src, false)
}

// Decrypt one block from src into dst, using the subkeys.
-fn decrypt_block(subkeys []u64, mut dst []byte, src []byte) {
+fn decrypt_block(subkeys []u64, mut dst []u8, src []u8) {
	crypt_block(subkeys, mut dst, src, true)
}

// general purpose function to perform DES block permutations
-fn permute_block(src u64, permutation []byte) u64 {
+fn permute_block(src u64, permutation []u8) u64 {
	mut block := u64(0)
	for position, n in permutation {
		bit := (src >> u64(u8(n))) & 1

@ -25,7 +25,7 @@ mut:
}

// NewCipher creates and returns a new cipher.Block.
-pub fn new_cipher(key []byte) cipher.Block {
+pub fn new_cipher(key []u8) cipher.Block {
	if key.len != 8 {
		panic('crypto.aes: invalid key size')
	}

@ -36,7 +36,7 @@ pub fn new_cipher(key []byte) cipher.Block {
}

// creates 16 56-bit subkeys from the original key
-fn (mut c DesCipher) generate_subkeys(key_bytes []byte) {
+fn (mut c DesCipher) generate_subkeys(key_bytes []u8) {
	// feistel_box_once.do(initFeistel_box)

	// apply PC1 permutation to key

@ -56,7 +56,7 @@ fn (mut c DesCipher) generate_subkeys(key_bytes []byte) {
	}
}

-pub fn (c &DesCipher) encrypt(mut dst []byte, src []byte) {
+pub fn (c &DesCipher) encrypt(mut dst []u8, src []u8) {
	if src.len < des.block_size {
		panic('crypto/des: input not full block')
	}

@ -69,7 +69,7 @@ pub fn (c &DesCipher) encrypt(mut dst []byte, src []byte) {
	encrypt_block(c.subkeys[..], mut dst, src)
}

-pub fn (c &DesCipher) decrypt(mut dst []byte, src []byte) {
+pub fn (c &DesCipher) decrypt(mut dst []u8, src []u8) {
	if src.len < des.block_size {
		panic('crypto/des: input not full block')
	}

@ -83,7 +83,7 @@ pub fn (c &DesCipher) decrypt(mut dst []byte, src []byte) {
}

// NewTripleDesCipher creates and returns a new cipher.Block.
-pub fn new_triple_des_cipher(key []byte) cipher.Block {
+pub fn new_triple_des_cipher(key []u8) cipher.Block {
	if key.len != 24 {
		panic('crypto.des: invalid key size')
	}

@ -94,7 +94,7 @@ pub fn new_triple_des_cipher(key []byte) cipher.Block {
	return c
}

-pub fn (c &TripleDesCipher) encrypt(mut dst []byte, src []byte) {
+pub fn (c &TripleDesCipher) encrypt(mut dst []u8, src []u8) {
	if src.len < des.block_size {
		panic('crypto/des: input not full block')
	}

@ -130,7 +130,7 @@ pub fn (c &TripleDesCipher) encrypt(mut dst []byte, src []byte) {
	binary.big_endian_put_u64(mut dst, permute_final_block(pre_output))
}

-pub fn (c &TripleDesCipher) decrypt(mut dst []byte, src []byte) {
+pub fn (c &TripleDesCipher) decrypt(mut dst []u8, src []u8) {
	if src.len < des.block_size {
		panic('crypto/des: input not full block')
	}
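For orientation only (not in the diff): a sketch of the DES API with the new []u8 signatures; the key and plaintext are made-up 8-byte values.

import crypto.des

fn main() {
	key := '12345678'.bytes() // .bytes() yields []u8
	mut dst := []u8{len: 8}
	block := des.new_cipher(key)
	block.encrypt(mut dst, 'abcdefgh'.bytes())
	println(dst.hex())
}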
@ -29,22 +29,22 @@ fn test_des() {
	println('test_des ok')
}

-fn des_en(mut src []byte, key []byte, iv []byte) {
+fn des_en(mut src []u8, key []u8, iv []u8) {
	block := des.new_cipher(key)
	block.encrypt(mut src, src.clone())
}

-fn des_de(mut src []byte, key []byte, iv []byte) {
+fn des_de(mut src []u8, key []u8, iv []u8) {
	block := des.new_cipher(key)
	block.decrypt(mut src, src.clone())
}

-fn triple_des_en(mut src []byte, key []byte, iv []byte) {
+fn triple_des_en(mut src []u8, key []u8, iv []u8) {
	block := des.new_triple_des_cipher(key)
	block.encrypt(mut src, src.clone())
}

-fn triple_des_de(mut src []byte, key []byte, iv []byte) {
+fn triple_des_de(mut src []u8, key []u8, iv []u8) {
	block := des.new_triple_des_cipher(key)
	inbuf := src.clone()
	block.decrypt(mut src, inbuf)
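The test helpers above can be exercised the same way; a hedged sketch with a made-up 24-byte key for the triple-DES variant.

import crypto.des

fn main() {
	key := 'abcdefghijklmnopqrstuvwx'.bytes() // 24 bytes
	mut buf := 'hello123'.bytes() // one 8-byte block, encrypted in place
	block := des.new_triple_des_cipher(key)
	block.encrypt(mut buf, buf.clone())
	println(buf.hex())
}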
@ -18,39 +18,39 @@ pub const signature_size = 64
pub const seed_size = 32

// `PublicKey` is Ed25519 public keys.
-pub type PublicKey = []byte
+pub type PublicKey = []u8

// equal reports whether p and x have the same value.
-pub fn (p PublicKey) equal(x []byte) bool {
+pub fn (p PublicKey) equal(x []u8) bool {
	return subtle.constant_time_compare(p, PublicKey(x)) == 1
}

// PrivateKey is Ed25519 private keys
-pub type PrivateKey = []byte
+pub type PrivateKey = []u8

// seed returns the private key seed corresponding to priv.
// RFC 8032's private keys correspond to seeds in this module.
-pub fn (priv PrivateKey) seed() []byte {
+pub fn (priv PrivateKey) seed() []u8 {
-	mut seed := []byte{len: ed25519.seed_size}
+	mut seed := []u8{len: ed25519.seed_size}
	copy(mut seed, priv[..32])
	return seed
}

-// public_key returns the []byte corresponding to priv.
+// public_key returns the []u8 corresponding to priv.
pub fn (priv PrivateKey) public_key() PublicKey {
	assert priv.len == ed25519.private_key_size
-	mut publickey := []byte{len: ed25519.public_key_size}
+	mut publickey := []u8{len: ed25519.public_key_size}
	copy(mut publickey, priv[32..])
	return PublicKey(publickey)
}

// currentyly x not `crypto.PrivateKey`
-pub fn (priv PrivateKey) equal(x []byte) bool {
+pub fn (priv PrivateKey) equal(x []u8) bool {
	return subtle.constant_time_compare(priv, PrivateKey(x)) == 1
}

// sign signs the given message with priv.
-pub fn (priv PrivateKey) sign(message []byte) ?[]byte {
+pub fn (priv PrivateKey) sign(message []u8) ?[]u8 {
	/*
	if opts.HashFunc() != crypto.Hash(0) {
		return nil, errors.New("ed25519: cannot sign hashed message")

@ -60,13 +60,13 @@ pub fn (priv PrivateKey) sign(message []byte) ?[]byte {
}

// sign`signs the message with privatekey and returns a signature
-pub fn sign(privatekey PrivateKey, message []byte) ?[]byte {
+pub fn sign(privatekey PrivateKey, message []u8) ?[]u8 {
-	mut signature := []byte{len: ed25519.signature_size}
+	mut signature := []u8{len: ed25519.signature_size}
	sign_generic(mut signature, privatekey, message) ?
	return signature
}

-fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
+fn sign_generic(mut signature []u8, privatekey []u8, message []u8) ? {
	if privatekey.len != ed25519.private_key_size {
		panic('ed25519: bad private key length: $privatekey.len')
	}

@ -81,7 +81,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
	mh.write(prefix) ?
	mh.write(message) ?

-	mut msg_digest := []byte{cap: sha512.size}
+	mut msg_digest := []u8{cap: sha512.size}
	msg_digest = mh.sum(msg_digest)

	mut r := edwards25519.new_scalar()

@ -95,7 +95,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
	kh.write(publickey) ?
	kh.write(message) ?

-	mut hram_digest := []byte{cap: sha512.size}
+	mut hram_digest := []u8{cap: sha512.size}
	hram_digest = kh.sum(hram_digest)
	mut k := edwards25519.new_scalar()
	k.set_uniform_bytes(hram_digest) ?

@ -108,7 +108,7 @@ fn sign_generic(mut signature []byte, privatekey []byte, message []byte) ? {
}

// verify reports whether sig is a valid signature of message by publickey.
-pub fn verify(publickey PublicKey, message []byte, sig []byte) ?bool {
+pub fn verify(publickey PublicKey, message []u8, sig []u8) ?bool {
	if publickey.len != ed25519.public_key_size {
		return error('ed25519: bad public key length: $publickey.len')
	}

@ -125,7 +125,7 @@ pub fn verify(publickey PublicKey, message []byte, sig []byte) ?bool {
	kh.write(publickey) ?
	kh.write(message) ?

-	mut hram_digest := []byte{cap: sha512.size}
+	mut hram_digest := []u8{cap: sha512.size}
	hram_digest = kh.sum(hram_digest)

	mut k := edwards25519.new_scalar()

@ -148,7 +148,7 @@ pub fn generate_key() ?(PublicKey, PrivateKey) {
	mut seed := rand.bytes(ed25519.seed_size) ?

	privatekey := new_key_from_seed(seed)
-	mut publickey := []byte{len: ed25519.public_key_size}
+	mut publickey := []u8{len: ed25519.public_key_size}
	copy(mut publickey, privatekey[32..])

	return publickey, privatekey

@ -156,14 +156,14 @@ pub fn generate_key() ?(PublicKey, PrivateKey) {

// new_key_from_seed calculates a private key from a seed. private keys of RFC 8032
// correspond to seeds in this module
-pub fn new_key_from_seed(seed []byte) PrivateKey {
+pub fn new_key_from_seed(seed []u8) PrivateKey {
	// Outline the function body so that the returned key can be stack-allocated.
-	mut privatekey := []byte{len: ed25519.private_key_size}
+	mut privatekey := []u8{len: ed25519.private_key_size}
	new_key_from_seed_generic(mut privatekey, seed)
	return PrivateKey(privatekey)
}

-fn new_key_from_seed_generic(mut privatekey []byte, seed []byte) {
+fn new_key_from_seed_generic(mut privatekey []u8, seed []u8) {
	if seed.len != ed25519.seed_size {
		panic('ed25519: bad seed length: $seed.len')
	}
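A sketch of the ed25519 round trip with the updated []u8 signatures (illustrative only, not part of the commit):

import crypto.ed25519

fn main() {
	publickey, privatekey := ed25519.generate_key() or { panic(err) }
	msg := 'hello ed25519'.bytes()
	sig := ed25519.sign(privatekey, msg) or { panic(err) }
	valid := ed25519.verify(publickey, msg, sig) or { panic(err) }
	println(valid) // expected: true
}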
@ -19,7 +19,7 @@ const contents = os.read_lines(os.join_path(testdata, 'sign.input')) or { panic(
/*
struct ZeroReader {}

-fn (z ZeroReader) read(mut buf []byte) ?int {
+fn (z ZeroReader) read(mut buf []u8) ?int {
	for i, _ in buf {
		buf[i] = 0
	}

@ -96,7 +96,7 @@ fn works_check_on_sign_input_string(item string) bool {
	// assert pubkey.len == public_key_size

	sig = sig[..ed25519.signature_size]
-	mut priv := []byte{len: ed25519.private_key_size}
+	mut priv := []u8{len: ed25519.private_key_size}
	copy(mut priv[..], privbytes)
	copy(mut priv[32..], pubkey)

@ -181,7 +181,7 @@ fn test_input_from_djb_ed25519_crypto_sign_input_without_syncpool() ? {
	assert pubkey.len == public_key_size

	sig = sig[..signature_size]
-	mut priv := []byte{len: ed25519.private_key_size}
+	mut priv := []u8{len: ed25519.private_key_size}
	copy(mut priv[..], privbytes)
	copy(mut priv[32..], pubkey)
@ -624,7 +624,7 @@ pub fn (mut v Element) set(a Element) Element {
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
// are accepted. Note that this is laxer than specified by RFC 8032.
-pub fn (mut v Element) set_bytes(x []byte) ?Element {
+pub fn (mut v Element) set_bytes(x []u8) ?Element {
	if x.len != 32 {
		return error('edwards25519: invalid edwards25519 element input size')
	}

@ -650,19 +650,19 @@ pub fn (mut v Element) set_bytes(x []byte) ?Element {
}

// bytes returns the canonical 32-byte little-endian encoding of v.
-pub fn (mut v Element) bytes() []byte {
+pub fn (mut v Element) bytes() []u8 {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	// out := v.bytes_generic()
	return v.bytes_generic()
}

-fn (mut v Element) bytes_generic() []byte {
+fn (mut v Element) bytes_generic() []u8 {
-	mut out := []byte{len: 32}
+	mut out := []u8{len: 32}

	v = v.reduce()

-	mut buf := []byte{len: 8}
+	mut buf := []u8{len: 8}
	idxs := [v.l0, v.l1, v.l2, v.l3, v.l4]
	for i, l in idxs {
		bits_offset := i * 51

@ -725,7 +725,7 @@ pub fn (mut v Element) mult_32(x Element, y u32) Element {
	return v
}

-fn swap_endianness(mut buf []byte) []byte {
+fn swap_endianness(mut buf []u8) []u8 {
	for i := 0; i < buf.len / 2; i++ {
		buf[i], buf[buf.len - i - 1] = buf[buf.len - i - 1], buf[i]
	}

@ -230,7 +230,7 @@ fn test_set_bytes_reduced() {
struct FeRTTest {
mut:
	fe Element
-	b []byte
+	b []u8
}

fn test_set_bytes_from_dalek_test_vectors() ? {

@ -395,7 +395,7 @@ fn test_bytes_big_equivalence() ? {

	assert fe == fe1

-	mut buf := []byte{len: 32} // pad with zeroes
+	mut buf := []u8{len: 32} // pad with zeroes
	fedtobig := fe1.to_big_integer()
	mut fedbig_bytes, _ := fedtobig.bytes()
	copy(mut buf, fedbig_bytes) // does not need to do swap_endianness
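swap_endianness above reverses the slice in place; a standalone sketch of the same idea (plain V, no imports), to make the index arithmetic concrete:

fn reverse_in_place(mut buf []u8) []u8 {
	for i := 0; i < buf.len / 2; i++ {
		buf[i], buf[buf.len - i - 1] = buf[buf.len - i - 1], buf[i]
	}
	return buf
}

fn main() {
	mut b := [u8(1), 2, 3, 4]
	println(reverse_in_place(mut b)) // [4, 3, 2, 1]
}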
@ -86,14 +86,14 @@ fn is_on_curve(x Element, y Element, z Element, t Element) bool {
// Note that bytes_montgomery only encodes the u-coordinate, so v and -v encode
// to the same value. If v is the identity point, bytes_montgomery returns 32
// zero bytes, analogously to the X25519 function.
-pub fn (mut v Point) bytes_montgomery() []byte {
+pub fn (mut v Point) bytes_montgomery() []u8 {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	mut buf := [32]byte{}
	return v.bytes_montgomery_generic(mut buf)
}

-fn (mut v Point) bytes_montgomery_generic(mut buf [32]byte) []byte {
+fn (mut v Point) bytes_montgomery_generic(mut buf [32]byte) []u8 {
	check_initialized(v)

	// RFC 7748, Section 4.1 provides the bilinear map to calculate the

@ -70,7 +70,7 @@ const (
	loworder_bytes = hex.decode(loworder_string) or { panic(err) }
)

-fn fn_cofactor(mut data []byte) bool {
+fn fn_cofactor(mut data []u8) bool {
	if data.len != 64 {
		panic('data.len should be 64')
	}

@ -117,7 +117,7 @@ fn (mut v ProjectiveP2) zero() ProjectiveP2 {
// Note that set_bytes accepts all non-canonical encodings of valid points.
// That is, it follows decoding rules that match most implementations in
// the ecosystem rather than RFC 8032.
-pub fn (mut v Point) set_bytes(x []byte) ?Point {
+pub fn (mut v Point) set_bytes(x []u8) ?Point {
	// Specifically, the non-canonical encodings that are accepted are
	// 1) the ones where the edwards25519 element is not reduced (see the
	// (*edwards25519.Element).set_bytes docs) and

@ -201,14 +201,14 @@ fn (mut v AffineCached) zero() AffineCached {

// bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
// Section 5.1.2.
-pub fn (mut v Point) bytes() []byte {
+pub fn (mut v Point) bytes() []u8 {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	mut buf := [32]byte{}
	return v.bytes_generic(mut buf)
}

-fn (mut v Point) bytes_generic(mut buf [32]byte) []byte {
+fn (mut v Point) bytes_generic(mut buf [32]byte) []u8 {
	check_initialized(v)

	mut zinv := Element{}

@ -226,7 +226,7 @@ fn (mut v Point) bytes_generic(mut buf [32]byte) []byte {
	return out
}

-fn copy_field_element(mut buf [32]byte, mut v Element) []byte {
+fn copy_field_element(mut buf [32]byte, mut v Element) []u8 {
	// this fail in test
	/*
	copy(mut buf[..], v.bytes())

@ -234,7 +234,7 @@ fn copy_field_element(mut buf [32]byte, mut v Element) []byte {
	*/

	// this pass the test
-	mut out := []byte{len: 32}
+	mut out := []u8{len: 32}
	for i := 0; i <= buf.len - 1; i++ {
		out[i] = v.bytes()[i]
	}
@ -86,11 +86,11 @@ pub fn (mut s Scalar) set(x Scalar) Scalar {
// set_uniform_bytes sets s to an uniformly distributed value given 64 uniformly
// distributed random bytes. If x is not of the right length, set_uniform_bytes
// returns an error, and the receiver is unchanged.
-pub fn (mut s Scalar) set_uniform_bytes(x []byte) ?Scalar {
+pub fn (mut s Scalar) set_uniform_bytes(x []u8) ?Scalar {
	if x.len != 64 {
		return error('edwards25519: invalid set_uniform_bytes input length')
	}
-	mut wide_bytes := []byte{len: 64}
+	mut wide_bytes := []u8{len: 64}
	copy(mut wide_bytes, x)
	// for i, item in x {
	// wide_bytes[i] = item

@ -102,11 +102,11 @@ pub fn (mut s Scalar) set_uniform_bytes(x []byte) ?Scalar {
// set_canonical_bytes sets s = x, where x is a 32-byte little-endian encoding of
// s, and returns s. If x is not a canonical encoding of s, set_canonical_bytes
// returns an error, and the receiver is unchanged.
-pub fn (mut s Scalar) set_canonical_bytes(x []byte) ?Scalar {
+pub fn (mut s Scalar) set_canonical_bytes(x []u8) ?Scalar {
	if x.len != 32 {
		return error('invalid scalar length')
	}
-	// mut bb := []byte{len:32}
+	// mut bb := []u8{len:32}
	mut ss := Scalar{}
	for i, item in x {
		ss.s[i] = item

@ -152,7 +152,7 @@ fn is_reduced(s Scalar) bool {
// expected as long as it is applied to points on the prime order subgroup, like
// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
// irrelevant RFC 7748 clamping, but it is now required for compatibility.
-pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
+pub fn (mut s Scalar) set_bytes_with_clamping(x []u8) ?Scalar {
	// The description above omits the purpose of the high bits of the clamping
	// for brevity, but those are also lost to reductions, and are also
	// irrelevant to edwards25519 as they protect against a specific

@ -161,7 +161,7 @@ pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
		return error('edwards25519: invalid set_bytes_with_clamping input length')
	}

-	mut wide_bytes := []byte{len: 64, cap: 64}
+	mut wide_bytes := []u8{len: 64, cap: 64}
	copy(mut wide_bytes, x)
	// for i, item in x {
	// wide_bytes[i] = item

@ -174,8 +174,8 @@ pub fn (mut s Scalar) set_bytes_with_clamping(x []byte) ?Scalar {
}

// bytes returns the canonical 32-byte little-endian encoding of s.
-pub fn (mut s Scalar) bytes() []byte {
+pub fn (mut s Scalar) bytes() []u8 {
-	mut buf := []byte{len: 32}
+	mut buf := []u8{len: 32}
	copy(mut buf, s.s[..])
	return buf
}

@ -187,14 +187,14 @@ pub fn (s Scalar) equal(t Scalar) int {

// sc_mul_add and sc_reduce are ported from the public domain, “ref10”
// implementation of ed25519 from SUPERCOP.
-fn load3(inp []byte) i64 {
+fn load3(inp []u8) i64 {
	mut r := i64(inp[0])
	r |= i64(inp[1]) * 256 // << 8
	r |= i64(inp[2]) * 65536 // << 16
	return r
}

-fn load4(inp []byte) i64 {
+fn load4(inp []u8) i64 {
	mut r := i64(inp[0])
	r |= i64(inp[1]) * 256
	r |= i64(inp[2]) * 65536

@ -653,7 +653,7 @@ fn sc_mul_add(mut s [32]byte, a [32]byte, b [32]byte, c [32]byte) {
// Output:
// s[0]+256*s[1]+...+256^31*s[31] = s mod l
// where l = 2^252 + 27742317777372353535851937790883648493.
-fn sc_reduce(mut out [32]byte, mut s []byte) {
+fn sc_reduce(mut out [32]byte, mut s []u8) {
	assert out.len == 32
	assert s.len == 64
	mut s0 := 2097151 & load3(s[..])

@ -134,7 +134,7 @@ fn test_scalar_set_uniform_bytes() ? {
	assert m.abs_cmp(scbig) == 0 // NEED FIX
}

-fn bigint_from_le_bytes(b []byte) big.Integer {
+fn bigint_from_le_bytes(b []u8) big.Integer {
	mut bc := b.clone()
	buf := swap_endianness(mut bc) // WITHOUT THIS, some test would fail
	bg := big.integer_from_bytes(buf)
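load3/load4 just assemble a little-endian integer from the leading bytes of the slice; a self-contained check of the same arithmetic:

fn load3_demo(inp []u8) i64 {
	mut r := i64(inp[0])
	r |= i64(inp[1]) * 256 // << 8
	r |= i64(inp[2]) * 65536 // << 16
	return r
}

fn main() {
	println(load3_demo([u8(0x01), 0x02, 0x03])) // 197121 == 0x030201
}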
@ -5,14 +5,14 @@ module hmac
import crypto.internal.subtle

const (
-	ipad = []byte{len: 256, init: 0x36} // TODO is 256 enough??
+	ipad = []u8{len: 256, init: 0x36} // TODO is 256 enough??
-	opad = []byte{len: 256, init: 0x5C}
+	opad = []u8{len: 256, init: 0x5C}
-	npad = []byte{len: 256, init: 0}
+	npad = []u8{len: 256, init: 0}
)

// new returns a HMAC byte array, depending on the hash algorithm used.
-pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int) []byte {
+pub fn new(key []u8, data []u8, hash_func fn ([]u8) []u8, blocksize int) []u8 {
-	mut b_key := []byte{}
+	mut b_key := []u8{}
	if key.len <= blocksize {
		b_key = key.clone() // TODO: remove .clone() once https://github.com/vlang/v/issues/6604 gets fixed
	} else {

@ -21,13 +21,13 @@ pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int)
	if b_key.len < blocksize {
		b_key << hmac.npad[..blocksize - b_key.len]
	}
-	mut inner := []byte{}
+	mut inner := []u8{}
	for i, b in hmac.ipad[..blocksize] {
		inner << b_key[i] ^ b
	}
	inner << data
	inner_hash := hash_func(inner)
-	mut outer := []byte{cap: b_key.len}
+	mut outer := []u8{cap: b_key.len}
	for i, b in hmac.opad[..blocksize] {
		outer << b_key[i] ^ b
	}

@ -39,6 +39,6 @@ pub fn new(key []byte, data []byte, hash_func fn ([]byte) []byte, blocksize int)
// equal compares 2 MACs for equality, without leaking timing info.
// Note: if the lengths of the 2 MACs are different, probably a completely different
// hash function was used to generate them => no useful timing information.
-pub fn equal(mac1 []byte, mac2 []byte) bool {
+pub fn equal(mac1 []u8, mac2 []u8) bool {
	return subtle.constant_time_compare(mac1, mac2) == 1
}
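A usage sketch for the updated hmac.new signature (not from the diff); it assumes sha256.sum and a public sha256.block_size constant exist under those names:

import crypto.hmac
import crypto.sha256

fn main() {
	key := 'secret key'.bytes()
	data := 'attack at dawn'.bytes()
	mac := hmac.new(key, data, sha256.sum, sha256.block_size)
	println(mac.hex()) // 32-byte tag as hex
}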
@ -8,7 +8,7 @@ module subtle
// NOTE: require unsafe in future
// any_overlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
-pub fn any_overlap(x []byte, y []byte) bool {
+pub fn any_overlap(x []u8, y []u8) bool {
	// NOTE: Remember to come back to this (joe-c)
	return x.len > 0 && y.len > 0 && // &x.data[0] <= &y.data[y.len-1] &&
		// &y.data[0] <= &x.data[x.len-1]

@ -21,7 +21,7 @@ pub fn any_overlap(x []byte, y []byte) bool {
//
// inexact_overlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
-pub fn inexact_overlap(x []byte, y []byte) bool {
+pub fn inexact_overlap(x []u8, y []u8) bool {
	if x.len == 0 || y.len == 0 || unsafe { &x[0] == &y[0] } {
		return false
	}

@ -19,7 +19,7 @@ pub fn constant_time_select(v int, x int, y int) int {
// constant_time_compare returns 1 when x and y have equal contents.
// The runtime of this function is proportional of the length of x and y.
// It is *NOT* dependent on their content.
-pub fn constant_time_compare(x []byte, y []byte) int {
+pub fn constant_time_compare(x []u8, y []u8) int {
	if x.len != y.len {
		return 0
	}

@ -33,7 +33,7 @@ pub fn constant_time_compare(x []byte, y []byte) int {
// constant_time_copy copies the contents of y into x, when v == 1.
// When v == 0, x is left unchanged. this function is undefined, when
// v takes any other value
-pub fn constant_time_copy(v int, mut x []byte, y []byte) {
+pub fn constant_time_copy(v int, mut x []u8, y []u8) {
	if x.len != y.len {
		panic('subtle: arrays have different lengths')
	}
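A small sketch of the subtle helpers after the rename; the crypto.internal.subtle import path is taken from the hmac hunk above:

import crypto.internal.subtle

fn main() {
	a := 'token-a'.bytes()
	b := 'token-a'.bytes()
	// 1 when the contents match, 0 otherwise, in time that depends only on the length
	println(subtle.constant_time_compare(a, b))
}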
@ -28,14 +28,14 @@ const (
struct Digest {
mut:
	s []u32
-	x []byte
+	x []u8
	nx int
	len u64
}

fn (mut d Digest) reset() {
	d.s = []u32{len: (4)}
-	d.x = []byte{len: md5.block_size}
+	d.x = []u8{len: md5.block_size}
	d.s[0] = u32(md5.init0)
	d.s[1] = u32(md5.init1)
	d.s[2] = u32(md5.init2)

@ -52,7 +52,7 @@ pub fn new() &Digest {
}

// write writes the contents of `p_` to the internal hash representation.
-pub fn (mut d Digest) write(p_ []byte) ?int {
+pub fn (mut d Digest) write(p_ []u8) ?int {
	unsafe {
		mut p := p_
		nn := p.len

@ -87,7 +87,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}

// sum returns the md5 sum of the bytes in `b_in`.
-pub fn (d &Digest) sum(b_in []byte) []byte {
+pub fn (d &Digest) sum(b_in []u8) []u8 {
	// Make a copy of d so that caller can keep writing and summing.
	mut d0 := *d
	hash := d0.checksum()

@ -99,14 +99,14 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}

// checksum returns the byte checksum of the `Digest`.
-pub fn (mut d Digest) checksum() []byte {
+pub fn (mut d Digest) checksum() []u8 {
	// Append 0x80 to the end of the message and then append zeros
	// until the length is a multiple of 56 bytes. Finally append
	// 8 bytes representing the message length in bits.
	//
	// 1 byte end marker :: 0-63 padding bytes :: 8 byte length
	// tmp := [1 + 63 + 8]byte{0x80}
-	mut tmp := []byte{len: (1 + 63 + 8)}
+	mut tmp := []u8{len: (1 + 63 + 8)}
	tmp[0] = 0x80
	pad := ((55 - d.len) % 64) // calculate number of padding bytes
	binary.little_endian_put_u64(mut tmp[1 + pad..], d.len << 3) // append length in bits

@ -116,7 +116,7 @@ pub fn (mut d Digest) checksum() []byte {
	if d.nx != 0 {
		panic('d.nx != 0')
	}
-	mut digest := []byte{len: md5.size}
+	mut digest := []u8{len: md5.size}
	binary.little_endian_put_u32(mut digest, d.s[0])
	binary.little_endian_put_u32(mut digest[4..], d.s[1])
	binary.little_endian_put_u32(mut digest[8..], d.s[2])

@ -125,13 +125,13 @@ pub fn (mut d Digest) checksum() []byte {
}

// sum returns the MD5 checksum of the data.
-pub fn sum(data []byte) []byte {
+pub fn sum(data []u8) []u8 {
	mut d := new()
	d.write(data) or { panic(err) }
	return d.checksum()
}

-fn block(mut dig Digest, p []byte) {
+fn block(mut dig Digest, p []u8) {
	// For now just use block_generic until we have specific
	// architecture optimized versions
	block_generic(mut dig, p)

@ -11,7 +11,7 @@ module md5
import math.bits
import encoding.binary

-fn block_generic(mut dig Digest, p []byte) {
+fn block_generic(mut dig Digest, p []u8) {
	// load state
	mut a := dig.s[0]
	mut b := dig.s[1]
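Illustrative only: the one-shot and streaming md5 calls, with []u8 everywhere:

import crypto.md5

fn main() {
	println(md5.sum('abc'.bytes()).hex())
	// streaming use of the same Digest API
	mut d := md5.new()
	d.write('ab'.bytes()) or { panic(err) }
	d.write('c'.bytes()) or { panic(err) }
	println(d.checksum().hex()) // same digest as the one-shot call
}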
@ -18,6 +18,6 @@ pub fn (err ReadError) msg() string {
// See also rand.bytes(), if you do not need really random bytes,
// but instead pseudo random ones, from a pseudo random generator
// that can be seeded, and that is usually faster.
-pub fn bytes(bytes_needed int) ?[]byte {
+pub fn bytes(bytes_needed int) ?[]u8 {
	return read(bytes_needed)
}

@ -11,8 +11,8 @@ module rand
fn C.SecRandomCopyBytes(rnd C.SecRandomRef, count usize, bytes voidptr) int

// read returns an array of `bytes_needed` random bytes read from the OS.
-pub fn read(bytes_needed int) ?[]byte {
+pub fn read(bytes_needed int) ?[]u8 {
-	mut buffer := []byte{len: bytes_needed}
+	mut buffer := []u8{len: bytes_needed}
	status := C.SecRandomCopyBytes(C.SecRandomRef(0), bytes_needed, buffer.data)
	if status != 0 {
		return IError(&ReadError{})

@ -4,6 +4,6 @@
module rand

// read returns an array of `bytes_needed` random bytes read from the OS.
-pub fn read(bytes_needed int) ?[]byte {
+pub fn read(bytes_needed int) ?[]u8 {
	return error('rand.read is not implemented on this platform')
}

@ -10,7 +10,7 @@ const (
)

// read returns an array of `bytes_needed` random bytes read from the OS.
-pub fn read(bytes_needed int) ?[]byte {
+pub fn read(bytes_needed int) ?[]u8 {
	mut buffer := unsafe { vcalloc_noscan(bytes_needed) }
	mut bytes_read := 0
	mut remaining_bytes := bytes_needed

@ -13,7 +13,7 @@ const (
)

// read returns an array of `bytes_needed` random bytes read from the OS.
-pub fn read(bytes_needed int) ?[]byte {
+pub fn read(bytes_needed int) ?[]u8 {
	mut buffer := unsafe { malloc_noscan(bytes_needed) }
	mut bytes_read := 0
	mut remaining_bytes := bytes_needed

@ -14,8 +14,8 @@ const (
)

// read returns an array of `bytes_needed` random bytes read from the OS.
-pub fn read(bytes_needed int) ?[]byte {
+pub fn read(bytes_needed int) ?[]u8 {
-	mut buffer := []byte{len: bytes_needed}
+	mut buffer := []u8{len: bytes_needed}
	// use bcrypt_use_system_preferred_rng because we passed null as algo
	status := C.BCryptGenRandom(0, buffer.data, bytes_needed, rand.bcrypt_use_system_preferred_rng)
	if status != rand.status_success {

@ -35,7 +35,7 @@ pub fn int_u64(max u64) ?u64 {
	return n
}

-fn bytes_to_u64(b []byte) []u64 {
+fn bytes_to_u64(b []u8) []u64 {
	ws := 64 / 8
	mut z := []u64{len: ((b.len + ws - 1) / ws)}
	mut i := b.len
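A hedged sketch of asking the OS for random bytes through the updated crypto.rand API:

import crypto.rand

fn main() {
	buf := rand.bytes(16) or { panic(err) }
	println(buf.hex()) // 16 random bytes, hex encoded
}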
@ -22,7 +22,7 @@ mut:

// new_cipher creates and returns a new Cipher. The key argument should be the
// RC4 key, at least 1 byte and at most 256 bytes.
-pub fn new_cipher(key []byte) ?Cipher {
+pub fn new_cipher(key []u8) ?Cipher {
	if key.len < 1 || key.len > 256 {
		return error('crypto.rc4: invalid key size ' + key.len.str())
	}

@ -56,7 +56,7 @@ pub fn (mut c Cipher) reset() {

// xor_key_stream sets dst to the result of XORing src with the key stream.
// Dst and src must overlap entirely or not at all.
-pub fn (mut c Cipher) xor_key_stream(mut dst []byte, mut src []byte) {
+pub fn (mut c Cipher) xor_key_stream(mut dst []u8, mut src []u8) {
	if src.len == 0 {
		return
	}
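Sketch of driving the RC4 stream cipher with the new signatures (key and plaintext are placeholders, illustration only):

import crypto.rc4

fn main() {
	mut c := rc4.new_cipher('a secret key'.bytes()) or { panic(err) }
	mut src := 'plaintext'.bytes()
	mut dst := []u8{len: src.len}
	c.xor_key_stream(mut dst, mut src)
	println(dst.hex())
}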
@ -30,13 +30,13 @@ const (
struct Digest {
mut:
	h []u32
-	x []byte
+	x []u8
	nx int
	len u64
}

fn (mut d Digest) reset() {
-	d.x = []byte{len: sha1.chunk}
+	d.x = []u8{len: sha1.chunk}
	d.h = []u32{len: (5)}
	d.h[0] = u32(sha1.init0)
	d.h[1] = u32(sha1.init1)

@ -56,7 +56,7 @@ pub fn new() &Digest {

// write writes the contents of `p_` to the internal hash representation.
[manualfree]
-pub fn (mut d Digest) write(p_ []byte) ?int {
+pub fn (mut d Digest) write(p_ []u8) ?int {
	nn := p_.len
	unsafe {
		mut p := p_

@ -91,7 +91,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}

// sum returns a copy of the generated sum of the bytes in `b_in`.
-pub fn (d &Digest) sum(b_in []byte) []byte {
+pub fn (d &Digest) sum(b_in []u8) []u8 {
	// Make a copy of d so that caller can keep writing and summing.
	mut d0 := *d
	hash := d0.checksum()

@ -103,10 +103,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}

// checksum returns the current byte checksum of the `Digest`.
-pub fn (mut d Digest) checksum() []byte {
+pub fn (mut d Digest) checksum() []u8 {
	mut len := d.len
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
-	mut tmp := []byte{len: (64)}
+	mut tmp := []u8{len: (64)}
	tmp[0] = 0x80
	if int(len) % 64 < 56 {
		d.write(tmp[..56 - int(len) % 64]) or { panic(err) }

@ -117,7 +117,7 @@ pub fn (mut d Digest) checksum() []byte {
	len <<= 3
	binary.big_endian_put_u64(mut tmp, len)
	d.write(tmp[..8]) or { panic(err) }
-	mut digest := []byte{len: sha1.size}
+	mut digest := []u8{len: sha1.size}
	binary.big_endian_put_u32(mut digest, d.h[0])
	binary.big_endian_put_u32(mut digest[4..], d.h[1])
	binary.big_endian_put_u32(mut digest[8..], d.h[2])

@ -127,13 +127,13 @@ pub fn (mut d Digest) checksum() []byte {
}

// sum returns the SHA-1 checksum of the bytes passed in `data`.
-pub fn sum(data []byte) []byte {
+pub fn sum(data []u8) []u8 {
	mut d := new()
	d.write(data) or { panic(err) }
	return d.checksum()
}

-fn block(mut dig Digest, p []byte) {
+fn block(mut dig Digest, p []u8) {
	// For now just use block_generic until we have specific
	// architecture optimized versions
	block_generic(mut dig, p)

@ -15,7 +15,7 @@ const (
	_k3 = 0xCA62C1D6
)

-fn block_generic(mut dig Digest, p_ []byte) {
+fn block_generic(mut dig Digest, p_ []u8) {
	unsafe {
		mut p := p_
		mut w := []u32{len: (16)}
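Illustrative one-liner for the sha1 module after the change:

import crypto.sha1

fn main() {
	println(sha1.sum('abc'.bytes()).hex()) // 20-byte SHA-1 digest as hex
}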
@ -42,7 +42,7 @@ const (
struct Digest {
mut:
	h []u32
-	x []byte
+	x []u8
	nx int
	len u64
	is224 bool // mark if this digest is SHA-224

@ -50,7 +50,7 @@ mut:

fn (mut d Digest) reset() {
	d.h = []u32{len: (8)}
-	d.x = []byte{len: sha256.chunk}
+	d.x = []u8{len: sha256.chunk}
	if !d.is224 {
		d.h[0] = u32(sha256.init0)
		d.h[1] = u32(sha256.init1)

@ -90,7 +90,7 @@ pub fn new224() &Digest {
}

// write writes the contents of `p_` to the internal hash representation.
-pub fn (mut d Digest) write(p_ []byte) ?int {
+pub fn (mut d Digest) write(p_ []u8) ?int {
	unsafe {
		mut p := p_
		nn := p.len

@ -125,7 +125,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}

// sum returns the SHA256 or SHA224 checksum of digest with the data.
-pub fn (d &Digest) sum(b_in []byte) []byte {
+pub fn (d &Digest) sum(b_in []u8) []u8 {
	// Make a copy of d so that caller can keep writing and summing.
	mut d0 := *d
	hash := d0.checksum()

@ -143,10 +143,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}

// checksum returns the current byte checksum of the Digest.
-pub fn (mut d Digest) checksum() []byte {
+pub fn (mut d Digest) checksum() []u8 {
	mut len := d.len
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
-	mut tmp := []byte{len: (64)}
+	mut tmp := []u8{len: (64)}
	tmp[0] = 0x80
	if int(len) % 64 < 56 {
		d.write(tmp[..56 - int(len) % 64]) or { panic(err) }

@ -160,7 +160,7 @@ pub fn (mut d Digest) checksum() []byte {
	if d.nx != 0 {
		panic('d.nx != 0')
	}
-	mut digest := []byte{len: sha256.size}
+	mut digest := []u8{len: sha256.size}
	binary.big_endian_put_u32(mut digest, d.h[0])
	binary.big_endian_put_u32(mut digest[4..], d.h[1])
	binary.big_endian_put_u32(mut digest[8..], d.h[2])

@ -176,28 +176,28 @@ pub fn (mut d Digest) checksum() []byte {

// sum returns the SHA256 checksum of the bytes in `data`.
// Example: assert sha256.sum('V'.bytes()).len > 0 == true
-pub fn sum(data []byte) []byte {
+pub fn sum(data []u8) []u8 {
	return sum256(data)
}

// sum256 returns the SHA256 checksum of the data.
-pub fn sum256(data []byte) []byte {
+pub fn sum256(data []u8) []u8 {
	mut d := new()
	d.write(data) or { panic(err) }
	return d.checksum()
}

// sum224 returns the SHA224 checksum of the data.
-pub fn sum224(data []byte) []byte {
+pub fn sum224(data []u8) []u8 {
	mut d := new224()
	d.write(data) or { panic(err) }
	sum := d.checksum()
-	mut sum224 := []byte{len: sha256.size224}
+	mut sum224 := []u8{len: sha256.size224}
	copy(mut sum224, sum[..sha256.size224])
	return sum224
}

-fn block(mut dig Digest, p []byte) {
+fn block(mut dig Digest, p []u8) {
	// For now just use block_generic until we have specific
	// architecture optimized versions
	block_generic(mut dig, p)

@ -78,7 +78,7 @@ const (
	]
)

-fn block_generic(mut dig Digest, p_ []byte) {
+fn block_generic(mut dig Digest, p_ []u8) {
	unsafe {
		mut p := p_
		mut w := []u32{len: (64)}
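Illustrative only; the two sha256 entry points shown above, called with []u8 input:

import crypto.sha256

fn main() {
	data := 'abc'.bytes()
	println(sha256.sum256(data).hex()) // 32-byte digest
	println(sha256.sum224(data).hex()) // 28-byte digest
}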
@ -64,7 +64,7 @@ const (
struct Digest {
mut:
	h []u64
-	x []byte
+	x []u8
	nx int
	len u64
	function crypto.Hash

@ -72,7 +72,7 @@ mut:

fn (mut d Digest) reset() {
	d.h = []u64{len: (8)}
-	d.x = []byte{len: sha512.chunk}
+	d.x = []u8{len: sha512.chunk}
	match d.function {
		.sha384 {
			d.h[0] = sha512.init0_384

@ -149,7 +149,7 @@ fn new384() &Digest {
}

// write writes the contents of `p_` to the internal hash representation.
-pub fn (mut d Digest) write(p_ []byte) ?int {
+pub fn (mut d Digest) write(p_ []u8) ?int {
	unsafe {
		mut p := p_
		nn := p.len

@ -184,7 +184,7 @@ pub fn (mut d Digest) write(p_ []byte) ?int {
}

// sum returns the SHA512 or SHA384 checksum of digest with the data bytes in `b_in`
-pub fn (d &Digest) sum(b_in []byte) []byte {
+pub fn (d &Digest) sum(b_in []u8) []u8 {
	// Make a copy of d so that caller can keep writing and summing.
	mut d0 := *d
	hash := d0.checksum()

@ -215,10 +215,10 @@ pub fn (d &Digest) sum(b_in []byte) []byte {
}

// checksum returns the current byte checksum of the Digest.
-pub fn (mut d Digest) checksum() []byte {
+pub fn (mut d Digest) checksum() []u8 {
	// Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
	mut len := d.len
-	mut tmp := []byte{len: (128)}
+	mut tmp := []u8{len: (128)}
	tmp[0] = 0x80
	if int(len) % 128 < 112 {
		d.write(tmp[..112 - int(len) % 128]) or { panic(err) }

@ -233,7 +233,7 @@ pub fn (mut d Digest) checksum() []byte {
	if d.nx != 0 {
		panic('d.nx != 0')
	}
-	mut digest := []byte{len: sha512.size}
+	mut digest := []u8{len: sha512.size}
	binary.big_endian_put_u64(mut digest, d.h[0])
	binary.big_endian_put_u64(mut digest[8..], d.h[1])
	binary.big_endian_put_u64(mut digest[16..], d.h[2])

@ -248,43 +248,43 @@ pub fn (mut d Digest) checksum() []byte {
}

// sum512 returns the SHA512 checksum of the data.
-pub fn sum512(data []byte) []byte {
+pub fn sum512(data []u8) []u8 {
	mut d := new_digest(.sha512)
	d.write(data) or { panic(err) }
	return d.checksum()
}

// sum384 returns the SHA384 checksum of the data.
-pub fn sum384(data []byte) []byte {
+pub fn sum384(data []u8) []u8 {
	mut d := new_digest(.sha384)
	d.write(data) or { panic(err) }
	sum := d.checksum()
-	mut sum384 := []byte{len: sha512.size384}
+	mut sum384 := []u8{len: sha512.size384}
	copy(mut sum384, sum[..sha512.size384])
	return sum384
}

// sum512_224 returns the Sum512/224 checksum of the data.
-pub fn sum512_224(data []byte) []byte {
+pub fn sum512_224(data []u8) []u8 {
	mut d := new_digest(.sha512_224)
	d.write(data) or { panic(err) }
	sum := d.checksum()
-	mut sum224 := []byte{len: sha512.size224}
+	mut sum224 := []u8{len: sha512.size224}
	copy(mut sum224, sum[..sha512.size224])
	return sum224
}

// sum512_256 returns the Sum512/256 checksum of the data.
-pub fn sum512_256(data []byte) []byte {
+pub fn sum512_256(data []u8) []u8 {
	mut d := new_digest(.sha512_256)
	d.write(data) or { panic(err) }
	sum := d.checksum()
-	mut sum256 := []byte{len: sha512.size256}
+	mut sum256 := []u8{len: sha512.size256}
	copy(mut sum256, sum[..sha512.size256])
	return sum256
}

-fn block(mut dig Digest, p []byte) {
+fn block(mut dig Digest, p []u8) {
	// For now just use block_generic until we have specific
	// architecture optimized versions
	block_generic(mut dig, p)

@ -32,7 +32,7 @@ const (
	u64(0x4cc5d4becb3e42b6), u64(0x597f299cfc657e2a), u64(0x5fcb6fab3ad6faec), u64(0x6c44198c4a475817)]
)

-fn block_generic(mut dig Digest, p_ []byte) {
+fn block_generic(mut dig Digest, p_ []u8) {
	unsafe {
		mut p := p_
		mut w := []u64{len: (80)}
@ -25,13 +25,13 @@ fn init_alphabets() map[string]Alphabet {
|
||||||
struct Alphabet {
|
struct Alphabet {
|
||||||
mut:
|
mut:
|
||||||
decode []i8 = []i8{len: 128, init: -1}
|
decode []i8 = []i8{len: 128, init: -1}
|
||||||
encode []byte = []byte{len: 58}
|
encode []u8 = []u8{len: 58}
|
||||||
}
|
}
|
||||||
|
|
||||||
// str returns an Alphabet encode table byte array as a string
|
// str returns an Alphabet encode table byte array as a string
|
||||||
pub fn (alphabet Alphabet) str() string {
|
pub fn (alphabet Alphabet) str() string {
|
||||||
// i guess i had a brain fart here. Why would I actually use this code?!
|
// i guess i had a brain fart here. Why would I actually use this code?!
|
||||||
// mut str := []byte{}
|
// mut str := []u8{}
|
||||||
// for entry in alphabet.encode {
|
// for entry in alphabet.encode {
|
||||||
// str << entry
|
// str << entry
|
||||||
// }
|
// }
|
||||||
|
|
|
@ -15,7 +15,7 @@ pub fn encode_int_walpha(input int, alphabet Alphabet) ?string {
|
||||||
return error(@MOD + '.' + @FN + ': input must be greater than zero')
|
return error(@MOD + '.' + @FN + ': input must be greater than zero')
|
||||||
}
|
}
|
||||||
|
|
||||||
mut buffer := []byte{}
|
mut buffer := []u8{}
|
||||||
|
|
||||||
mut i := input
|
mut i := input
|
||||||
for i > 0 {
|
for i > 0 {
|
||||||
|
@ -55,7 +55,7 @@ pub fn encode_walpha(input string, alphabet Alphabet) string {
|
||||||
// integer simplification of
|
// integer simplification of
|
||||||
// ceil(log(256)/log(58))
|
// ceil(log(256)/log(58))
|
||||||
|
|
||||||
mut out := []byte{len: sz}
|
mut out := []u8{len: sz}
|
||||||
mut i := 0
|
mut i := 0
|
||||||
mut high := 0
|
mut high := 0
|
||||||
mut carry := u32(0)
|
mut carry := u32(0)
|
||||||
|
@ -131,7 +131,7 @@ pub fn decode_walpha(str string, alphabet Alphabet) ?string {
|
||||||
mut c := u64(0)
|
mut c := u64(0)
|
||||||
|
|
||||||
// the 32-bit algorithm stretches the result up to 2x
|
// the 32-bit algorithm stretches the result up to 2x
|
||||||
mut binu := []byte{len: 2 * ((b58sz * 406 / 555) + 1)}
|
mut binu := []u8{len: 2 * ((b58sz * 406 / 555) + 1)}
|
||||||
mut outi := []u32{len: (b58sz + 3) / 4}
|
mut outi := []u32{len: (b58sz + 3) / 4}
|
||||||
|
|
||||||
for _, r in str {
|
for _, r in str {
|
||||||
|
|
|
@ -215,7 +215,7 @@ pub fn decode_str(data string) string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode encodes the `[]byte` value passed in `data` to base64.
|
// encode encodes the `[]u8` value passed in `data` to base64.
|
||||||
// Please note: base64 encoding returns a `string` that is ~ 4/3 larger than the input.
|
// Please note: base64 encoding returns a `string` that is ~ 4/3 larger than the input.
|
||||||
// Please note: If you need to encode many strings repeatedly, take a look at `encode_in_buffer`.
|
// Please note: If you need to encode many strings repeatedly, take a look at `encode_in_buffer`.
|
||||||
// Example: assert base64.encode('V in base 64') == 'ViBpbiBiYXNlIDY0'
|
// Example: assert base64.encode('V in base 64') == 'ViBpbiBiYXNlIDY0'
|
||||||
|
|
|
@ -17,7 +17,7 @@ const (
|
||||||
|
|
||||||
// url_decode returns a decoded URL `string` version of
|
// url_decode returns a decoded URL `string` version of
|
||||||
// the a base64 url encoded `string` passed in `data`.
|
// the a base64 url encoded `string` passed in `data`.
|
||||||
pub fn url_decode(data string) []byte {
|
pub fn url_decode(data string) []u8 {
|
||||||
mut result := data.replace_each(['-', '+', '_', '/'])
|
mut result := data.replace_each(['-', '+', '_', '/'])
|
||||||
match result.len % 4 {
|
match result.len % 4 {
|
||||||
// Pad with trailing '='s
|
// Pad with trailing '='s
|
||||||
|
@ -42,7 +42,7 @@ pub fn url_decode_str(data string) string {
|
||||||
|
|
||||||
// url_encode returns a base64 URL encoded `string` version
|
// url_encode returns a base64 URL encoded `string` version
|
||||||
// of the value passed in `data`.
|
// of the value passed in `data`.
|
||||||
pub fn url_encode(data []byte) string {
|
pub fn url_encode(data []u8) string {
|
||||||
return encode(data).replace_each(['+', '-', '/', '_', '=', ''])
|
return encode(data).replace_each(['+', '-', '/', '_', '=', ''])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@ fn test_long_encoding() {
|
||||||
repeats := 1000
|
repeats := 1000
|
||||||
input_size := 3000
|
input_size := 3000
|
||||||
|
|
||||||
s_original := []byte{len: input_size, init: `a`}
|
s_original := []u8{len: input_size, init: `a`}
|
||||||
s_encoded := base64.encode(s_original)
|
s_encoded := base64.encode(s_original)
|
||||||
s_encoded_bytes := s_encoded.bytes()
|
s_encoded_bytes := s_encoded.bytes()
|
||||||
s_decoded := base64.decode(s_encoded)
|
s_decoded := base64.decode(s_encoded)
|
||||||
|
@ -20,7 +20,7 @@ fn test_long_encoding() {
|
||||||
}
|
}
|
||||||
//
|
//
|
||||||
encoded_size := base64.encode_in_buffer(s_original, ebuffer)
|
encoded_size := base64.encode_in_buffer(s_original, ebuffer)
|
||||||
mut encoded_in_buf := []byte{len: encoded_size}
|
mut encoded_in_buf := []u8{len: encoded_size}
|
||||||
unsafe { C.memcpy(encoded_in_buf.data, ebuffer, encoded_size) }
|
unsafe { C.memcpy(encoded_in_buf.data, ebuffer, encoded_size) }
|
||||||
assert input_size * 4 / 3 == encoded_size
|
assert input_size * 4 / 3 == encoded_size
|
||||||
assert encoded_in_buf[0] == `Y`
|
assert encoded_in_buf[0] == `Y`
|
||||||
|
@ -37,7 +37,7 @@ fn test_long_encoding() {
|
||||||
|
|
||||||
decoded_size := base64.decode_in_buffer(s_encoded, dbuffer)
|
decoded_size := base64.decode_in_buffer(s_encoded, dbuffer)
|
||||||
assert decoded_size == input_size
|
assert decoded_size == input_size
|
||||||
mut decoded_in_buf := []byte{len: decoded_size}
|
mut decoded_in_buf := []u8{len: decoded_size}
|
||||||
unsafe { C.memcpy(decoded_in_buf.data, dbuffer, decoded_size) }
|
unsafe { C.memcpy(decoded_in_buf.data, dbuffer, decoded_size) }
|
||||||
assert decoded_in_buf == s_original
|
assert decoded_in_buf == s_original
|
||||||
|
|
||||||
|
|
|
@ -137,9 +137,9 @@ fn test_decode_in_buffer_bytes() {
|
||||||
TestPair{'fooba', 'Zm9vYmE='},
|
TestPair{'fooba', 'Zm9vYmE='},
|
||||||
TestPair{'foobar', 'Zm9vYmFy'},
|
TestPair{'foobar', 'Zm9vYmFy'},
|
||||||
]
|
]
|
||||||
mut src_dec_buf := []byte{len: 8}
|
mut src_dec_buf := []u8{len: 8}
|
||||||
mut src_enc_buf := []byte{len: 8}
|
mut src_enc_buf := []u8{len: 8}
|
||||||
mut out_buf := []byte{len: 8}
|
mut out_buf := []u8{len: 8}
|
||||||
|
|
||||||
for p in rfc4648_pairs {
|
for p in rfc4648_pairs {
|
||||||
src_dec_buf = p.decoded.bytes()
|
src_dec_buf = p.decoded.bytes()
|
||||||
|
|
|
@ -5,26 +5,26 @@ module binary
|
||||||
|
|
||||||
// Little Endian
|
// Little Endian
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_u16(b []byte) u16 {
|
pub fn little_endian_u16(b []u8) u16 {
|
||||||
_ = b[1] // bounds check
|
_ = b[1] // bounds check
|
||||||
return u16(b[0]) | (u16(b[1]) << u16(8))
|
return u16(b[0]) | (u16(b[1]) << u16(8))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_put_u16(mut b []byte, v u16) {
|
pub fn little_endian_put_u16(mut b []u8, v u16) {
|
||||||
_ = b[1] // bounds check
|
_ = b[1] // bounds check
|
||||||
b[0] = u8(v)
|
b[0] = u8(v)
|
||||||
b[1] = u8(v >> u16(8))
|
b[1] = u8(v >> u16(8))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_u32(b []byte) u32 {
|
pub fn little_endian_u32(b []u8) u32 {
|
||||||
_ = b[3] // bounds check
|
_ = b[3] // bounds check
|
||||||
return u32(b[0]) | (u32(b[1]) << u32(8)) | (u32(b[2]) << u32(16)) | (u32(b[3]) << u32(24))
|
return u32(b[0]) | (u32(b[1]) << u32(8)) | (u32(b[2]) << u32(16)) | (u32(b[3]) << u32(24))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_put_u32(mut b []byte, v u32) {
|
pub fn little_endian_put_u32(mut b []u8, v u32) {
|
||||||
_ = b[3] // bounds check
|
_ = b[3] // bounds check
|
||||||
b[0] = u8(v)
|
b[0] = u8(v)
|
||||||
b[1] = u8(v >> u32(8))
|
b[1] = u8(v >> u32(8))
|
||||||
|
@ -33,13 +33,13 @@ pub fn little_endian_put_u32(mut b []byte, v u32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_u64(b []byte) u64 {
|
pub fn little_endian_u64(b []u8) u64 {
|
||||||
_ = b[7] // bounds check
|
_ = b[7] // bounds check
|
||||||
return u64(b[0]) | (u64(b[1]) << u64(8)) | (u64(b[2]) << u64(16)) | (u64(b[3]) << u64(24)) | (u64(b[4]) << u64(32)) | (u64(b[5]) << u64(40)) | (u64(b[6]) << u64(48)) | (u64(b[7]) << u64(56))
|
return u64(b[0]) | (u64(b[1]) << u64(8)) | (u64(b[2]) << u64(16)) | (u64(b[3]) << u64(24)) | (u64(b[4]) << u64(32)) | (u64(b[5]) << u64(40)) | (u64(b[6]) << u64(48)) | (u64(b[7]) << u64(56))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn little_endian_put_u64(mut b []byte, v u64) {
|
pub fn little_endian_put_u64(mut b []u8, v u64) {
|
||||||
_ = b[7] // bounds check
|
_ = b[7] // bounds check
|
||||||
b[0] = u8(v)
|
b[0] = u8(v)
|
||||||
b[1] = u8(v >> u64(8))
|
b[1] = u8(v >> u64(8))
|
||||||
|
@ -53,26 +53,26 @@ pub fn little_endian_put_u64(mut b []byte, v u64) {
|
||||||
|
|
||||||
// Big Endian
|
// Big Endian
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_u16(b []byte) u16 {
|
pub fn big_endian_u16(b []u8) u16 {
|
||||||
_ = b[1] // bounds check
|
_ = b[1] // bounds check
|
||||||
return u16(b[1]) | (u16(b[0]) << u16(8))
|
return u16(b[1]) | (u16(b[0]) << u16(8))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_put_u16(mut b []byte, v u16) {
|
pub fn big_endian_put_u16(mut b []u8, v u16) {
|
||||||
_ = b[1] // bounds check
|
_ = b[1] // bounds check
|
||||||
b[0] = u8(v >> u16(8))
|
b[0] = u8(v >> u16(8))
|
||||||
b[1] = u8(v)
|
b[1] = u8(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_u32(b []byte) u32 {
|
pub fn big_endian_u32(b []u8) u32 {
|
||||||
_ = b[3] // bounds check
|
_ = b[3] // bounds check
|
||||||
return u32(b[3]) | (u32(b[2]) << u32(8)) | (u32(b[1]) << u32(16)) | (u32(b[0]) << u32(24))
|
return u32(b[3]) | (u32(b[2]) << u32(8)) | (u32(b[1]) << u32(16)) | (u32(b[0]) << u32(24))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_put_u32(mut b []byte, v u32) {
|
pub fn big_endian_put_u32(mut b []u8, v u32) {
|
||||||
_ = b[3] // bounds check
|
_ = b[3] // bounds check
|
||||||
b[0] = u8(v >> u32(24))
|
b[0] = u8(v >> u32(24))
|
||||||
b[1] = u8(v >> u32(16))
|
b[1] = u8(v >> u32(16))
|
||||||
|
@ -81,13 +81,13 @@ pub fn big_endian_put_u32(mut b []byte, v u32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_u64(b []byte) u64 {
|
pub fn big_endian_u64(b []u8) u64 {
|
||||||
_ = b[7] // bounds check
|
_ = b[7] // bounds check
|
||||||
return u64(b[7]) | (u64(b[6]) << u64(8)) | (u64(b[5]) << u64(16)) | (u64(b[4]) << u64(24)) | (u64(b[3]) << u64(32)) | (u64(b[2]) << u64(40)) | (u64(b[1]) << u64(48)) | (u64(b[0]) << u64(56))
|
return u64(b[7]) | (u64(b[6]) << u64(8)) | (u64(b[5]) << u64(16)) | (u64(b[4]) << u64(24)) | (u64(b[3]) << u64(32)) | (u64(b[2]) << u64(40)) | (u64(b[1]) << u64(48)) | (u64(b[0]) << u64(56))
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn big_endian_put_u64(mut b []byte, v u64) {
|
pub fn big_endian_put_u64(mut b []u8, v u64) {
|
||||||
_ = b[7] // bounds check
|
_ = b[7] // bounds check
|
||||||
b[0] = u8(v >> u64(56))
|
b[0] = u8(v >> u64(56))
|
||||||
b[1] = u8(v >> u64(48))
|
b[1] = u8(v >> u64(48))
|
||||||
|
|
|
@ -5,7 +5,7 @@ import strings
|
||||||
// decode converts a hex string into an array of bytes. The expected
|
// decode converts a hex string into an array of bytes. The expected
|
||||||
// input format is 2 ASCII characters for each output byte. If the provided
|
// input format is 2 ASCII characters for each output byte. If the provided
|
||||||
// string length is not a multiple of 2, an implicit `0` is prepended to it.
|
// string length is not a multiple of 2, an implicit `0` is prepended to it.
|
||||||
pub fn decode(s string) ?[]byte {
|
pub fn decode(s string) ?[]u8 {
|
||||||
mut hex_str := s
|
mut hex_str := s
|
||||||
if hex_str.len >= 2 {
|
if hex_str.len >= 2 {
|
||||||
if s[0] == `0` && (s[1] == `x` || s[1] == `X`) {
|
if s[0] == `0` && (s[1] == `x` || s[1] == `X`) {
|
||||||
|
@ -13,7 +13,7 @@ pub fn decode(s string) ?[]byte {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if hex_str.len == 0 {
|
if hex_str.len == 0 {
|
||||||
return []byte{}
|
return []u8{}
|
||||||
} else if hex_str.len == 1 {
|
} else if hex_str.len == 1 {
|
||||||
return [char2nibble(hex_str[0]) ?]
|
return [char2nibble(hex_str[0]) ?]
|
||||||
} else if hex_str.len == 2 {
|
} else if hex_str.len == 2 {
|
||||||
|
@ -27,7 +27,7 @@ pub fn decode(s string) ?[]byte {
|
||||||
val = (val << 4) | char2nibble(hex_str[1]) ?
|
val = (val << 4) | char2nibble(hex_str[1]) ?
|
||||||
}
|
}
|
||||||
// set cap to hex_str.len/2 rounded up
|
// set cap to hex_str.len/2 rounded up
|
||||||
mut bytes := []byte{len: 1, cap: (hex_str.len + 1) >> 1, init: val}
|
mut bytes := []u8{len: 1, cap: (hex_str.len + 1) >> 1, init: val}
|
||||||
// iterate over every 2 bytes
|
// iterate over every 2 bytes
|
||||||
// the start index depends on if hex_str.len is odd
|
// the start index depends on if hex_str.len is odd
|
||||||
for i := 2 - (hex_str.len & 1); i < hex_str.len; i += 2 {
|
for i := 2 - (hex_str.len & 1); i < hex_str.len; i += 2 {
|
||||||
|
@ -41,7 +41,7 @@ pub fn decode(s string) ?[]byte {
|
||||||
// encode converts an array of bytes into a string of ASCII hex bytes. The
|
// encode converts an array of bytes into a string of ASCII hex bytes. The
|
||||||
// output will always be a string with length a multiple of 2.
|
// output will always be a string with length a multiple of 2.
|
||||||
[manualfree]
|
[manualfree]
|
||||||
pub fn encode(bytes []byte) string {
|
pub fn encode(bytes []u8) string {
|
||||||
mut sb := strings.new_builder(bytes.len * 2)
|
mut sb := strings.new_builder(bytes.len * 2)
|
||||||
for b in bytes {
|
for b in bytes {
|
||||||
sb.write_string(b.hex())
|
sb.write_string(b.hex())
|
||||||
|
|
|
@ -92,7 +92,7 @@ pub fn (s &Context) add_fallback_font(base int, fallback int) int {
|
||||||
// `free_data` indicates if `data` should be freed after the font is added.
|
// `free_data` indicates if `data` should be freed after the font is added.
|
||||||
// The function returns the id of the font on success, `fontstash.invalid` otherwise.
|
// The function returns the id of the font on success, `fontstash.invalid` otherwise.
|
||||||
[inline]
|
[inline]
|
||||||
pub fn (s &Context) add_font_mem(name string, data []byte, free_data bool) int {
|
pub fn (s &Context) add_font_mem(name string, data []u8, free_data bool) int {
|
||||||
return C.fonsAddFontMem(s, &char(name.str), data.data, data.len, int(free_data))
|
return C.fonsAddFontMem(s, &char(name.str), data.data, data.len, int(free_data))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -92,10 +92,10 @@ pub:
|
||||||
custom_bold_font_path string
|
custom_bold_font_path string
|
||||||
ui_mode bool // refreshes only on events to save CPU usage
|
ui_mode bool // refreshes only on events to save CPU usage
|
||||||
// font bytes for embedding
|
// font bytes for embedding
|
||||||
font_bytes_normal []byte
|
font_bytes_normal []u8
|
||||||
font_bytes_bold []byte
|
font_bytes_bold []u8
|
||||||
font_bytes_mono []byte
|
font_bytes_mono []u8
|
||||||
font_bytes_italic []byte
|
font_bytes_italic []u8
|
||||||
native_rendering bool // Cocoa on macOS/iOS, GDI+ on Windows
|
native_rendering bool // Cocoa on macOS/iOS, GDI+ on Windows
|
||||||
// drag&drop
|
// drag&drop
|
||||||
enable_dragndrop bool // enable file dropping (drag'n'drop), default is false
|
enable_dragndrop bool // enable file dropping (drag'n'drop), default is false
|
||||||
|
|
|
@ -243,10 +243,10 @@ pub:
|
||||||
custom_bold_font_path string
|
custom_bold_font_path string
|
||||||
ui_mode bool // refreshes only on events to save CPU usage
|
ui_mode bool // refreshes only on events to save CPU usage
|
||||||
// font bytes for embedding
|
// font bytes for embedding
|
||||||
font_bytes_normal []byte
|
font_bytes_normal []u8
|
||||||
font_bytes_bold []byte
|
font_bytes_bold []u8
|
||||||
font_bytes_mono []byte
|
font_bytes_mono []u8
|
||||||
font_bytes_italic []byte
|
font_bytes_italic []u8
|
||||||
native_rendering bool // Cocoa on macOS/iOS, GDI+ on Windows
|
native_rendering bool // Cocoa on macOS/iOS, GDI+ on Windows
|
||||||
// drag&drop
|
// drag&drop
|
||||||
enable_dragndrop bool // enable file dropping (drag'n'drop), default is false
|
enable_dragndrop bool // enable file dropping (drag'n'drop), default is false
|
||||||
|
|
|
@ -237,7 +237,7 @@ pub fn (mut ctx Context) create_image_from_memory(buf &byte, bufsize int) Image
|
||||||
// byte array `b`.
|
// byte array `b`.
|
||||||
//
|
//
|
||||||
// See also: create_image_from_memory
|
// See also: create_image_from_memory
|
||||||
pub fn (mut ctx Context) create_image_from_byte_array(b []byte) Image {
|
pub fn (mut ctx Context) create_image_from_byte_array(b []u8) Image {
|
||||||
return ctx.create_image_from_memory(b.data, b.len)
|
return ctx.create_image_from_memory(b.data, b.len)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -64,10 +64,10 @@ fn new_ft(c FTConfig) ?&FT {
|
||||||
}
|
}
|
||||||
|
|
||||||
mut normal_path := c.font_path
|
mut normal_path := c.font_path
|
||||||
mut bytes := []byte{}
|
mut bytes := []u8{}
|
||||||
$if android {
|
$if android {
|
||||||
// First try any filesystem paths
|
// First try any filesystem paths
|
||||||
bytes = os.read_bytes(c.font_path) or { []byte{} }
|
bytes = os.read_bytes(c.font_path) or { []u8{} }
|
||||||
if bytes.len == 0 {
|
if bytes.len == 0 {
|
||||||
// ... then try the APK asset path
|
// ... then try the APK asset path
|
||||||
bytes = os.read_apk_asset(c.font_path) or {
|
bytes = os.read_apk_asset(c.font_path) or {
|
||||||
|
|
|
@ -9,10 +9,10 @@ struct FTConfig {
|
||||||
custom_bold_font_path string
|
custom_bold_font_path string
|
||||||
scale f32 = 1.0
|
scale f32 = 1.0
|
||||||
font_size int
|
font_size int
|
||||||
bytes_normal []byte
|
bytes_normal []u8
|
||||||
bytes_bold []byte
|
bytes_bold []u8
|
||||||
bytes_mono []byte
|
bytes_mono []u8
|
||||||
bytes_italic []byte
|
bytes_italic []u8
|
||||||
}
|
}
|
||||||
|
|
||||||
struct StringToRender {
|
struct StringToRender {
|
||||||
|
|
|
@ -37,7 +37,7 @@ fn (mut c Crc32) generate_table(poly int) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (c &Crc32) sum32(b []byte) u32 {
|
fn (c &Crc32) sum32(b []u8) u32 {
|
||||||
mut crc := ~u32(0)
|
mut crc := ~u32(0)
|
||||||
for i in 0 .. b.len {
|
for i in 0 .. b.len {
|
||||||
crc = c.table[u8(crc) ^ b[i]] ^ (crc >> 8)
|
crc = c.table[u8(crc) ^ b[i]] ^ (crc >> 8)
|
||||||
|
@ -45,7 +45,7 @@ fn (c &Crc32) sum32(b []byte) u32 {
|
||||||
return ~crc
|
return ~crc
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (c &Crc32) checksum(b []byte) u32 {
|
pub fn (c &Crc32) checksum(b []u8) u32 {
|
||||||
return c.sum32(b)
|
return c.sum32(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,7 +57,7 @@ pub fn new(poly int) &Crc32 {
|
||||||
}
|
}
|
||||||
|
|
||||||
// calculate crc32 using ieee
|
// calculate crc32 using ieee
|
||||||
pub fn sum(b []byte) u32 {
|
pub fn sum(b []u8) u32 {
|
||||||
c := new(int(crc32.ieee))
|
c := new(int(crc32.ieee))
|
||||||
return c.sum32(b)
|
return c.sum32(b)
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,7 +23,7 @@ pub fn sum32_string(data string) u32 {
|
||||||
// sum32 returns a fnv1a hash of the memory block, described by the dynamic
|
// sum32 returns a fnv1a hash of the memory block, described by the dynamic
|
||||||
// byte array `data`.
|
// byte array `data`.
|
||||||
[direct_array_access; inline]
|
[direct_array_access; inline]
|
||||||
pub fn sum32(data []byte) u32 {
|
pub fn sum32(data []u8) u32 {
|
||||||
mut hash := fnv1a.fnv32_offset_basis
|
mut hash := fnv1a.fnv32_offset_basis
|
||||||
for i in 0 .. data.len {
|
for i in 0 .. data.len {
|
||||||
hash = (hash ^ u32(data[i])) * fnv1a.fnv32_prime
|
hash = (hash ^ u32(data[i])) * fnv1a.fnv32_prime
|
||||||
|
@ -67,7 +67,7 @@ pub fn sum64_string(data string) u64 {
|
||||||
// sum64 returns a fnv1a hash of the memory block, described by the dynamic
|
// sum64 returns a fnv1a hash of the memory block, described by the dynamic
|
||||||
// byte array `data`.
|
// byte array `data`.
|
||||||
[direct_array_access; inline]
|
[direct_array_access; inline]
|
||||||
pub fn sum64(data []byte) u64 {
|
pub fn sum64(data []u8) u64 {
|
||||||
mut hash := fnv1a.fnv64_offset_basis
|
mut hash := fnv1a.fnv64_offset_basis
|
||||||
for i in 0 .. data.len {
|
for i in 0 .. data.len {
|
||||||
hash = (hash ^ u64(data[i])) * fnv1a.fnv64_prime
|
hash = (hash ^ u64(data[i])) * fnv1a.fnv64_prime
|
||||||
|
|
|
@ -6,7 +6,7 @@ module hash
|
||||||
interface Hasher {
|
interface Hasher {
|
||||||
// Sum appends the current hash to b and returns the resulting array.
|
// Sum appends the current hash to b and returns the resulting array.
|
||||||
// It does not change the underlying hash state.
|
// It does not change the underlying hash state.
|
||||||
sum(b []byte) []byte
|
sum(b []u8) []u8
|
||||||
size() int
|
size() int
|
||||||
block_size() int
|
block_size() int
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,6 +22,6 @@ pub fn sum64_string(key string, seed u64) u64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
[inline]
|
[inline]
|
||||||
pub fn sum64(key []byte, seed u64) u64 {
|
pub fn sum64(key []u8, seed u64) u64 {
|
||||||
return wyhash_c(&u8(key.data), u64(key.len), seed)
|
return wyhash_c(&u8(key.data), u64(key.len), seed)
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,7 +4,7 @@ module io
|
||||||
struct BufferedReader {
|
struct BufferedReader {
|
||||||
mut:
|
mut:
|
||||||
reader Reader
|
reader Reader
|
||||||
buf []byte
|
buf []u8
|
||||||
offset int // current offset in the buffer
|
offset int // current offset in the buffer
|
||||||
len int
|
len int
|
||||||
fails int // how many times fill_buffer has read 0 bytes in a row
|
fails int // how many times fill_buffer has read 0 bytes in a row
|
||||||
|
@ -28,7 +28,7 @@ pub fn new_buffered_reader(o BufferedReaderConfig) &BufferedReader {
|
||||||
// create
|
// create
|
||||||
r := &BufferedReader{
|
r := &BufferedReader{
|
||||||
reader: o.reader
|
reader: o.reader
|
||||||
buf: []byte{len: o.cap, cap: o.cap}
|
buf: []u8{len: o.cap, cap: o.cap}
|
||||||
offset: 0
|
offset: 0
|
||||||
mfails: o.retries
|
mfails: o.retries
|
||||||
}
|
}
|
||||||
|
@ -36,7 +36,7 @@ pub fn new_buffered_reader(o BufferedReaderConfig) &BufferedReader {
|
||||||
}
|
}
|
||||||
|
|
||||||
// read fufills the Reader interface
|
// read fufills the Reader interface
|
||||||
pub fn (mut r BufferedReader) read(mut buf []byte) ?int {
|
pub fn (mut r BufferedReader) read(mut buf []u8) ?int {
|
||||||
if r.end_of_stream {
|
if r.end_of_stream {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
|
@ -108,7 +108,7 @@ pub fn (mut r BufferedReader) read_line() ?string {
|
||||||
if r.end_of_stream {
|
if r.end_of_stream {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
mut line := []byte{}
|
mut line := []u8{}
|
||||||
for {
|
for {
|
||||||
if r.needs_fill() {
|
if r.needs_fill() {
|
||||||
// go fetch some new data
|
// go fetch some new data
|
||||||
|
|
|
@ -10,7 +10,7 @@ fn imin(a int, b int) int {
|
||||||
return if a < b { a } else { b }
|
return if a < b { a } else { b }
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut s StringReader) read(mut buf []byte) ?int {
|
fn (mut s StringReader) read(mut buf []u8) ?int {
|
||||||
$if debug {
|
$if debug {
|
||||||
eprintln('>>>> StringReader.read output buf.len: $buf.len')
|
eprintln('>>>> StringReader.read output buf.len: $buf.len')
|
||||||
}
|
}
|
||||||
|
@ -24,14 +24,14 @@ fn (mut s StringReader) read(mut buf []byte) ?int {
|
||||||
return read
|
return read
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read_from_string(text string, capacity int) []byte {
|
fn read_from_string(text string, capacity int) []u8 {
|
||||||
mut str := StringReader{
|
mut str := StringReader{
|
||||||
text: text
|
text: text
|
||||||
}
|
}
|
||||||
mut stream := io.new_buffered_reader(reader: str, cap: capacity)
|
mut stream := io.new_buffered_reader(reader: str, cap: capacity)
|
||||||
//
|
//
|
||||||
mut buf := []byte{len: 1}
|
mut buf := []u8{len: 1}
|
||||||
mut res := []byte{}
|
mut res := []u8{}
|
||||||
mut i := 0
|
mut i := 0
|
||||||
for {
|
for {
|
||||||
z := stream.read(mut buf) or { break }
|
z := stream.read(mut buf) or { break }
|
||||||
|
@ -50,7 +50,7 @@ pub fn test_reading_from_a_string() {
|
||||||
assert read_from_string('ab', capacity) == [u8(`a`), `b`]
|
assert read_from_string('ab', capacity) == [u8(`a`), `b`]
|
||||||
assert read_from_string('abc', capacity) == [u8(`a`), `b`, `c`]
|
assert read_from_string('abc', capacity) == [u8(`a`), `b`, `c`]
|
||||||
assert read_from_string('abcde', capacity) == [u8(`a`), `b`, `c`, `d`, `e`]
|
assert read_from_string('abcde', capacity) == [u8(`a`), `b`, `c`, `d`, `e`]
|
||||||
large_string_bytes := []byte{len: 1000, init: `x`}
|
large_string_bytes := []u8{len: 1000, init: `x`}
|
||||||
large_string := large_string_bytes.bytestr()
|
large_string := large_string_bytes.bytestr()
|
||||||
assert read_from_string(large_string, capacity) == large_string_bytes
|
assert read_from_string(large_string, capacity) == large_string_bytes
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,7 +5,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
pub fn cp(mut src Reader, mut dst Writer) ? {
|
pub fn cp(mut src Reader, mut dst Writer) ? {
|
||||||
mut buf := []byte{len: io.buf_max_len}
|
mut buf := []u8{len: io.buf_max_len}
|
||||||
for {
|
for {
|
||||||
len := src.read(mut buf) or { break }
|
len := src.read(mut buf) or { break }
|
||||||
dst.write(buf[..len]) or { return err }
|
dst.write(buf[..len]) or { return err }
|
||||||
|
|
|
@ -2,17 +2,17 @@ import io
|
||||||
|
|
||||||
struct Buf {
|
struct Buf {
|
||||||
pub:
|
pub:
|
||||||
bytes []byte
|
bytes []u8
|
||||||
mut:
|
mut:
|
||||||
i int
|
i int
|
||||||
}
|
}
|
||||||
|
|
||||||
struct Writ {
|
struct Writ {
|
||||||
pub mut:
|
pub mut:
|
||||||
bytes []byte
|
bytes []u8
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut b Buf) read(mut buf []byte) ?int {
|
fn (mut b Buf) read(mut buf []u8) ?int {
|
||||||
if !(b.i < b.bytes.len) {
|
if !(b.i < b.bytes.len) {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
|
@ -21,7 +21,7 @@ fn (mut b Buf) read(mut buf []byte) ?int {
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut w Writ) write(buf []byte) ?int {
|
fn (mut w Writ) write(buf []u8) ?int {
|
||||||
if buf.len <= 0 {
|
if buf.len <= 0 {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
|
@ -34,7 +34,7 @@ fn test_copy() {
|
||||||
bytes: 'abcdefghij'.repeat(10).bytes()
|
bytes: 'abcdefghij'.repeat(10).bytes()
|
||||||
}
|
}
|
||||||
mut dst := Writ{
|
mut dst := Writ{
|
||||||
bytes: []byte{}
|
bytes: []u8{}
|
||||||
}
|
}
|
||||||
io.cp(mut src, mut dst) or { assert false }
|
io.cp(mut src, mut dst) or { assert false }
|
||||||
assert dst.bytes == src.bytes
|
assert dst.bytes == src.bytes
|
||||||
|
|
|
@ -22,7 +22,7 @@ pub mut:
|
||||||
// written. If any writer fails to write the full length an error is returned
|
// written. If any writer fails to write the full length an error is returned
|
||||||
// and writing to other writers stops. If any writer returns an error the error
|
// and writing to other writers stops. If any writer returns an error the error
|
||||||
// is returned immediately and writing to other writers stops.
|
// is returned immediately and writing to other writers stops.
|
||||||
pub fn (mut m MultiWriter) write(buf []byte) ?int {
|
pub fn (mut m MultiWriter) write(buf []u8) ?int {
|
||||||
for mut w in m.writers {
|
for mut w in m.writers {
|
||||||
n := w.write(buf) ?
|
n := w.write(buf) ?
|
||||||
if n != buf.len {
|
if n != buf.len {
|
||||||
|
|
|
@ -40,20 +40,20 @@ fn test_multi_writer_write_error() {
|
||||||
|
|
||||||
struct TestWriter {
|
struct TestWriter {
|
||||||
pub mut:
|
pub mut:
|
||||||
bytes []byte
|
bytes []u8
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut w TestWriter) write(buf []byte) ?int {
|
fn (mut w TestWriter) write(buf []u8) ?int {
|
||||||
w.bytes << buf
|
w.bytes << buf
|
||||||
return buf.len
|
return buf.len
|
||||||
}
|
}
|
||||||
|
|
||||||
struct TestIncompleteWriter {
|
struct TestIncompleteWriter {
|
||||||
pub mut:
|
pub mut:
|
||||||
bytes []byte
|
bytes []u8
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut w TestIncompleteWriter) write(buf []byte) ?int {
|
fn (mut w TestIncompleteWriter) write(buf []u8) ?int {
|
||||||
b := buf[..buf.len - 1]
|
b := buf[..buf.len - 1]
|
||||||
w.bytes << b
|
w.bytes << b
|
||||||
return b.len
|
return b.len
|
||||||
|
@ -61,6 +61,6 @@ fn (mut w TestIncompleteWriter) write(buf []byte) ?int {
|
||||||
|
|
||||||
struct TestErrorWriter {}
|
struct TestErrorWriter {}
|
||||||
|
|
||||||
fn (mut w TestErrorWriter) write(buf []byte) ?int {
|
fn (mut w TestErrorWriter) write(buf []u8) ?int {
|
||||||
return error('error writer errored')
|
return error('error writer errored')
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,7 +7,7 @@ pub interface Reader {
|
||||||
// A type that implements this should return
|
// A type that implements this should return
|
||||||
// `none` on end of stream (EOF) instead of just returning 0
|
// `none` on end of stream (EOF) instead of just returning 0
|
||||||
mut:
|
mut:
|
||||||
read(mut buf []byte) ?int
|
read(mut buf []u8) ?int
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -25,11 +25,11 @@ mut:
|
||||||
|
|
||||||
// read_all reads all bytes from a reader until either a 0 length read
|
// read_all reads all bytes from a reader until either a 0 length read
|
||||||
// or if read_to_end_of_stream is true then the end of the stream (`none`)
|
// or if read_to_end_of_stream is true then the end of the stream (`none`)
|
||||||
pub fn read_all(config ReadAllConfig) ?[]byte {
|
pub fn read_all(config ReadAllConfig) ?[]u8 {
|
||||||
mut r := config.reader
|
mut r := config.reader
|
||||||
read_till_eof := config.read_to_end_of_stream
|
read_till_eof := config.read_to_end_of_stream
|
||||||
|
|
||||||
mut b := []byte{len: io.read_all_len}
|
mut b := []u8{len: io.read_all_len}
|
||||||
mut read := 0
|
mut read := 0
|
||||||
for {
|
for {
|
||||||
new_read := r.read(mut b[read..]) or { break }
|
new_read := r.read(mut b[read..]) or { break }
|
||||||
|
@ -46,8 +46,8 @@ pub fn read_all(config ReadAllConfig) ?[]byte {
|
||||||
|
|
||||||
// read_any reads any available bytes from a reader
|
// read_any reads any available bytes from a reader
|
||||||
// (until the reader returns a read of 0 length)
|
// (until the reader returns a read of 0 length)
|
||||||
pub fn read_any(mut r Reader) ?[]byte {
|
pub fn read_any(mut r Reader) ?[]u8 {
|
||||||
mut b := []byte{len: io.read_all_len}
|
mut b := []u8{len: io.read_all_len}
|
||||||
mut read := 0
|
mut read := 0
|
||||||
for {
|
for {
|
||||||
new_read := r.read(mut b[read..]) or { break }
|
new_read := r.read(mut b[read..]) or { break }
|
||||||
|
@ -64,5 +64,5 @@ pub fn read_any(mut r Reader) ?[]byte {
|
||||||
|
|
||||||
// RandomReader represents a stream of data that can be read from at a random location
|
// RandomReader represents a stream of data that can be read from at a random location
|
||||||
pub interface RandomReader {
|
pub interface RandomReader {
|
||||||
read_from(pos u64, mut buf []byte) ?int
|
read_from(pos u64, mut buf []u8) ?int
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,12 +2,12 @@ module io
|
||||||
|
|
||||||
struct Buf {
|
struct Buf {
|
||||||
pub:
|
pub:
|
||||||
bytes []byte
|
bytes []u8
|
||||||
mut:
|
mut:
|
||||||
i int
|
i int
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut b Buf) read(mut buf []byte) ?int {
|
fn (mut b Buf) read(mut buf []u8) ?int {
|
||||||
if !(b.i < b.bytes.len) {
|
if !(b.i < b.bytes.len) {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ mut:
|
||||||
place int
|
place int
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut s StringReader) read(mut buf []byte) ?int {
|
fn (mut s StringReader) read(mut buf []u8) ?int {
|
||||||
if s.place >= s.text.len {
|
if s.place >= s.text.len {
|
||||||
return none
|
return none
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,11 +14,11 @@ mut:
|
||||||
w Writer
|
w Writer
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (mut r ReaderWriterImpl) read(mut buf []byte) ?int {
|
pub fn (mut r ReaderWriterImpl) read(mut buf []u8) ?int {
|
||||||
return r.r.read(mut buf)
|
return r.r.read(mut buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (mut r ReaderWriterImpl) write(buf []byte) ?int {
|
pub fn (mut r ReaderWriterImpl) write(buf []u8) ?int {
|
||||||
return r.w.write(buf)
|
return r.w.write(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,11 +3,11 @@ module io
|
||||||
// Writer represents a stream of data that can be wrote to
|
// Writer represents a stream of data that can be wrote to
|
||||||
pub interface Writer {
|
pub interface Writer {
|
||||||
mut:
|
mut:
|
||||||
write(buf []byte) ?int
|
write(buf []u8) ?int
|
||||||
}
|
}
|
||||||
|
|
||||||
// RandomWriter represents a stream of data that can be wrote to
|
// RandomWriter represents a stream of data that can be wrote to
|
||||||
// at a random pos
|
// at a random pos
|
||||||
pub interface RandomWriter {
|
pub interface RandomWriter {
|
||||||
write_to(pos u64, buf []byte) ?int
|
write_to(pos u64, buf []u8) ?int
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,7 +31,7 @@ fn test_integer_from_bytes() {
|
||||||
assert big.integer_from_bytes([u8(0x13), 0x37, 0xca, 0xfe, 0xba]).hex() == '1337cafeba'
|
assert big.integer_from_bytes([u8(0x13), 0x37, 0xca, 0xfe, 0xba]).hex() == '1337cafeba'
|
||||||
assert big.integer_from_bytes([u8(0x13), 0x37, 0xca, 0xfe, 0xba, 0xbe]).hex() == '1337cafebabe'
|
assert big.integer_from_bytes([u8(0x13), 0x37, 0xca, 0xfe, 0xba, 0xbe]).hex() == '1337cafebabe'
|
||||||
|
|
||||||
mut bytes := []byte{cap: 1024}
|
mut bytes := []u8{cap: 1024}
|
||||||
mut expected := ''
|
mut expected := ''
|
||||||
for i := 0; i < bytes.cap; i++ {
|
for i := 0; i < bytes.cap; i++ {
|
||||||
bytes << u8(i)
|
bytes << u8(i)
|
||||||
|
@ -45,7 +45,7 @@ fn test_bytes() {
|
||||||
assert result1 == [u8(0x13), 0x37, 0xca, 0xfe, 0xba, 0xbe]
|
assert result1 == [u8(0x13), 0x37, 0xca, 0xfe, 0xba, 0xbe]
|
||||||
assert sign1 == 1
|
assert sign1 == 1
|
||||||
|
|
||||||
mut bytes := []byte{cap: 1024}
|
mut bytes := []u8{cap: 1024}
|
||||||
mut expected := ''
|
mut expected := ''
|
||||||
for i := 0; i < bytes.cap; i++ {
|
for i := 0; i < bytes.cap; i++ {
|
||||||
bytes << u8(i | 1)
|
bytes << u8(i | 1)
|
||||||
|
|
|
@ -108,13 +108,13 @@ pub struct IntegerConfig {
|
||||||
|
|
||||||
// integer_from_bytes creates a new `big.Integer` from the given byte array. By default, positive integers are assumed. If you want a negative integer, use in the following manner:
|
// integer_from_bytes creates a new `big.Integer` from the given byte array. By default, positive integers are assumed. If you want a negative integer, use in the following manner:
|
||||||
// `value := big.integer_from_bytes(bytes, signum: -1)`
|
// `value := big.integer_from_bytes(bytes, signum: -1)`
|
||||||
pub fn integer_from_bytes(input []byte, config IntegerConfig) Integer {
|
pub fn integer_from_bytes(input []u8, config IntegerConfig) Integer {
|
||||||
// Thank you to Miccah (@mcastorina) for this implementation and relevant unit tests.
|
// Thank you to Miccah (@mcastorina) for this implementation and relevant unit tests.
|
||||||
if input.len == 0 {
|
if input.len == 0 {
|
||||||
return integer_from_int(0)
|
return integer_from_int(0)
|
||||||
}
|
}
|
||||||
// pad input
|
// pad input
|
||||||
mut padded_input := []byte{len: ((input.len + 3) & ~0x3) - input.len, cap: (input.len + 3) & ~0x3, init: 0x0}
|
mut padded_input := []u8{len: ((input.len + 3) & ~0x3) - input.len, cap: (input.len + 3) & ~0x3, init: 0x0}
|
||||||
padded_input << input
|
padded_input << input
|
||||||
mut digits := []u32{len: padded_input.len / 4}
|
mut digits := []u32{len: padded_input.len / 4}
|
||||||
// combine every 4 bytes into a u32 and insert into n.digits
|
// combine every 4 bytes into a u32 and insert into n.digits
|
||||||
|
@ -778,11 +778,11 @@ pub fn (a Integer) int() int {
|
||||||
|
|
||||||
// bytes returns the a byte representation of the integer a, along with the signum int.
|
// bytes returns the a byte representation of the integer a, along with the signum int.
|
||||||
// NOTE: The byte array returned is in big endian order.
|
// NOTE: The byte array returned is in big endian order.
|
||||||
pub fn (a Integer) bytes() ([]byte, int) {
|
pub fn (a Integer) bytes() ([]u8, int) {
|
||||||
if a.signum == 0 {
|
if a.signum == 0 {
|
||||||
return []byte{len: 0}, 0
|
return []u8{len: 0}, 0
|
||||||
}
|
}
|
||||||
mut result := []byte{cap: a.digits.len * 4}
|
mut result := []u8{cap: a.digits.len * 4}
|
||||||
mut mask := u32(0xff000000)
|
mut mask := u32(0xff000000)
|
||||||
mut offset := 24
|
mut offset := 24
|
||||||
mut non_zero_found := false
|
mut non_zero_found := false
|
||||||
|
|
|
@ -403,7 +403,7 @@ pub fn (u_ Uint128) str() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// put_bytes stores u in b in little-endian order
|
// put_bytes stores u in b in little-endian order
|
||||||
pub fn (u Uint128) put_bytes(mut b []byte) {
|
pub fn (u Uint128) put_bytes(mut b []u8) {
|
||||||
binary.little_endian_put_u64(mut b, u.lo)
|
binary.little_endian_put_u64(mut b, u.lo)
|
||||||
binary.little_endian_put_u64(mut b, u.hi)
|
binary.little_endian_put_u64(mut b, u.hi)
|
||||||
}
|
}
|
||||||
|
|
|
@ -42,9 +42,9 @@ mut:
|
||||||
port int
|
port int
|
||||||
}
|
}
|
||||||
|
|
||||||
fn (mut dtp DTP) read() ?[]byte {
|
fn (mut dtp DTP) read() ?[]u8 {
|
||||||
mut data := []byte{}
|
mut data := []u8{}
|
||||||
mut buf := []byte{len: 1024}
|
mut buf := []u8{len: 1024}
|
||||||
for {
|
for {
|
||||||
len := dtp.reader.read(mut buf) or { break }
|
len := dtp.reader.read(mut buf) or { break }
|
||||||
if len == 0 {
|
if len == 0 {
|
||||||
|
@ -227,7 +227,7 @@ pub fn (mut zftp FTP) dir() ?[]string {
|
||||||
return dir
|
return dir
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn (mut zftp FTP) get(file string) ?[]byte {
|
pub fn (mut zftp FTP) get(file string) ?[]u8 {
|
||||||
mut dtp := zftp.pasv() or { return error('Cannot stablish data connection') }
|
mut dtp := zftp.pasv() or { return error('Cannot stablish data connection') }
|
||||||
zftp.write('RETR $file') ?
|
zftp.write('RETR $file') ?
|
||||||
code, _ := zftp.read() ?
|
code, _ := zftp.read() ?
|
||||||
|
|
|
@ -182,11 +182,11 @@ pub fn parse_request(mut reader io.BufferedReader) ?Request {
|
||||||
mut request := parse_request_head(mut reader) ?
|
mut request := parse_request_head(mut reader) ?
|
||||||
|
|
||||||
// body
|
// body
|
||||||
mut body := []byte{}
|
mut body := []u8{}
|
||||||
if length := request.header.get(.content_length) {
|
if length := request.header.get(.content_length) {
|
||||||
n := length.int()
|
n := length.int()
|
||||||
if n > 0 {
|
if n > 0 {
|
||||||
body = []byte{len: n}
|
body = []u8{len: n}
|
||||||
mut count := 0
|
mut count := 0
|
||||||
for count < body.len {
|
for count < body.len {
|
||||||
count += reader.read(mut body[count..]) or { break }
|
count += reader.read(mut body[count..]) or { break }
|
||||||
|
|
Some files were not shown because too many files have changed in this diff.