forked from vieter-v/vieter
refactor: compile without warnings

parent 23632be7a4
commit a2fda0d4b7
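Context for the diff below: V (around version 0.3) split error handling into Option (`?T`) and Result (`!T`) types. Functions that can fail via `return error(...)` are now expected to return `!T` and propagate failures with a trailing `!`, and the compiler warns about the old `?`-based form. This commit migrates every affected signature and call site, and turns warnings into errors so the deprecated syntax can't creep back in. The shape of the change, sketched on a minimal standalone function (hypothetical example, not code from this repo):

	import os

	// Before: optional syntax, now warned about when used for error returns
	// fn read_config(path string) ?string {
	// 	return os.read_file(path)?
	// }

	// After: result syntax; the trailing `!` propagates the error upward
	fn read_config(path string) !string {
		return os.read_file(path)!
	}

	// Call sites handle the result with an `or` block, exactly as before
	fn main() {
		config := read_config('config.toml') or {
			eprintln('could not read config: $err.msg()')
			exit(1)
		}
		println(config)
	}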
Makefile (2 changes)
@@ -3,7 +3,7 @@ SRC_DIR := src
 SOURCES != find '$(SRC_DIR)' -iname '*.v'

 V_PATH ?= v
-V := $(V_PATH) -showcc -gc boehm
+V := $(V_PATH) -showcc -gc boehm -W

 all: vieter

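V documents `-W` as treating all warnings as errors, so every deprecated-`?` warning fixed in the hunks below would now fail the build outright if it regressed. Assuming a typical build target (the recipe itself is outside this hunk), the compiler invocation expands to something like:

	v -showcc -gc boehm -W -o vieter src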
@@ -26,8 +26,8 @@ const (
 )

 // init initializes a database & adds the correct tables.
-pub fn init(db_path string) ?VieterDb {
-	conn := sqlite.connect(db_path)?
+pub fn init(db_path string) !VieterDb {
+	conn := sqlite.connect(db_path)!

 	sql conn {
 		create table MigrationVersion
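Since `init` now returns `!VieterDb` instead of `?VieterDb`, its callers switch to the same result handling. A hypothetical call site (not part of this diff):

	db := db.init('vieter.db') or {
		eprintln('failed to open database: $err.msg()')
		exit(1)
	}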
@@ -29,7 +29,7 @@ pub:
 }

 // new creates a new RepoGroupManager & creates the directories as needed
-pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupManager {
+pub fn new(repos_dir string, pkg_dir string, default_arch string) !RepoGroupManager {
 	if !os.is_dir(repos_dir) {
 		os.mkdir_all(repos_dir) or { return error('Failed to create repos directory: $err.msg()') }
 	}
@@ -49,27 +49,27 @@ pub fn new(repos_dir string, pkg_dir string, default_arch string) ?RepoGroupMana
 // pkg archive. It's a wrapper around add_pkg_in_repo that parses the archive
 // file, passes the result to add_pkg_in_repo, and hard links the archive to
 // the right subdirectories in r.pkg_dir if it was successfully added.
-pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?RepoAddResult {
+pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) !RepoAddResult {
 	pkg := package.read_pkg_archive(pkg_path) or {
 		return error('Failed to read package file: $err.msg()')
 	}

-	archs := r.add_pkg_in_repo(repo, pkg)?
+	archs := r.add_pkg_in_repo(repo, pkg)!

 	// If the add was successful, we move the file to the packages directory
 	for arch in archs {
 		repo_pkg_path := os.real_path(os.join_path(r.pkg_dir, repo, arch))
 		dest_path := os.join_path_single(repo_pkg_path, pkg.filename())

-		os.mkdir_all(repo_pkg_path)?
+		os.mkdir_all(repo_pkg_path)!

 		// We create hard links so that "any" arch packages aren't stored
 		// multiple times
-		os.link(pkg_path, dest_path)?
+		os.link(pkg_path, dest_path)!
 	}

 	// After linking, we can remove the original file
-	os.rm(pkg_path)?
+	os.rm(pkg_path)!

 	return RepoAddResult{
 		name: pkg.info.name
@@ -85,11 +85,11 @@ pub fn (r &RepoGroupManager) add_pkg_from_path(repo string, pkg_path string) ?Re
 // r.default_arch. If this arch-repo doesn't exist yet, it is created. If the
 // architecture isn't 'any', the package is only added to the specific
 // architecture.
-fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]string {
+fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ![]string {
 	// A package not of arch 'any' can be handled easily by adding it to the
 	// respective repo
 	if pkg.info.arch != 'any' {
-		r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)?
+		r.add_pkg_in_arch_repo(repo, pkg.info.arch, pkg)!

 		return [pkg.info.arch]
 	}
@@ -104,7 +104,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 	// If this is the first package that's added to the repo, the directory
 	// won't exist yet
 	if os.exists(repo_dir) {
-		arch_repos = os.ls(repo_dir)?
+		arch_repos = os.ls(repo_dir)!
 	}

 	// The default_arch should always be updated when a package with arch 'any'
@@ -118,7 +118,7 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 	// not know which arch-repositories did succeed in adding the package, if
 	// any.
 	for arch in arch_repos {
-		r.add_pkg_in_arch_repo(repo, arch, pkg)?
+		r.add_pkg_in_arch_repo(repo, arch, pkg)!
 	}

 	return arch_repos
@@ -128,24 +128,24 @@ fn (r &RepoGroupManager) add_pkg_in_repo(repo string, pkg &package.Pkg) ?[]strin
 // arch-repo. It records the package's data in the arch-repo's desc & files
 // files, and afterwards updates the db & files archives to reflect these
 // changes.
-fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ? {
+fn (r &RepoGroupManager) add_pkg_in_arch_repo(repo string, arch string, pkg &package.Pkg) ! {
 	pkg_dir := os.join_path(r.repos_dir, repo, arch, '$pkg.info.name-$pkg.info.version')

 	// Remove the previous version of the package, if present
-	r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)?
+	r.remove_pkg_from_arch_repo(repo, arch, pkg.info.name, false)!

 	os.mkdir_all(pkg_dir) or { return error('Failed to create package directory.') }

-	os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()?) or {
-		os.rmdir_all(pkg_dir)?
+	os.write_file(os.join_path_single(pkg_dir, 'desc'), pkg.to_desc()!) or {
+		os.rmdir_all(pkg_dir)!

 		return error('Failed to write desc file.')
 	}
 	os.write_file(os.join_path_single(pkg_dir, 'files'), pkg.to_files()) or {
-		os.rmdir_all(pkg_dir)?
+		os.rmdir_all(pkg_dir)!

 		return error('Failed to write files file.')
 	}

-	r.sync(repo, arch)?
+	r.sync(repo, arch)!
 }
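Design note on the section above: `os.link` creates hard links, so a package built for arch 'any' is stored on disk once while appearing in every architecture's package directory. Removing the original upload with `os.rm` afterwards is safe, because the file's data persists until its last remaining link is deleted.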
@@ -5,7 +5,7 @@ import os
 // remove_pkg_from_arch_repo removes a package from an arch-repo's database. It
 // returns false if the package wasn't present in the database. It also
 // optionally re-syncs the repo archives.
-pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) ?bool {
+pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string, pkg_name string, sync bool) !bool {
 	repo_dir := os.join_path(r.repos_dir, repo, arch)

 	// If the repository doesn't exist yet, the result is automatically false
@@ -15,7 +15,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,

 	// We iterate over every directory in the repo dir
 	// TODO filter so we only check directories
-	for d in os.ls(repo_dir)? {
+	for d in os.ls(repo_dir)! {
 		// Because a repository only allows a single version of each package,
 		// we need only compare whether the name of the package is the same,
 		// not the version.
@@ -25,22 +25,22 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
 			// We lock the mutex here to prevent other routines from creating a
 			// new archive while we remove an entry
 			lock r.mutex {
-				os.rmdir_all(os.join_path_single(repo_dir, d))?
+				os.rmdir_all(os.join_path_single(repo_dir, d))!
 			}

 			// Also remove the package archive
 			repo_pkg_dir := os.join_path(r.pkg_dir, repo, arch)

-			archives := os.ls(repo_pkg_dir)?.filter(it.split('-')#[..-3].join('-') == name)
+			archives := os.ls(repo_pkg_dir)!.filter(it.split('-')#[..-3].join('-') == name)

 			for archive_name in archives {
 				full_path := os.join_path_single(repo_pkg_dir, archive_name)
-				os.rm(full_path)?
+				os.rm(full_path)!
 			}

 			// Sync the db archives if requested
 			if sync {
-				r.sync(repo, arch)?
+				r.sync(repo, arch)!
 			}

 			return true
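The `archives` filter above recovers a package name from an archive file name. Arch package archives are named `name-pkgver-pkgrel-arch.pkg.tar.zst`, and `#[..-3]` is V's bounds-clamping slice syntax, here dropping the last three dash-separated fields so that only the (possibly dash-containing) name remains. Worked through on a hypothetical file name:

	// 'vieter-git-0.4.0-1-x86_64.pkg.tar.zst'.split('-')
	//     == ['vieter', 'git', '0.4.0', '1', 'x86_64.pkg.tar.zst']
	// #[..-3] drops the last three entries -> ['vieter', 'git']
	// .join('-') restores the name         -> 'vieter-git'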
@@ -51,7 +51,7 @@ pub fn (r &RepoGroupManager) remove_pkg_from_arch_repo(repo string, arch string,
 }

 // remove_arch_repo removes an arch-repo & its packages.
-pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool {
+pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) !bool {
 	repo_dir := os.join_path(r.repos_dir, repo, arch)

 	// If the repository doesn't exist yet, the result is automatically false
@@ -59,16 +59,16 @@ pub fn (r &RepoGroupManager) remove_arch_repo(repo string, arch string) ?bool {
 		return false
 	}

-	os.rmdir_all(repo_dir)?
+	os.rmdir_all(repo_dir)!

 	pkg_dir := os.join_path(r.pkg_dir, repo, arch)
-	os.rmdir_all(pkg_dir)?
+	os.rmdir_all(pkg_dir)!

 	return true
 }

 // remove_repo removes a repo & its packages.
-pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool {
+pub fn (r &RepoGroupManager) remove_repo(repo string) !bool {
 	repo_dir := os.join_path_single(r.repos_dir, repo)

 	// If the repository doesn't exist yet, the result is automatically false
@@ -76,10 +76,10 @@ pub fn (r &RepoGroupManager) remove_repo(repo string) ?bool {
 		return false
 	}

-	os.rmdir_all(repo_dir)?
+	os.rmdir_all(repo_dir)!

 	pkg_dir := os.join_path_single(r.pkg_dir, repo)
-	os.rmdir_all(pkg_dir)?
+	os.rmdir_all(pkg_dir)!

 	return true
 }
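Note that these remove functions return `!bool` rather than plain `!`: `false` is the benign "nothing to remove" case, while an error signals an actual filesystem failure, so callers can distinguish the two. A hypothetical call site, sketched:

	found := r.remove_repo('extra') or {
		eprintln('failed to remove repo: $err.msg()')
		return
	}
	if !found {
		// The repo never existed; not an error.
	}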
@@ -32,7 +32,7 @@ fn archive_add_entry(archive &C.archive, entry &C.archive_entry, file_path &stri
 }

 // sync regenerates the repository archive files.
-fn (r &RepoGroupManager) sync(repo string, arch string) ? {
+fn (r &RepoGroupManager) sync(repo string, arch string) ! {
 	subrepo_path := os.join_path(r.repos_dir, repo, arch)

 	lock r.mutex {
@@ -54,7 +54,7 @@ fn (r &RepoGroupManager) sync(repo string, arch string) ? {
 		C.archive_write_open_filename(a_files, &char(files_path.str))

 		// Iterate over each directory
-		for d in os.ls(subrepo_path)?.filter(os.is_dir(os.join_path_single(subrepo_path,
+		for d in os.ls(subrepo_path)!.filter(os.is_dir(os.join_path_single(subrepo_path,
 			it))) {
 			// desc
 			mut inner_path := os.join_path_single(d, 'desc')
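The `sync` hunk passes through V's C interop: `C.archive_write_open_filename` comes from libarchive, which vieter uses to regenerate the repository's db & files archives from each package's `desc` and `files` entries. The `?` to `!` change here is purely syntactic; a failed directory listing still propagates to the caller and aborts the regeneration.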
@@ -43,9 +43,9 @@ fn (mut app App) v1_get_log_content(id int) web.Result {

 // parse_query_time unescapes an HTTP query parameter & tries to parse it as a
 // time.Time struct.
-fn parse_query_time(query string) ?time.Time {
-	unescaped := urllib.query_unescape(query)?
-	t := time.parse(unescaped)?
+fn parse_query_time(query string) !time.Time {
+	unescaped := urllib.query_unescape(query)!
+	t := time.parse(unescaped)!

 	return t
 }
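Both `urllib.query_unescape` and `time.parse` return results here, so the two `!` suffixes forward either failure to the HTTP handler. A hypothetical handler-side fallback (not part of this diff):

	t := parse_query_time(query) or { time.now() }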
@@ -8,7 +8,7 @@ import net.http
 const attrs_to_ignore = ['auth']

 // Parsing function attributes for methods and path.
-fn parse_attrs(name string, attrs []string) ?([]http.Method, string) {
+fn parse_attrs(name string, attrs []string) !([]http.Method, string) {
 	if attrs.len == 0 {
 		return [http.Method.get], '/$name'
 	}
@@ -61,7 +61,7 @@ fn parse_query_from_url(url urllib.URL) map[string]string {
 }

 // Extract form data from an HTTP request.
-fn parse_form_from_request(request http.Request) ?(map[string]string, map[string][]http.FileData) {
+fn parse_form_from_request(request http.Request) !(map[string]string, map[string][]http.FileData) {
 	mut form := map[string]string{}
 	mut files := map[string][]http.FileData{}
 	if request.method in methods_with_form {
@@ -24,7 +24,7 @@ pub:
 pub mut:
 	// TCP connection to client.
 	// But beware, do not store it for further use, after request processing web will close connection.
-	conn &net.TcpConn
+	conn &net.TcpConn = unsafe { nil }
 	// Gives access to a shared logger object
 	logger shared log.Log
 	// time.ticks() from start of web connection handle.
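The struct field change is the one non-mechanical fix in this file: V complains when a reference field has no initial value, and `= unsafe { nil }` makes the default explicit. The `unsafe` marker documents that `conn` starts out null and must not be dereferenced until web assigns a real connection during request handling.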
@@ -67,20 +67,20 @@ struct Route {
 pub fn (ctx Context) before_request() {}

 // send_string writes the given string to the TCP connection socket.
-fn (mut ctx Context) send_string(s string) ? {
-	ctx.conn.write(s.bytes())?
+fn (mut ctx Context) send_string(s string) ! {
+	ctx.conn.write(s.bytes())!
 }

 // send_reader reads at most `size` bytes from the given reader & writes them
 // to the TCP connection socket. Internally, a 10KB buffer is used, to avoid
 // having to store all bytes in memory at once.
-fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? {
+fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ! {
 	mut buf := []u8{len: 10_000}
 	mut bytes_left := size

 	// Repeat as long as the stream still has data
 	for bytes_left > 0 {
-		bytes_read := reader.read(mut buf)?
+		bytes_read := reader.read(mut buf)!
 		bytes_left -= u64(bytes_read)

 		mut to_write := bytes_read
@@ -96,20 +96,20 @@ fn (mut ctx Context) send_reader(mut reader io.Reader, size u64) ? {
 // send_custom_response sends the given http.Response to the client. It can be
 // used to overwrite the Context object & send a completely custom
 // http.Response instead.
-fn (mut ctx Context) send_custom_response(resp &http.Response) ? {
-	ctx.send_string(resp.bytestr())?
+fn (mut ctx Context) send_custom_response(resp &http.Response) ! {
+	ctx.send_string(resp.bytestr())!
 }

 // send_response_header constructs a valid HTTP response with an empty body &
 // sends it to the client.
-pub fn (mut ctx Context) send_response_header() ? {
+pub fn (mut ctx Context) send_response_header() ! {
 	mut resp := http.new_response(
 		header: ctx.header.join(headers_close)
 	)
 	resp.header.add(.content_type, ctx.content_type)
 	resp.set_status(ctx.status)

-	ctx.send_custom_response(resp)?
+	ctx.send_custom_response(resp)!
 }

 // send is a convenience function for sending the HTTP response with an empty