// v/examples/news_fetcher.v
// Copyright (c) 2019-2020 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
import net.http
import json
import sync
// Story holds the fields decoded from a Hacker News
// /v0/item/<id>.json response. Only the fields this example
// actually prints are declared; json.decode ignores the rest.
struct Story {
	title string
	url   string
}
// worker_fetch is the callback invoked by the sync pool for each queued
// story id. It fetches the HN item, decodes it into a Story and prints
// the title and url. Errors are reported on stdout and the item is
// skipped; the pool ignores the returned result (sync.no_result).
fn worker_fetch(p &sync.PoolProcessor, cursor int, worker_id int) voidptr {
	id := p.get_int_item(cursor)
	resp := http.get('https://hacker-news.firebaseio.com/v0/item/${id}.json') or {
		println('failed to fetch data from /v0/item/${id}.json')
		return sync.no_result
	}
	story := json.decode(Story, resp.text) or {
		println('failed to decode a story')
		return sync.no_result
	}
	println('# $cursor) $story.title | $story.url')
	return sync.no_result
}
// Fetches top HN stories in parallel, depending on how many cores you have
fn main() {
	resp := http.get('https://hacker-news.firebaseio.com/v0/topstories.json') or {
		println('failed to fetch data from /v0/topstories.json')
		return
	}
	mut ids := json.decode([]int, resp.text) or {
		println('failed to decode topstories.json')
		return
	}
	// Keep the example quick: only fetch the first 10 story ids.
	if ids.len > 10 {
		ids = ids[0..10]
	}
	mut fetcher_pool := sync.new_pool_processor({
		callback: worker_fetch
	})
	// NB: if you do not call set_max_jobs, the pool will try to use an optimal
	// number of threads, one per each core in your system, which in most
	// cases is what you want anyway... You can override the automatic choice
	// by setting the VJOBS environment variable too.
	// fetcher_pool.set_max_jobs( 4 )
	fetcher_pool.work_on_items_i(ids)
}