metrics/prometheus.v

module metrics

import strings
import io
import arrays
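
// PrometheusExporter exposes the metrics of a MetricsCollector in the
// Prometheus text-based exposition format.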
pub struct PrometheusExporter {
	buckets []f64
mut:
	collector MetricsCollector
}
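
// new_prometheus_exporter creates a PrometheusExporter that reports histogram
// values using the given bucket upper bounds, which are expected to be sorted
// in ascending order.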
pub fn new_prometheus_exporter(buckets []f64) PrometheusExporter {
	return PrometheusExporter{
		buckets: buckets
	}
}
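
// load sets the MetricsCollector whose metrics the exporter will read from.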
pub fn (mut e PrometheusExporter) load(collector MetricsCollector) {
	e.collector = collector
}
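
// export_to_string renders all collected metrics as a single
// Prometheus-compatible string.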
pub fn (mut e PrometheusExporter) export_to_string() !string {
	mut builder := strings.new_builder(64)
	e.export_to_writer(mut builder)!

	return builder.str()
}
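
// export_to_writer writes all collected metrics to the given writer. Counters
// & gauges each produce a single `name value` line; every histogram is
// expanded into cumulative `_bucket` series along with `_count` & `_sum`.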
pub fn (mut e PrometheusExporter) export_to_writer(mut writer io.Writer) ! {
	for counter in e.collector.counters() {
		val := e.collector.counter_get(counter) or { return error("This can't happen.") }
		line := '$counter $val\n'

		writer.write(line.bytes())!
	}

	for gauge in e.collector.gauges() {
		val := e.collector.gauge_get(gauge) or { return error("This can't happen.") }
		line := '$gauge $val\n'

		writer.write(line.bytes())!
	}

	for hist in e.collector.histograms() {
		data := e.collector.histogram_get(hist) or { return error("This can't happen.") }

		sum := arrays.sum(data) or { 0.0 }
		total_count := data.len

		mut bucket_counts := []u64{len: e.buckets.len}
		mut i := bucket_counts.len - 1

		// For each data point, increment all buckets that the value is
		// contained in. Because the buckets are sorted, we can stop once we
		// encounter one that it doesn't fit in.
		for val in data {
			for i >= 0 && val <= e.buckets[i] {
				bucket_counts[i]++
				i -= 1
			}

			i = bucket_counts.len - 1
		}

		mut m := Metric{
			...hist
			name: '${hist.name}_count'
		}

		writer.write('$m $total_count\n'.bytes())!

		m = Metric{
			...hist
			name: '${hist.name}_sum'
		}

		writer.write('$m $sum\n'.bytes())!

		mut le_labels := [][2]string{}
		le_labels.prepend(hist.labels)
		le_labels << ['le', '']!

		for j, bucket in e.buckets {
			le_labels[le_labels.len - 1][1] = bucket.str()

			m = Metric{
				name: '${hist.name}_bucket'
				labels: le_labels
			}

			writer.write('$m ${bucket_counts[j]}\n'.bytes())!
		}
	}
}
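
// Example usage (a sketch; `new_default_collector` is a hypothetical
// MetricsCollector implementation, not part of this file):
//
//	mut exporter := new_prometheus_exporter([0.005, 0.05, 0.5, 5.0])
//	exporter.load(new_default_collector())
//	output := exporter.export_to_string() or { panic(err) }
//	println(output)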