//go:build !metrics
// +build !metrics

package api_test

import (
	"context"
	"crypto/rand"
	"errors"
	"fmt"
	"math/big"
	"net/http"
	"strings"
	"sync"
	"testing"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	. "github.com/smartystreets/goconvey/convey"
	"gopkg.in/resty.v1"

	zotapi "zotregistry.io/zot/pkg/api"
	zotcfg "zotregistry.io/zot/pkg/api/config"
	"zotregistry.io/zot/pkg/exporter/api"
	"zotregistry.io/zot/pkg/extensions/monitoring"
	. "zotregistry.io/zot/pkg/test"
)
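
// getRandomLatencyN returns a cryptographically random duration in the
// half-open interval [0, max) nanoseconds; it panics if the random source fails.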
func getRandomLatencyN(max int64) time.Duration {
	nBig, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		panic(err)
	}

	return time.Duration(nBig.Int64())
}

func getRandomLatency() time.Duration {
	return getRandomLatencyN(int64(2 * time.Minute)) // a random latency (in nanoseconds) that can be up to 2 minutes
}

func TestNew(t *testing.T) {
	Convey("Make a new controller", t, func() {
		config := api.DefaultConfig()
		So(config, ShouldNotBeNil)
		So(api.NewController(config), ShouldNotBeNil)
	})
}
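
// isChannelDrained waits for SleepTime, then reports whether ch has no
// pending metrics left to read.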
func isChannelDrained(ch chan prometheus.Metric) bool {
	time.Sleep(SleepTime)
	select {
	case <-ch:
		return false
	default:
		return true
	}
}
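
// readDefaultMetrics consumes the two metrics the collector always emits first
// (zot_up and zot_info) and asserts their expected gauge values.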
func readDefaultMetrics(collector *api.Collector, chMetric chan prometheus.Metric) {
	var metric dto.Metric

	pmMetric := <-chMetric
	So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())

	err := pmMetric.Write(&metric)
	So(err, ShouldBeNil)
	So(*metric.Gauge.Value, ShouldEqual, 1)

	pmMetric = <-chMetric
	So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_info"].String())

	err = pmMetric.Write(&metric)
	So(err, ShouldBeNil)
	So(*metric.Gauge.Value, ShouldEqual, 0)
}
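
// TestNewExporter starts the zot exporter (and, in the nested scopes, a zot server),
// then drives the Prometheus collector directly to verify the metrics it exposes.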
func TestNewExporter(t *testing.T) {
	Convey("Make an exporter controller", t, func() {
		exporterConfig := api.DefaultConfig()
		So(exporterConfig, ShouldNotBeNil)
		exporterPort := GetFreePort()
		serverPort := GetFreePort()
		exporterConfig.Exporter.Port = exporterPort
		exporterConfig.Exporter.Metrics.Path = strings.TrimPrefix(t.TempDir(), "/tmp/")
		exporterConfig.Server.Port = serverPort
		exporterController := api.NewController(exporterConfig)

		Convey("Start the zot exporter", func() {
			go func() {
				// this blocks
				exporterController.Run()
				So(nil, ShouldNotBeNil) // Fail the test in case zot exporter unexpectedly exits
			}()
			time.Sleep(SleepTime)

			collector := api.GetCollector(exporterController)
			chMetric := make(chan prometheus.Metric)

			Convey("When zot server not running", func() {
				go func() {
					// this blocks
					collector.Collect(chMetric)
				}()
				// Read the expected values from the channel
				pm := <-chMetric
				So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())

				var metric dto.Metric
				err := pm.Write(&metric)
				So(err, ShouldBeNil)
				So(*metric.Gauge.Value, ShouldEqual, 0) // "zot_up=0" means zot server is not running

				// Check that no more data was written to the channel
				So(isChannelDrained(chMetric), ShouldEqual, true)
			})
			Convey("When zot server is running", func() {
				serverConfig := zotcfg.New()
				So(serverConfig, ShouldNotBeNil)
				baseURL := fmt.Sprintf(BaseURL, serverPort)
				serverConfig.HTTP.Port = serverPort
				serverController := zotapi.NewController(serverConfig)
				So(serverController, ShouldNotBeNil)

				dir := t.TempDir()
				serverController.Config.Storage.RootDirectory = dir
				go func(ctrl *zotapi.Controller) {
					if err := ctrl.Init(context.Background()); err != nil {
						panic(err)
					}

					// this blocks
					if err := ctrl.Run(context.Background()); !errors.Is(err, http.ErrServerClosed) {
						panic(err)
					}
				}(serverController)
				defer func(ctrl *zotapi.Controller) {
					_ = ctrl.Server.Shutdown(context.TODO())
				}(serverController)
				// wait till ready
				for {
					_, err := resty.R().Get(baseURL)
					if err == nil {
						break
					}
					time.Sleep(SleepTime)
				}

				// Side effect of calling this endpoint is that it will enable metrics
				resp, err := resty.R().Get(baseURL + "/v2/metrics")
				So(resp, ShouldNotBeNil)
				So(err, ShouldBeNil)
				So(resp.StatusCode(), ShouldEqual, 200)

				Convey("Collecting data: default metrics", func() {
					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)
					So(isChannelDrained(chMetric), ShouldEqual, true)
				})

				Convey("Collecting data: Test init value & that increment works on Counters", func() {
					// Testing that the counter's initial value is 1 after the first increment call
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					So(isChannelDrained(chMetric), ShouldEqual, true)

					// Testing that the counter is incremented by 1
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurrent Counter increment requests work properly", func() {
					nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
					if err != nil {
						panic(err)
					}
					reqsSize := int(nBig.Int64())
					for i := 0; i < reqsSize; i++ {
						monitoring.IncDownloadCounter(serverController.Metrics, "dummyrepo")
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)
					pm := <-chMetric
					So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_downloads_total"].String())

					var metric dto.Metric
					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Summaries", func() {
					// Testing that the summary count is 1 after the first observation call
					var latency1, latency2 time.Duration
					latency1 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blobs/dummydigest", latency1)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency1.Seconds())

					So(isChannelDrained(chMetric), ShouldEqual, true)

					// Testing that the summary count is incremented by 1 and the summary sum is properly updated
					latency2 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blobs/dummydigest", latency2)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, (latency1.Seconds())+(latency2.Seconds()))

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurrent Summary observation requests work properly", func() {
					var latencySum float64
					nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
					if err != nil {
						panic(err)
					}
					reqsSize := int(nBig.Int64())
					for i := 0; i < reqsSize; i++ {
						latency := getRandomLatency()
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/dummyrepo/manifests/testreference", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Histogram buckets", func() {
					// Testing that the histogram count is 1 after the first observation call
					latency := getRandomLatency()
					monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency.Seconds())
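
					// Histogram buckets are cumulative: each bucket whose upper bound is above
					// the single observed latency should report a count of 1, lower buckets 0.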
					for _, fvalue := range monitoring.GetDefaultBuckets() {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						if latency.Seconds() < fvalue {
							So(*metric.Counter.Value, ShouldEqual, 1)
						} else {
							So(*metric.Counter.Value, ShouldEqual, 0)
						}
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Histogram buckets (lock latency)", func() {
					// Testing that the histogram count is 1 after the first observation call
					latency := getRandomLatency()
					monitoring.ObserveStorageLockLatency(serverController.Metrics, latency, "/tmp/zot", "RWLock")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_storage_lock_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_storage_lock_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency.Seconds())
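
					// Same cumulative-bucket check as above, using the bucket bounds
					// configured for the storage lock latency histogram.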
					for _, fvalue := range monitoring.GetBuckets("zot.storage.lock.latency.seconds") {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_storage_lock_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						if latency.Seconds() < fvalue {
							So(*metric.Counter.Value, ShouldEqual, 1)
						} else {
							So(*metric.Counter.Value, ShouldEqual, 0)
						}
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init Histogram buckets \n", func() {
					// Generate a random latency that falls within each bucket, then check
					// that each successively higher bucket's cumulative counter is incremented by 1
					var latencySum float64

					dBuckets := monitoring.GetDefaultBuckets()
					for index, fvalue := range dBuckets {
						var latency time.Duration
						if index == 0 {
							// first bucket value
							latency = getRandomLatencyN(int64(fvalue * float64(time.Second)))
						} else {
							pvalue := dBuckets[index-1] // previous bucket value
							latency = time.Duration(pvalue*float64(time.Second)) +
								getRandomLatencyN(int64(dBuckets[0]*float64(time.Second)))
						}
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, len(dBuckets))

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual,
						collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					for index := range dBuckets {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						So(*metric.Counter.Value, ShouldEqual, index+1)
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Negative testing: Send unknown metric type to MetricServer", func() {
					serverController.Metrics.SendMetric(getRandomLatency())
				})
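
				// Each worker simulates a Prometheus scrape: it drains the pending metrics
				// via ReceiveMetrics and JSON-marshals them, exercising the metric server
				// from many goroutines at once.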
				Convey("Concurrent metrics scrape", func() {
					var wg sync.WaitGroup

					nBig, err := rand.Int(rand.Reader, big.NewInt(100))
					if err != nil {
						panic(err)
					}
					workersSize := int(nBig.Int64())
					for i := 0; i < workersSize; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							m := serverController.Metrics.ReceiveMetrics()
							json := jsoniter.ConfigCompatibleWithStandardLibrary

							_, err := json.Marshal(m)
							if err != nil {
								exporterController.Log.Error().Err(err).Msg("Concurrent metrics scrape fail")
							}
						}()
					}
					wg.Wait()
				})
				Convey("Negative testing: Increment a counter that does not exist", func() {
					cv := monitoring.CounterValue{Name: "dummyName"}
					serverController.Metrics.SendMetric(cv)
				})
				Convey("Negative testing: Set a gauge for a metric with len(labelNames)!=len(knownLabelNames)", func() {
					gv := monitoring.GaugeValue{
						Name:       "zot.info",
						Value:      1,
						LabelNames: []string{"commit", "binaryType", "version"},
					}
					serverController.Metrics.SendMetric(gv)
				})
				Convey("Negative testing: Summary observe for a metric with labelNames!=knownLabelNames", func() {
					sv := monitoring.SummaryValue{
						Name:        "zot.repo.latency.seconds",
						LabelNames:  []string{"dummyRepoLabelName"},
						LabelValues: []string{"dummyrepo"},
					}
					serverController.Metrics.SendMetric(sv)
				})
				Convey("Negative testing: Histogram observe for a metric with len(labelNames)!=len(LabelValues)", func() {
					hv := monitoring.HistogramValue{
						Name:        "zot.method.latency.seconds",
						LabelNames:  []string{"method"},
						LabelValues: []string{"GET", "POST", "DELETE"},
					}
					serverController.Metrics.SendMetric(hv)
				})
				Convey("Negative testing: error in getting the size for a repo directory", func() {
					monitoring.SetStorageUsage(serverController.Metrics, "/tmp/zot", "dummyrepo")
				})
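
				// The metric server is expected to stop collecting once no scrape request
				// has arrived within the maximum idle scrape interval.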
				Convey("Disabling metrics after idle timeout", func() {
					So(serverController.Metrics.IsEnabled(), ShouldEqual, true)
					time.Sleep(monitoring.GetMaxIdleScrapeInterval())
					So(serverController.Metrics.IsEnabled(), ShouldEqual, false)
				})
			})
		})
	})
}