0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-16 21:56:37 -05:00

Implement an API for performance monitoring

Signed-off-by: Alexei Dodon <adodon@cisco.com>
This commit is contained in:
Alexei Dodon 2021-10-15 18:05:00 +03:00 committed by Ramkumar Chinchani
parent 061dfb333b
commit 8e4d828867
54 changed files with 27267 additions and 196 deletions

View file

@ -33,14 +33,14 @@ jobs:
run: |
cd ./zot
IP=`hostname -I | awk '{print $1}'`
echo "ZOT_URL=http://${IP}:5000" >> $GITHUB_ENV
ZOT_REF="local-zot:v$(date +%Y%m%d%H%M%S)"
docker build -f ./Dockerfile-conformance -t "${ZOT_REF}" .
docker run --rm -p 5000:5000 -v "$(pwd)":/go/src/github.com/anuvu/zot -idt "${ZOT_REF}"
echo "SERVER_URL=http://${IP}:5000" >> $GITHUB_ENV
IMAGE_REF="local-zot:v$(date +%Y%m%d%H%M%S)"
docker build -f ./Dockerfile-conformance -t "${IMAGE_REF}" .
docker run --rm -p 5000:5000 -v "$(pwd)":/go/src/github.com/anuvu/zot -idt "${IMAGE_REF}"
- name: Run OCI Distribution Spec conformance tests
uses: opencontainers/distribution-spec@main
env:
OCI_ROOT_URL: ${{ env.ZOT_URL }}
OCI_ROOT_URL: ${{ env.SERVER_URL }}
OCI_NAMESPACE: oci-conformance/distribution-test
OCI_TEST_PULL: 1
OCI_TEST_PUSH: 1

2
.gitignore vendored
View file

@ -14,6 +14,8 @@
bin/
bazel-*
coverage.txt
coverage-extended.txt
coverage-minimal.txt
test/data/
*.orig
.idea/

View file

@ -44,6 +44,12 @@ For a minimal dist-spec only zot,
make binary-minimal
```
For a node exporter used by minimal dist-spec only zot,
```
make exporter-minimal
```
## Using container builds (stacker)
```
@ -62,12 +68,14 @@ make binary-container
.
...
├── cmd/zot # Source code contains the main logic
├── cmd/exporter # Source code contains the main logic for node exporter
├── docs # Source code for Swagger docs
├── errors # Source code for errors
├── examples # Configuration examples to enable various features
├── pkg/api # Source code contains the HTTP handlers
├── pkg/cli # Source code that handles the commandline logic
├── pkg/compliance # Source code that handles the dist-spec compliance logic
├── pkg/exporter # Source code used by the node exporter
├── pkg/extensions # Source code that handles the feature extensions
├── pkg/log # Source code that handles logging
├── pkg/storage # Source code that handles image storage

View file

@ -1,6 +1,7 @@
export GO111MODULE=on
TOP_LEVEL=$(shell git rev-parse --show-toplevel)
COMMIT_HASH=$(shell git describe --always --tags --long)
GO_VERSION=$(shell go version | awk '{print $$3}')
COMMIT=$(if $(shell git status --porcelain --untracked-files=no),$(COMMIT_HASH)-dirty,$(COMMIT_HASH))
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
PATH := bin:$(PATH)
@ -8,26 +9,31 @@ TMPDIR := $(shell mktemp -d)
STACKER := $(shell which stacker)
.PHONY: all
all: swagger binary binary-minimal debug test test-clean check
all: swagger binary binary-minimal exporter-minimal debug test test-clean check
.PHONY: binary-minimal
binary-minimal: swagger
go build -tags minimal,containers_image_openpgp -v -ldflags "-X github.com/anuvu/zot/pkg/api/config.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api.BinaryType=minimal" -o bin/zot-minimal ./cmd/zot
go build -o bin/zot-minimal -tags minimal,containers_image_openpgp -v -trimpath -ldflags "-X github.com/anuvu/zot/pkg/api/config.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api/config.BinaryType=minimal -X github.com/anuvu/zot/pkg/api/config.GoVersion=${GO_VERSION}" ./cmd/zot
.PHONY: binary
binary: swagger
go build -tags extended,containers_image_openpgp -v -ldflags "-X github.com/anuvu/zot/pkg/api/config.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api.BinaryType=extended" -o bin/zot ./cmd/zot
go build -o bin/zot -tags extended,containers_image_openpgp -v -trimpath -ldflags "-X github.com/anuvu/zot/pkg/api/config.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api/config.BinaryType=extended -X github.com/anuvu/zot/pkg/api/config.GoVersion=${GO_VERSION}" ./cmd/zot
.PHONY: debug
debug: swagger
go build -tags extended,containers_image_openpgp -v -gcflags all='-N -l' -ldflags "-X github.com/anuvu/zot/pkg/api.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api.BinaryType=extended" -o bin/zot-debug ./cmd/zot
go build -o bin/zot-debug -tags extended,containers_image_openpgp -v -gcflags all='-N -l' -ldflags "-X github.com/anuvu/zot/pkg/api/config.Commit=${COMMIT} -X github.com/anuvu/zot/pkg/api/config.BinaryType=extended -X github.com/anuvu/zot/pkg/api/config.GoVersion=${GO_VERSION}" ./cmd/zot
.PHONY: exporter-minimal
exporter-minimal: swagger
go build -o bin/zot-exporter -tags minimal,containers_image_openpgp -v -trimpath ./cmd/exporter
.PHONY: test
test:
$(shell mkdir -p test/data; cd test/data; ../scripts/gen_certs.sh; cd ${TOP_LEVEL}; sudo skopeo --insecure-policy copy -q docker://public.ecr.aws/t0x7q1g8/centos:7 oci:${TOP_LEVEL}/test/data/zot-test:0.0.1;sudo skopeo --insecure-policy copy -q docker://public.ecr.aws/t0x7q1g8/centos:8 oci:${TOP_LEVEL}/test/data/zot-cve-test:0.0.1)
$(shell sudo mkdir -p /etc/containers/certs.d/127.0.0.1:8089/; sudo cp test/data/client.* /etc/containers/certs.d/127.0.0.1:8089/; sudo cp test/data/ca.* /etc/containers/certs.d/127.0.0.1:8089/;)
$(shell sudo chmod a=rwx /etc/containers/certs.d/127.0.0.1:8089/*.key)
go test -tags extended,containers_image_openpgp -v -race -cover -coverpkg ./... -coverprofile=coverage.txt -covermode=atomic ./...
go test -tags extended,containers_image_openpgp -v -trimpath -race -cover -coverpkg ./... -coverprofile=coverage-extended.txt -covermode=atomic ./...
go test -tags minimal,containers_image_openpgp -v -trimpath -race -cover -coverpkg ./... -coverprofile=coverage-minimal.txt -covermode=atomic ./...
.PHONY: test-clean
test-clean:
@ -35,12 +41,15 @@ test-clean:
.PHONY: covhtml
covhtml:
tail -n +2 coverage-minimal.txt > tmp.txt && mv tmp.txt coverage-minimal.txt
cat coverage-extended.txt coverage-minimal.txt > coverage.txt
go tool cover -html=coverage.txt -o coverage.html
.PHONY: check
check: ./golangcilint.yaml
golangci-lint --version || curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.26.0
golangci-lint --config ./golangcilint.yaml run --enable-all --build-tags extended,containers_image_openpgp ./cmd/... ./pkg/...
golangci-lint --config ./golangcilint.yaml run --enable-all --build-tags extended,containers_image_openpgp ./...
golangci-lint --config ./golangcilint.yaml run --enable-all --build-tags minimal,containers_image_openpgp ./...
swagger/docs.go:
swag -v || go install github.com/swaggo/swag/cmd/swag

View file

@ -41,6 +41,8 @@ https://anuvu.github.io/zot/
* Swagger based documentation
* Single binary for _all_ the above features
* Released under Apache 2.0 License
* [Metrics](#metrics) with Prometheus
* Using a node exporter in case of dist-spec-only zot
* ```go get -u github.com/anuvu/zot/cmd/zot```
# Presentations
@ -267,9 +269,24 @@ runtime interface.
Works with "docker://" transport which is the default.
# Metrics
Can be used for both dist-spec-only zot & the zot with all extensions enabled
## Node Exporter
The dist-spec-only zot exposes internal metrics into a Prometheus format through a node exporter.
The configuration of the node exporter contains connection details for the zot server it is intended to scrape metrics from. See a [configuration example](./examples/metrics/exporter/config-minimal.json). The metrics are automatically enabled in the zot server on the first scrape from the node exporter (no extra configuration option is needed). Similarly, the metrics are automatically disabled when the node exporter has not performed any scrapes in a while.
```
bin/zot-exporter config _config-file_
```
## Enable Metrics
In the zot with all extensions case see [configuration example](./examples/config-metrics.json) for enabling metrics
# Caveats
* go 1.12+
* go 1.15+
* The OCI distribution spec is still WIP, and we try to keep up
# Contributing

15
cmd/exporter/main.go Normal file
View file

@ -0,0 +1,15 @@
// +build minimal
package main
import (
"os"
"github.com/anuvu/zot/pkg/exporter/cli"
)
// main is the entry point of the minimal-build node exporter binary.
// It executes the exporter CLI root command and exits with a non-zero
// status code if execution fails (the error itself is reported by cobra).
func main() {
if err := cli.NewExporterCmd().Execute(); err != nil {
os.Exit(1)
}
}

View file

@ -23,6 +23,7 @@ Examples of working configurations for various use cases are available [here](..
* [Authentication](#authentication)
* [Identity-based Authorization](#identity-based-authorization)
* [Logging](#logging)
* [Metrics](#metrics)
## Network
@ -245,3 +246,24 @@ Enable audit logs and set output file with:
"audit": "/tmp/zot-audit.log"
}
```
## Metrics
Enable and configure metrics with:
```
"metrics":{
"enable":"true",
```
Set server path on which metrics will be exposed:
```
"prometheus": {
"path": "/metrics"
}
}
```
In order to test the Metrics feature locally in a [Kind](https://kind.sigs.k8s.io/) cluster, follow [this guide](metrics/README.md).

View file

@ -0,0 +1,21 @@
{
"version": "0.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug"
},
"extensions": {
"metrics": {
"enable": true,
"prometheus": {
"path": "/metrics"
}
}
}
}

0
examples/config-policy.json Executable file → Normal file
View file

View file

@ -0,0 +1,40 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
RUN mkdir -p /go/src/github.com/anuvu/zot
WORKDIR /go/src/github.com/anuvu/zot
COPY . .
RUN CGO_ENABLED=0 make clean binary
RUN echo '{\n\
"storage": {\n\
"rootDirectory": "/var/lib/registry"\n\
},\n\
"http": {\n\
"address": "0.0.0.0",\n\
"port": "5000"\n\
},\n\
"log": {\n\
"level": "debug"\n\
},\n\
"extensions": {\n\
"metrics": {\n\
"enable": true,\n\
"prometheus": {\n\
"path": "/metrics"\n\
}\n\
}\n\
}\n\
}\n' > config.json && cat config.json
# ---
# Stage 2: Final image with nothing but certs, binary, and default config file
# ---
FROM scratch AS final
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /go/src/github.com/anuvu/zot/bin/zot /zot
COPY --from=builder /go/src/github.com/anuvu/zot/config.json /etc/zot/config.json
ENTRYPOINT ["/zot"]
EXPOSE 5000
VOLUME ["/var/lib/registry"]
CMD ["serve", "/etc/zot/config.json"]

View file

@ -0,0 +1,32 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
RUN mkdir -p /go/src/github.com/anuvu/zot
WORKDIR /go/src/github.com/anuvu/zot
COPY . .
RUN CGO_ENABLED=0 make clean exporter-minimal
RUN echo '{\n\
"Server": {\n\
"protocol": "http",\n\
"host": "127.0.0.1",\n\
"port": "5050"\n\
},\n\
"Exporter": {\n\
"port": "5051",\n\
"log": {\n\
"level": "debug"\n\
}\n\
}\n\
}\n' > config.json && cat config.json
# ---
# Stage 2: Final image with nothing but certs, binary, and default config file
# ---
FROM scratch AS final
COPY --from=builder /go/src/github.com/anuvu/zot/bin/zot-exporter /zot-exporter
COPY --from=builder /go/src/github.com/anuvu/zot/config.json /etc/zot/config.json
ENTRYPOINT ["/zot-exporter"]
EXPOSE 5051
VOLUME ["/var/lib/registry"]
CMD ["config", "/etc/zot/config.json"]

View file

@ -0,0 +1,32 @@
# ---
# Stage 1: Install certs, build binary, create default config file
# ---
FROM docker.io/golang:1.16 AS builder
RUN mkdir -p /go/src/github.com/anuvu/zot
WORKDIR /go/src/github.com/anuvu/zot
COPY . .
RUN CGO_ENABLED=0 make clean binary-minimal
RUN echo '{\n\
"storage": {\n\
"rootDirectory": "/var/lib/registry"\n\
},\n\
"http": {\n\
"address": "0.0.0.0",\n\
"port": "5050"\n\
},\n\
"log": {\n\
"level": "debug"\n\
}\n\
}\n' > config.json && cat config.json
# ---
# Stage 2: Final image with nothing but certs, binary, and default config file
# ---
FROM scratch AS final
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=builder /go/src/github.com/anuvu/zot/bin/zot-minimal /zot
COPY --from=builder /go/src/github.com/anuvu/zot/config.json /etc/zot/config.json
ENTRYPOINT ["/zot"]
EXPOSE 5050
VOLUME ["/var/lib/registry"]
CMD ["serve", "/etc/zot/config.json"]

28
examples/metrics/Makefile Normal file
View file

@ -0,0 +1,28 @@
# Prefer podman when installed; otherwise fall back to docker.
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
# Build the zot-with-all-extensions image from the repository root context.
.PHONY: binary-container
binary-container:
	${CONTAINER_RUNTIME} build -f Dockerfile -t zot-build:latest ../../.
# Run the extended zot image with the repo mounted into the container.
.PHONY: run-container
run-container:
	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd)/../..:/go/src/github.com/anuvu/zot \
		zot-build:latest
# Build the dist-spec-only (minimal) zot image.
.PHONY: binary-minimal-container
binary-minimal-container:
	${CONTAINER_RUNTIME} build -f Dockerfile-minimal -t zot-minimal:latest ../../.
# Run the minimal zot image with the repo mounted into the container.
.PHONY: run-minimal-container
run-minimal-container:
	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd)/../..:/go/src/github.com/anuvu/zot \
		zot-minimal:latest
# Build the node exporter image used alongside the minimal zot.
.PHONY: binary-exporter-container
binary-exporter-container:
	${CONTAINER_RUNTIME} build -f Dockerfile-exporter -t zot-exporter:latest ../../.
# Run the node exporter image with the repo mounted into the container.
.PHONY: run-exporter-container
run-exporter-container:
	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd)/../..:/go/src/github.com/anuvu/zot \
		zot-exporter:latest

View file

@ -0,0 +1,28 @@
A quick zot Metrics setup can be deployed locally in a kind cluster.
It contains:
* a Prometheus server deployed through an Operator
* a dist-spec-only zot deployment (a pod with 2 containers: the zot server & the node exporter)
* a zot with all extensions enabled
## Prerequisites
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
* [Kind](https://kind.sigs.k8s.io/)
* [Docker](https://www.docker.com/)
If any of the prerequisite tools are missing, the script will install them (requires root privileges)
## Metrics setup
To run a quick setup:
```
./kind_setup.sh
```
At the end of the script, the ports below are available locally (using *kubectl port-forward*) for easy access to the Prometheus & zot servers on the host:
* 9090 - for accessing Prometheus server
* 5000 - for zot with all extensions enabled
* 5050 - for accessing dist-spec-only zot server
* 5051 - for zot-exporter access (a Prometheus Node exporter)

View file

@ -0,0 +1,18 @@
{
"Server": {
"protocol": "http",
"host": "127.0.0.1",
"port": "8080"
},
"Exporter": {
"port": "8081",
"log": {
"level": "info",
"output": "/tmp/zot_exporter.log"
},
"metrics": {
"path": "/mymetrics"
}
}
}

View file

@ -0,0 +1,14 @@
{
"Server": {
"protocol": "http",
"host": "127.0.0.1",
"port": "8080"
},
"Exporter": {
"port": "8081",
"log": {
"level": "debug"
}
}
}

78
examples/metrics/kind_setup.sh Executable file
View file

@ -0,0 +1,78 @@
#!/bin/bash
#set -x
set -e
CLUSTER_NAME=zot
# Script tested with below kubectl & kind versions
KUBECTL_VERSION=v1.17.5
KIND_VERSION=v0.7.0
# install_bin <download-url> <binary-name>
# Downloads a binary, marks it executable, and moves it to /usr/local/bin.
# Requires root since it writes under /usr/local/bin.
function install_bin() {
if [ "$EUID" -ne 0 ]
then echo "Please run as root/sudo"
exit 1
fi
curl -Lo ./$2 $1
chmod +x ./$2
yes | mv ./$2 /usr/local/bin/$2
}
## Install kubectl & kind if not available on the system
# Kubectl
kubectl > /dev/null 2>&1 || install_bin https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/`uname | awk '{print tolower($0)}'`/amd64/kubectl kubectl
# Kind
kind version || install_bin https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-$(uname)-amd64 kind
## Delete the cluster if it already exist
kind get clusters | grep ${CLUSTER_NAME} && kind delete cluster --name ${CLUSTER_NAME}
kind create cluster --name ${CLUSTER_NAME}
# Pre-pull the Prometheus operator images and load them into the kind
# cluster so the pods don't need to pull from the network at deploy time.
docker pull quay.io/prometheus-operator/prometheus-operator:v0.51.2
docker pull quay.io/prometheus-operator/prometheus-config-reloader:v0.51.2
docker pull quay.io/prometheus/prometheus:v2.22.1
kind load docker-image quay.io/prometheus-operator/prometheus-operator:v0.51.2 --name ${CLUSTER_NAME}
kind load docker-image quay.io/prometheus-operator/prometheus-config-reloader:v0.51.2 --name ${CLUSTER_NAME}
kind load docker-image quay.io/prometheus/prometheus:v2.22.1 --name ${CLUSTER_NAME}
## Build zot & zot-exporter related images
make binary-container
make binary-minimal-container
make binary-exporter-container
kind load docker-image zot-build:latest --name ${CLUSTER_NAME}
kind load docker-image zot-minimal:latest --name ${CLUSTER_NAME}
kind load docker-image zot-exporter:latest --name ${CLUSTER_NAME}
## Deploy prometheus operator
kubectl create -f kubernetes/prometheus/bundle.yaml
## Deploy the Kubernetes objects for RBAC, prometheus CRD and deploy the service
kubectl apply -f kubernetes/prometheus/prom_rbac.yaml
kubectl apply -f kubernetes/prometheus/prometheus.yaml
kubectl apply -f kubernetes/prometheus/prom_service.yaml
# Give the operator time to reconcile before creating ServiceMonitors.
sleep 10
## Deploy zot extended & minimal in 2 separate deployments
## Deploy Prometheus operator servicemonitor CRD instances for prometheus to be able to scrape metrics from zot extended & the node exporter
kubectl apply -f kubernetes/zot-extended/deployment.yaml
kubectl apply -f kubernetes/zot-extended/service.yaml
kubectl apply -f kubernetes/zot-extended/servicemonitor.yaml
kubectl apply -f kubernetes/zot-minimal/deployment.yaml
kubectl apply -f kubernetes/zot-minimal/service.yaml
kubectl apply -f kubernetes/zot-minimal/exporter-service.yaml
kubectl apply -f kubernetes/zot-minimal/exporter-servicemonitor.yaml
sleep 10
## For being able to access prometheus, zot & exporter on localhost ports
# NOTE: these port-forwards run in the background for the life of the shell.
kubectl port-forward svc/prometheus 9090 --address='0.0.0.0' &
kubectl port-forward svc/zot-extended 5000 --address='0.0.0.0' &
kubectl port-forward svc/zot-minimal 5050 --address='0.0.0.0' &
kubectl port-forward svc/zot-exporter 5051 --address='0.0.0.0' &

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,43 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: default

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: prometheus
labels:
app: prometheus
spec:
ports:
- name: web
port: 9090
targetPort: web
selector:
app: prometheus
sessionAffinity: ClientIP

View file

@ -0,0 +1,22 @@
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
labels:
app: prometheus
spec:
image: quay.io/prometheus/prometheus:v2.22.1
nodeSelector:
kubernetes.io/os: linux
replicas: 1
resources:
requests:
memory: 400Mi
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
serviceAccountName: prometheus
version: v2.22.1
serviceMonitorSelector: {}

View file

@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zot-extended
labels:
app: zot-extended
spec:
replicas: 1
selector:
matchLabels:
app: zot-extended
template:
metadata:
labels:
app: zot-extended
spec:
containers:
- name: zot-extended
image: zot-build:latest
imagePullPolicy: IfNotPresent
ports:
- name: zot-extended
containerPort: 5000

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: zot-extended
labels:
app: zot-extended
spec:
ports:
- name: zot-extended
port: 5000
targetPort: zot-extended
selector:
app: zot-extended
sessionAffinity: ClientIP

View file

@ -0,0 +1,15 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: zot-extended
labels:
app: zot-extended
spec:
endpoints:
- interval: 10s
port: zot-extended
scrapeTimeout: 5s
selector:
matchLabels:
app: zot-extended

View file

@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zot-minimal
labels:
app: zot-minimal
spec:
replicas: 1
selector:
matchLabels:
app: zot-minimal
template:
metadata:
labels:
app: zot-minimal
spec:
containers:
- name: zot-minimal
image: zot-minimal:latest
imagePullPolicy: IfNotPresent
ports:
- name: zot-minimal
containerPort: 5050
- name: zot-exporter
image: zot-exporter:latest
imagePullPolicy: IfNotPresent
ports:
- name: zot-exporter
containerPort: 5051

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: zot-exporter
labels:
app: zot-minimal
spec:
ports:
- name: zot-exporter
port: 5051
targetPort: zot-exporter
selector:
app: zot-minimal
sessionAffinity: ClientIP

View file

@ -0,0 +1,15 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: zot-exporter
labels:
app: zot-minimal
spec:
endpoints:
- interval: 10s
port: zot-exporter
scrapeTimeout: 5s
selector:
matchLabels:
app: zot-minimal

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: zot-minimal
labels:
app: zot-minimal
spec:
ports:
- name: zot-minimal
port: 5050
targetPort: zot-minimal
selector:
app: zot-minimal
sessionAffinity: ClientIP

2
go.mod
View file

@ -34,6 +34,8 @@ require (
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/umoci v0.4.8-0.20210922062158-e60a0cc726e6
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0
github.com/rs/zerolog v1.22.0
github.com/smartystreets/goconvey v1.6.4
github.com/spf13/cobra v1.2.1

0
pkg/api/authz.go Executable file → Normal file
View file

View file

@ -14,6 +14,7 @@ import (
var (
Commit string // nolint: gochecknoglobals
BinaryType string // nolint: gochecknoglobals
GoVersion string // nolint: gochecknoglobals
)
type StorageConfig struct {
@ -102,6 +103,7 @@ type Policy struct {
type Config struct {
Version string
GoVersion string
Commit string
BinaryType string
AccessControl *AccessControlConfig
@ -114,6 +116,7 @@ type Config struct {
func New() *Config {
return &Config{
Version: distspec.Version,
GoVersion: GoVersion,
Commit: Commit,
BinaryType: BinaryType,
Storage: GlobalStorageConfig{GC: true, Dedupe: true},

View file

@ -12,6 +12,7 @@ import (
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/api/config"
ext "github.com/anuvu/zot/pkg/extensions"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
"github.com/gorilla/handlers"
@ -29,6 +30,7 @@ type Controller struct {
Log log.Logger
Audit *log.Logger
Server *http.Server
Metrics monitoring.MetricServer
}
func NewController(config *config.Config) *Controller {
@ -72,17 +74,26 @@ func (c *Controller) Run() error {
engine := mux.NewRouter()
engine.Use(DefaultHeaders(),
log.SessionLogger(c.Log),
SessionLogger(c),
handlers.RecoveryHandler(handlers.RecoveryLogger(c.Log),
handlers.PrintRecoveryStack(false)))
if c.Audit != nil {
engine.Use(log.SessionAuditLogger(c.Audit))
engine.Use(SessionAuditLogger(c.Audit))
}
c.Router = engine
c.Router.UseEncodedPath()
var enabled bool
if c.Config != nil &&
c.Config.Extensions != nil &&
c.Config.Extensions.Metrics != nil &&
c.Config.Extensions.Metrics.Enable {
enabled = true
}
c.Metrics = monitoring.NewMetricsServer(enabled, c.Log)
c.StoreController = storage.StoreController{}
if c.Config.Storage.RootDirectory != "" {
@ -97,7 +108,7 @@ func (c *Controller) Run() error {
}
defaultStore := storage.NewImageStore(c.Config.Storage.RootDirectory,
c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log)
c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log, c.Metrics)
c.StoreController.DefaultStore = defaultStore
@ -131,7 +142,7 @@ func (c *Controller) Run() error {
}
subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory,
storageConfig.GC, storageConfig.Dedupe, c.Log)
storageConfig.GC, storageConfig.Dedupe, c.Log, c.Metrics)
// Enable extensions if extension config is provided
if c.Config != nil && c.Config.Extensions != nil {
@ -143,6 +154,7 @@ func (c *Controller) Run() error {
}
}
monitoring.SetServerInfo(c.Metrics, c.Config.Commit, c.Config.BinaryType, c.Config.GoVersion, c.Config.Version)
_ = NewRouteHandler(c)
addr := fmt.Sprintf("%s:%s", c.Config.HTTP.Address, c.Config.HTTP.Port)

14
pkg/api/errors_test.go Normal file
View file

@ -0,0 +1,14 @@
package api_test
import (
"testing"
"github.com/anuvu/zot/pkg/api"
. "github.com/smartystreets/goconvey/convey"
)
// TestUnknownCodeError verifies that api.NewError panics when given an
// error code it does not recognize, rather than returning a bogus error.
func TestUnknownCodeError(t *testing.T) {
Convey("Retrieve a new error with unknown code", t, func() {
// 123456789 is not a registered error code, so NewError must panic.
So(func() { _ = api.NewError(123456789, nil) }, ShouldPanic)
})
}

View file

@ -96,9 +96,15 @@ func (rh *RouteHandler) SetupRoutes() {
// swagger swagger "/swagger/v2/index.html"
rh.c.Router.PathPrefix("/swagger/v2/").Methods("GET").Handler(httpSwagger.WrapHandler)
// Setup Extensions Routes
if rh.c.Config != nil && rh.c.Config.Extensions != nil {
if rh.c.Config != nil {
if rh.c.Config.Extensions == nil {
// minimal build
g.HandleFunc("/metrics", rh.GetMetrics).Methods("GET")
} else {
// extended build
ext.SetupRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.Log)
}
}
}
// Method handlers
@ -1177,6 +1183,11 @@ func (rh *RouteHandler) ListRepositories(w http.ResponseWriter, r *http.Request)
WriteJSON(w, http.StatusOK, is)
}
func (rh *RouteHandler) GetMetrics(w http.ResponseWriter, r *http.Request) {
m := rh.c.Metrics.ReceiveMetrics()
WriteJSON(w, http.StatusOK, m)
}
// helper routines
func getContentRange(r *http.Request) (int64 /* from */, int64 /* to */, error) {

158
pkg/api/session.go Normal file
View file

@ -0,0 +1,158 @@
package api
import (
"encoding/base64"
"net/http"
"strconv"
"strings"
"time"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/gorilla/mux"
)
// statusWriter wraps an http.ResponseWriter to capture the HTTP status
// code and the number of response body bytes written, so session
// logging/metrics middleware can report them after the handler runs.
type statusWriter struct {
http.ResponseWriter
status int
length int
}
// WriteHeader records the status code, then delegates to the wrapped writer.
func (w *statusWriter) WriteHeader(status int) {
w.status = status
w.ResponseWriter.WriteHeader(status)
}
// Write forwards body bytes to the wrapped writer and accumulates the
// byte count. If WriteHeader was never called, the status defaults to
// 200, mirroring net/http's implicit-200 behavior.
func (w *statusWriter) Write(b []byte) (int, error) {
if w.status == 0 {
w.status = 200
}
n, err := w.ResponseWriter.Write(b)
w.length += n
return n, err
}
// SessionLogger returns middleware that logs one structured record per
// HTTP request (client IP, method, path, status, latency, body size,
// headers) and feeds per-request counters/latency observations into the
// controller's metrics server. Authorization headers are masked before
// logging; a basic-auth username, when decodable, is logged separately.
func SessionLogger(c *Controller) mux.MiddlewareFunc {
l := c.Log.With().Str("module", "http").Logger()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Start timer
start := time.Now()
path := r.URL.Path
raw := r.URL.RawQuery
// Wrap the writer so status code and body size can be read afterwards.
sw := statusWriter{ResponseWriter: w}
// Process request
next.ServeHTTP(&sw, r)
// Stop timer
end := time.Now()
latency := end.Sub(start)
if latency > time.Minute {
// Truncate in a golang < 1.8 safe way
latency -= latency % time.Second
}
clientIP := r.RemoteAddr
method := r.Method
headers := map[string][]string{}
username := ""
log := l.Info()
for key, value := range r.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
// nolint:gomnd
if len(pair) == 2 {
username = pair[0]
log = log.Str("username", username)
}
}
}
// Mask credentials; only the (non-secret) username is logged above.
value = []string{"******"}
}
headers[key] = value
}
statusCode := sw.status
bodySize := sw.length
if raw != "" {
path = path + "?" + raw
}
if path != "/v2/metrics" {
// In order to test the metrics feature, the instrumentation related to the
// node exporter should be handled by the node exporter itself (e.g. latency),
// so requests to the metrics endpoint are excluded here.
monitoring.IncHTTPConnRequests(c.Metrics, method, strconv.Itoa(statusCode))
monitoring.ObserveHTTPRepoLatency(c.Metrics, path, latency) // summary
monitoring.ObserveHTTPMethodLatency(c.Metrics, method, latency) // histogram
}
log.Str("clientIP", clientIP).
Str("method", method).
Str("path", path).
Int("statusCode", statusCode).
Str("latency", latency.String()).
Int("bodySize", bodySize).
Interface("headers", headers).
Msg("HTTP API")
})
}
}
// SessionAuditLogger returns middleware that writes an audit record for
// every successful mutating request (POST/PUT/PATCH/DELETE that returned
// 200/201/202), recording the client IP, the basic-auth username when
// decodable, the method, the path, and the status code. Read-only and
// failed requests are intentionally not audited.
func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
raw := r.URL.RawQuery
// Wrap the writer so the final status code can be inspected below.
sw := statusWriter{ResponseWriter: w}
// Process request
next.ServeHTTP(&sw, r)
clientIP := r.RemoteAddr
method := r.Method
username := ""
for key, value := range r.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
// nolint:gomnd
if len(pair) == 2 {
username = pair[0]
}
}
}
}
}
statusCode := sw.status
if raw != "" {
path = path + "?" + raw
}
// Audit only mutating verbs that actually succeeded.
if (method == http.MethodPost || method == http.MethodPut ||
method == http.MethodPatch || method == http.MethodDelete) &&
(statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) {
audit.Info().
Str("clientIP", clientIP).
Str("subject", username).
Str("action", method).
Str("object", path).
Int("status", statusCode).
Msg("HTTP API Audit")
}
})
}
}

View file

@ -0,0 +1,37 @@
// +build minimal
package api
// We export below types in order for cli package to be able to read it from configuration file.
// LogConfig holds the exporter's logging settings.
type LogConfig struct {
Level string
Output string
}
// MetricsConfig holds the HTTP path on which the exporter serves metrics.
type MetricsConfig struct {
Path string
}
// ServerConfig describes how to reach the zot server being scraped.
type ServerConfig struct {
Protocol string
Host string
Port string
}
// ExporterConfig holds the exporter's own listen port, logging, and
// metrics-path settings.
type ExporterConfig struct {
Port string
Log *LogConfig
Metrics *MetricsConfig
}
// Config is the top-level exporter configuration: the target zot server
// plus the exporter's own settings.
type Config struct {
Server ServerConfig
Exporter ExporterConfig
}
// DefaultConfig returns a Config with defaults: scrape http://localhost:8080,
// serve on port 8081 at /metrics, and log at debug level.
func DefaultConfig() *Config {
return &Config{
Server: ServerConfig{Protocol: "http", Host: "localhost", Port: "8080"},
Exporter: ExporterConfig{Port: "8081", Log: &LogConfig{Level: "debug"}, Metrics: &MetricsConfig{Path: "/metrics"}},
}
}

View file

@ -0,0 +1,21 @@
// +build minimal
package api
import (
"github.com/anuvu/zot/pkg/log"
)
// Controller ties together the exporter configuration and its logger.
type Controller struct {
Config *Config
Log log.Logger
}
// NewController builds a Controller, creating a logger from the
// exporter's configured level and output destination.
func NewController(cfg *Config) *Controller {
logger := log.NewLogger(cfg.Exporter.Log.Level, cfg.Exporter.Log.Output)
return &Controller{Config: cfg, Log: logger}
}
// Run starts the exporter; this call blocks for the lifetime of the server.
func (c *Controller) Run() {
runExporter(c)
}

View file

@ -0,0 +1,483 @@
// +build minimal
package api_test
import (
"context"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"strings"
"sync"
"testing"
"time"
zotapi "github.com/anuvu/zot/pkg/api"
zotcfg "github.com/anuvu/zot/pkg/api/config"
"github.com/anuvu/zot/pkg/exporter/api"
"github.com/anuvu/zot/pkg/extensions/monitoring"
jsoniter "github.com/json-iterator/go"
"github.com/phayes/freeport"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/resty.v1"
)
const (
BaseURL = "http://127.0.0.1:%s"
SleepTime = 50 * time.Millisecond
SecondToNanoseconds = 1000000000
)
// getRandomLatencyN returns a random duration in [0, maxNanoSeconds).
func getRandomLatencyN(maxNanoSeconds int64) time.Duration {
rand.Seed(time.Now().UnixNano())
return time.Duration(rand.Int63n(maxNanoSeconds))
}
// getRandomLatency returns a random latency of up to 2 minutes.
func getRandomLatency() time.Duration {
return getRandomLatencyN(120 * SecondToNanoseconds) // a random latency (in nanoseconds) that can be up to 2 minutes
}
// getFreePort asks the OS for an unused TCP port; panics if none is available.
func getFreePort() string {
port, err := freeport.GetFreePort()
if err != nil {
panic(err)
}
return fmt.Sprint(port)
}
// TestNew checks that a default exporter config and controller can be created.
func TestNew(t *testing.T) {
Convey("Make a new controller", t, func() {
config := api.DefaultConfig()
So(config, ShouldNotBeNil)
So(api.NewController(config), ShouldNotBeNil)
})
}
// isChannelDrained reports whether ch has no buffered metric left after a
// short settling delay (i.e. the collector produced no extra metrics).
func isChannelDrained(ch chan prometheus.Metric) bool {
time.Sleep(SleepTime)
select {
case <-ch:
return false
default:
return true
}
}
// readDefaultMetrics consumes and asserts the two metrics every collection
// starts with: zot_up == 1 (server reachable) and zot_info == 0.
// Must be called inside a Convey block (it uses So assertions).
func readDefaultMetrics(zc *api.Collector, ch chan prometheus.Metric) {
var metric dto.Metric
pm := <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_up"].String())
err := pm.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Gauge.Value, ShouldEqual, 1)
pm = <-ch
So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_info"].String())
err = pm.Write(&metric)
So(err, ShouldBeNil)
So(*metric.Gauge.Value, ShouldEqual, 0)
}
// TestNewExporter starts the node exporter (and, in the inner suites, a zot
// server) and exercises the full scrape path: collector output when zot is
// down/up, counter/summary/histogram semantics, concurrent updates and
// scrapes, malformed metric inputs, and the idle timeout that disables
// metrics when no scrape arrives for a while.
func TestNewExporter(t *testing.T) {
	Convey("Make an exporter controller", t, func() {
		exporterConfig := api.DefaultConfig()
		So(exporterConfig, ShouldNotBeNil)
		exporterPort := getFreePort()
		serverPort := getFreePort()
		exporterConfig.Exporter.Port = exporterPort
		dir, _ := ioutil.TempDir("", "metrics")
		exporterConfig.Exporter.Metrics.Path = strings.TrimPrefix(dir, "/tmp/")
		exporterConfig.Server.Port = serverPort
		exporterController := api.NewController(exporterConfig)

		Convey("Start the zot exporter", func() {
			go func() {
				// this blocks
				exporterController.Run()
				So(nil, ShouldNotBeNil) // Fail the test in case zot exporter unexpectedly exits
			}()
			time.Sleep(SleepTime)

			zc := api.GetCollector(exporterController)
			ch := make(chan prometheus.Metric)

			Convey("When zot server not running", func() {
				go func() {
					// this blocks
					zc.Collect(ch)
				}()
				// Read from the channel expected values
				pm := <-ch
				So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_up"].String())

				var metric dto.Metric
				err := pm.Write(&metric)
				So(err, ShouldBeNil)
				So(*metric.Gauge.Value, ShouldEqual, 0) // "zot_up=0" means zot server is not running

				// Check that no more data was written to the channel
				So(isChannelDrained(ch), ShouldEqual, true)
			})
			Convey("When zot server is running", func() {
				servercConfig := zotcfg.New()
				So(servercConfig, ShouldNotBeNil)
				baseURL := fmt.Sprintf(BaseURL, serverPort)
				servercConfig.HTTP.Port = serverPort
				serverController := zotapi.NewController(servercConfig)
				So(serverController, ShouldNotBeNil)

				dir, err := ioutil.TempDir("", "exporter-test")
				So(err, ShouldBeNil)
				defer os.RemoveAll(dir)
				serverController.Config.Storage.RootDirectory = dir
				go func(c *zotapi.Controller) {
					// this blocks
					if err := c.Run(); !errors.Is(err, http.ErrServerClosed) {
						panic(err)
					}
				}(serverController)
				defer func(c *zotapi.Controller) {
					_ = c.Server.Shutdown(context.TODO())
				}(serverController)
				// wait till ready
				for {
					_, err := resty.R().Get(baseURL)
					if err == nil {
						break
					}
					time.Sleep(SleepTime)
				}

				// Side effect of calling this endpoint is that it will enable metrics
				resp, err := resty.R().Get(baseURL + "/v2/metrics")
				So(resp, ShouldNotBeNil)
				So(err, ShouldBeNil)
				So(resp.StatusCode(), ShouldEqual, 200)

				Convey("Collecting data: default metrics", func() {
					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)
					So(isChannelDrained(ch), ShouldEqual, true)
				})

				Convey("Collecting data: Test init value & that increment works on Counters", func() {
					// Testing initial value of the counter to be 1 after first incrementation call
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)
					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_uploads_total"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					So(isChannelDrained(ch), ShouldEqual, true)

					// Testing that counter is incremented by 1
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)
					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_uploads_total"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurent Counter increment requests works properly", func() {
					reqsSize := rand.Intn(1000)
					for i := 0; i < reqsSize; i++ {
						monitoring.IncDownloadCounter(serverController.Metrics, "dummyrepo")
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_repo_downloads_total"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Summaries", func() {
					// Testing initial value of the summary counter to be 1 after first observation call
					var latency1, latency2 time.Duration
					latency1 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blogs/dummydigest", latency1)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency1.Seconds())

					So(isChannelDrained(ch), ShouldEqual, true)

					// Testing that summary counter is incremented by 1 and summary sum is properly updated
					latency2 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blogs/dummydigest", latency2)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, (latency1.Seconds())+(latency2.Seconds()))

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurent Summary observation requests works properly", func() {
					var latencySum float64
					reqsSize := rand.Intn(1000)
					for i := 0; i < reqsSize; i++ {
						latency := getRandomLatency()
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/dummyrepo/manifests/testreference", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Histogram buckets", func() {
					// Testing initial value of the histogram counter to be 1 after first observation call
					latency := getRandomLatency()
					monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency.Seconds())

					// buckets are cumulative: every bound above the observed latency counts it
					for _, fvalue := range monitoring.GetDefaultBuckets() {
						pm = <-ch
						So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pm.Write(&metric)
						So(err, ShouldBeNil)
						if latency.Seconds() < fvalue {
							So(*metric.Counter.Value, ShouldEqual, 1)
						} else {
							So(*metric.Counter.Value, ShouldEqual, 0)
						}
					}

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Collecting data: Test init Histogram buckets \n", func() {
					// Generate a random latency within each bucket and finally test
					// that "higher" rank bucket counter is incremented by 1
					var latencySum float64

					dBuckets := monitoring.GetDefaultBuckets()
					for i, fvalue := range dBuckets {
						var latency time.Duration
						if i == 0 {
							// first bucket value
							latency = getRandomLatencyN(int64(fvalue * SecondToNanoseconds))
						} else {
							pvalue := dBuckets[i-1] // previous bucket value
							latency = time.Duration(pvalue*SecondToNanoseconds) +
								getRandomLatencyN(int64(dBuckets[0]*SecondToNanoseconds))
						}
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						zc.Collect(ch)
					}()
					readDefaultMetrics(zc, ch)

					pm := <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, len(dBuckets))

					pm = <-ch
					So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					for i := range dBuckets {
						pm = <-ch
						So(pm.Desc().String(), ShouldEqual, zc.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pm.Write(&metric)
						So(err, ShouldBeNil)
						So(*metric.Counter.Value, ShouldEqual, i+1)
					}

					So(isChannelDrained(ch), ShouldEqual, true)
				})
				Convey("Negative testing: Send unknown metric type to MetricServer", func() {
					serverController.Metrics.SendMetric(getRandomLatency())
				})
				Convey("Concurrent metrics scrape", func() {
					var wg sync.WaitGroup

					workersSize := rand.Intn(100)
					for i := 0; i < workersSize; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							m := serverController.Metrics.ReceiveMetrics()

							var json = jsoniter.ConfigCompatibleWithStandardLibrary
							_, err := json.Marshal(m)
							if err != nil {
								exporterController.Log.Error().Err(err).Msg("Concurrent metrics scrape fail")
							}
						}()
					}
					wg.Wait()
				})
				Convey("Negative testing: Increment a counter that does not exist", func() {
					cv := monitoring.CounterValue{Name: "dummyName"}
					serverController.Metrics.SendMetric(cv)
				})
				Convey("Negative testing: Set a gauge for a metric with len(labelNames)!=len(knownLabelNames)", func() {
					gv := monitoring.GaugeValue{
						Name:       "zot.info",
						Value:      1,
						LabelNames: []string{"commit", "binaryType", "version"},
					}
					serverController.Metrics.SendMetric(gv)
				})
				Convey("Negative testing: Summary observe for a metric with labelNames!=knownLabelNames", func() {
					sv := monitoring.SummaryValue{
						Name:        "zot.repo.latency.seconds",
						LabelNames:  []string{"dummyRepoLabelName"},
						LabelValues: []string{"dummyrepo"},
					}
					serverController.Metrics.SendMetric(sv)
				})
				Convey("Negative testing: Histogram observe for a metric with len(labelNames)!=len(LabelValues)", func() {
					hv := monitoring.HistogramValue{
						Name:        "zot.method.latency.seconds",
						LabelNames:  []string{"method"},
						LabelValues: []string{"GET", "POST", "DELETE"},
					}
					serverController.Metrics.SendMetric(hv)
				})
				Convey("Negative testing: error in getting the size for a repo directory", func() {
					monitoring.SetStorageUsage(serverController.Metrics, "/tmp/zot", "dummyrepo")
				})
				Convey("Disabling metrics after idle timeout", func() {
					So(serverController.Metrics.IsEnabled(), ShouldEqual, true)
					time.Sleep(monitoring.GetMaxIdleScrapeInterval())
					So(serverController.Metrics.IsEnabled(), ShouldEqual, false)
				})
			})
		})
	})
}

View file

@ -0,0 +1,180 @@
// +build minimal
package api
import (
"fmt"
"math"
"net/http"
"regexp"
"strconv"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Collector scrapes metrics from a zot server and converts them into
// Prometheus samples; it implements the prometheus.Collector interface.
type Collector struct {
	Client       *monitoring.MetricsClient   // client used to pull the metrics snapshot from zot
	MetricsDesc  map[string]*prometheus.Desc // all known metrics descriptions
	invalidChars *regexp.Regexp              // matches characters not allowed in Prometheus metric names
}
// Describe implements the prometheus.Collector interface by emitting every
// pre-computed metric description.
func (zc Collector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range zc.MetricsDesc {
		ch <- desc
	}
}
// Implements prometheus.Collector interface.
// Collect scrapes the zot server through the metrics client and converts
// the returned snapshot into Prometheus const-metrics. "zot_up" is emitted
// first: 0 when zot is unreachable (in which case nothing else is sent),
// 1 otherwise. Counters get a "_total" suffix; summaries and histograms are
// flattened into "_count"/"_sum" (and "_bucket") samples.
func (zc Collector) Collect(ch chan<- prometheus.Metric) {
	metrics, err := zc.Client.GetMetrics()
	if err != nil {
		// NOTE(review): this goes to stdout; consider routing it through a
		// structured logger instead
		fmt.Println(err)
		ch <- prometheus.MustNewConstMetric(zc.MetricsDesc["zot_up"], prometheus.GaugeValue, 0)
		return
	}

	ch <- prometheus.MustNewConstMetric(zc.MetricsDesc["zot_up"], prometheus.GaugeValue, 1)

	for _, g := range metrics.Gauges {
		// zot reports dotted metric names; normalize to the Prometheus charset
		name := zc.invalidChars.ReplaceAllLiteralString(g.Name, "_")
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.GaugeValue, g.Value, g.LabelValues...)
	}

	for _, c := range metrics.Counters {
		name := zc.invalidChars.ReplaceAllLiteralString(c.Name, "_")
		name += "_total"
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.CounterValue, float64(c.Count), c.LabelValues...)
	}

	for _, s := range metrics.Summaries {
		mname := zc.invalidChars.ReplaceAllLiteralString(s.Name, "_")
		name := mname + "_count"
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.CounterValue, float64(s.Count), s.LabelValues...)
		name = mname + "_sum"
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.CounterValue, s.Sum, s.LabelValues...)
	}

	for _, h := range metrics.Histograms {
		mname := zc.invalidChars.ReplaceAllLiteralString(h.Name, "_")
		name := mname + "_count"
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.CounterValue, float64(h.Count), h.LabelValues...)
		name = mname + "_sum"
		ch <- prometheus.MustNewConstMetric(
			zc.MetricsDesc[name], prometheus.CounterValue, h.Sum, h.LabelValues...)
		if h.Buckets != nil {
			// one "_bucket" sample per known bound, labelled with "le";
			// MaxFloat64 stands in for the "+Inf" bucket
			for _, fvalue := range monitoring.GetDefaultBuckets() {
				var svalue string
				if fvalue == math.MaxFloat64 {
					svalue = "+Inf"
				} else {
					svalue = strconv.FormatFloat(fvalue, 'f', -1, 64)
				}
				name = mname + "_bucket"
				ch <- prometheus.MustNewConstMetric(
					zc.MetricsDesc[name], prometheus.CounterValue, float64(h.Buckets[svalue]), append(h.LabelValues, svalue)...)
			}
		}
	}
}
// panicOnDuplicateMetricName aborts the process (via log.Fatal) when name is
// already registered in m; a duplicate descriptor indicates a programming
// error in the metric tables.
func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log log.Logger) {
	_, duplicate := m[name]
	if duplicate {
		log.Fatal().Msg("Duplicate keys: metric " + name + " already present")
	}
}
// GetCollector builds the Prometheus collector for the zot server the
// controller is configured to scrape. It pre-computes a prometheus.Desc for
// every metric zot can report: counters get a "_total" suffix, summaries
// expand to "_count"/"_sum", histograms additionally get "_bucket" with an
// extra "le" label; zot's dotted names are normalized to the Prometheus
// charset. Duplicate generated names are fatal (see panicOnDuplicateMetricName).
func GetCollector(c *Controller) *Collector {
	// compute all metrics description map
	MetricsDesc := map[string]*prometheus.Desc{
		"zot_up": prometheus.NewDesc(
			"zot_up",
			"Connection to zot server was successfully established.",
			nil, nil,
		),
	}
	invalidChars := regexp.MustCompile("[^a-zA-Z0-9:_]")
	for metricName, metricLabelNames := range monitoring.GetCounters() {
		name := invalidChars.ReplaceAllLiteralString(metricName, "_")
		name += "_total"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
	}
	for metricName, metricLabelNames := range monitoring.GetGauges() {
		name := invalidChars.ReplaceAllLiteralString(metricName, "_")
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
	}
	for metricName, metricLabelNames := range monitoring.GetSummaries() {
		mname := invalidChars.ReplaceAllLiteralString(metricName, "_")
		name := mname + "_count"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
		name = mname + "_sum"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
	}
	for metricName, metricLabelNames := range monitoring.GetHistograms() {
		mname := invalidChars.ReplaceAllLiteralString(metricName, "_")
		name := mname + "_count"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
		name = mname + "_sum"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, metricLabelNames, nil)
		name = mname + "_bucket"
		panicOnDuplicateMetricName(MetricsDesc, name, c.Log)
		// Append a new label to histogram bucket - le - 'lower or equal'
		MetricsDesc[name] = prometheus.NewDesc(name, "Metric "+name, append(metricLabelNames, "le"), nil)
	}

	// parameters to connect to the zot server
	serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol,
		c.Config.Server.Host, c.Config.Server.Port)
	cfg := &monitoring.MetricsConfig{Address: serverAddr}

	return &Collector{
		Client:       monitoring.NewMetricsClient(cfg, c.Log),
		MetricsDesc:  MetricsDesc,
		invalidChars: invalidChars,
	}
}
// runExporter registers the zot collector with the default Prometheus
// registry and serves the metrics endpoint on the configured exporter port;
// it blocks until the HTTP server fails (log.Fatal exits then).
func runExporter(c *Controller) {
	err := prometheus.Register(GetCollector(c))
	if err != nil {
		// registration fails when the collector is already registered, which
		// happens when tests construct multiple controllers in one process
		c.Log.Error().Err(err).Msg("Expected error in testing")
	}

	http.Handle(c.Config.Exporter.Metrics.Path, promhttp.Handler())
	exporterAddr := fmt.Sprintf(":%s", c.Config.Exporter.Port)
	c.Log.Info().Msgf("Exporter is listening on %s & exposes metrics on %s path",
		exporterAddr, c.Config.Exporter.Metrics.Path)

	serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol,
		c.Config.Server.Host, c.Config.Server.Port)
	c.Log.Info().Msgf("Scraping metrics from %s", serverAddr)
	c.Log.Fatal().Err(http.ListenAndServe(exporterAddr, nil)).Msg("Exporter stopped")
}

76
pkg/exporter/cli/cli.go Normal file
View file

@ -0,0 +1,76 @@
// +build minimal
package cli
import (
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/exporter/api"
"github.com/mitchellh/mapstructure"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// metadataConfig reports metadata after parsing, which we use to track
// errors (e.g. unknown or unused keys in the configuration file).
func metadataConfig(md *mapstructure.Metadata) viper.DecoderConfigOption {
	return func(c *mapstructure.DecoderConfig) {
		c.Metadata = md
	}
}
// NewExporterCmd builds the root cobra command of the node exporter with a
// single "config" subcommand that loads an optional configuration file and
// runs the exporter controller.
func NewExporterCmd() *cobra.Command {
	config := api.DefaultConfig()

	// "config"
	configCmd := &cobra.Command{
		Use:     "config <config_file>",
		Aliases: []string{"config"},
		Short:   "`config` node exporter properties",
		Long:    "`config` node exporter properties",
		Run: func(cmd *cobra.Command, args []string) {
			if len(args) > 0 {
				loadConfiguration(config, args[0])
			}
			c := api.NewController(config)
			c.Run()
		},
	}

	// "zot_exporter" (root command)
	exporterCmd := &cobra.Command{
		Use:   "zot_exporter",
		Short: "`zot_exporter`",
		Long:  "`zot_exporter`",
		Run: func(cmd *cobra.Command, args []string) {
			_ = cmd.Usage()
			cmd.SilenceErrors = false
		},
	}

	exporterCmd.AddCommand(configCmd)

	return exporterCmd
}
// loadConfiguration reads configPath into config via viper, panicking on any
// read/unmarshal error or when the file decodes no known key or contains
// unused (unknown) keys.
func loadConfiguration(config *api.Config, configPath string) {
	viper.SetConfigFile(configPath)

	if err := viper.ReadInConfig(); err != nil {
		log.Error().Err(err).Msg("Error while reading configuration")
		panic(err)
	}

	md := &mapstructure.Metadata{}
	if err := viper.Unmarshal(&config, metadataConfig(md)); err != nil {
		log.Error().Err(err).Msg("Error while unmarshalling new config")
		panic(err)
	}

	// an empty Keys set means nothing was decoded; Unused keys are likely typos
	if len(md.Keys) == 0 || len(md.Unused) > 0 {
		log.Error().Err(errors.ErrBadConfig).Msg("Bad configuration, retry writing it")
		panic(errors.ErrBadConfig)
	}
}

View file

@ -9,6 +9,7 @@ import (
// ExtensionConfig groups the optional zot extension configurations; a nil
// field means the corresponding extension is not configured.
type ExtensionConfig struct {
	Search  *SearchConfig
	Sync    *sync.Config
	Metrics *MetricsConfig
}
type SearchConfig struct {
@ -20,3 +21,12 @@ type SearchConfig struct {
type CVEConfig struct {
	UpdateInterval time.Duration // should be 2 hours or more; if not specified, the default of 24 hours is kept
}

// MetricsConfig enables the metrics extension and configures its
// Prometheus endpoint.
type MetricsConfig struct {
	Enable     bool
	Prometheus *PrometheusConfig
}

// PrometheusConfig holds Prometheus instrumentation settings.
type PrometheusConfig struct {
	Path string // HTTP path the metrics are exposed on; default is "/metrics"
}

View file

@ -1,20 +1,20 @@
//go:build extended
// +build extended
package extensions
import (
"github.com/anuvu/zot/pkg/api/config"
"github.com/anuvu/zot/pkg/extensions/search"
"github.com/anuvu/zot/pkg/extensions/sync"
"github.com/anuvu/zot/pkg/storage"
"github.com/gorilla/mux"
"time"
gqlHandler "github.com/99designs/gqlgen/graphql/handler"
"github.com/anuvu/zot/pkg/api/config"
"github.com/anuvu/zot/pkg/extensions/search"
cveinfo "github.com/anuvu/zot/pkg/extensions/search/cve"
"github.com/anuvu/zot/pkg/extensions/sync"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// DownloadTrivyDB ...
@ -83,6 +83,18 @@ func EnableExtensions(config *config.Config, log log.Logger, rootDir string) {
} else {
log.Info().Msg("Sync registries config not provided, skipping sync")
}
if config.Extensions.Metrics != nil &&
config.Extensions.Metrics.Enable &&
config.Extensions.Metrics.Prometheus != nil {
if config.Extensions.Metrics.Prometheus.Path == "" {
config.Extensions.Metrics.Prometheus.Path = "/metrics"
log.Warn().Msg("Prometheus instrumentation Path not set, changing to '/metrics'.")
}
} else {
log.Info().Msg("Metrics config not provided, skipping Metrics config update")
}
}
// SetupRoutes ...
@ -128,6 +140,11 @@ func SetupRoutes(config *config.Config, router *mux.Router, storeController stor
router.HandleFunc("/sync", postSyncer.Handler).Methods("POST")
}
if config.Extensions.Metrics != nil && config.Extensions.Metrics.Enable {
router.PathPrefix(config.Extensions.Metrics.Prometheus.Path).
Handler(promhttp.Handler())
}
}
// SyncOneImage syncs one image.

View file

@ -11,23 +11,26 @@ import (
"github.com/gorilla/mux"
)
// DownloadTrivyDB ...
// nolint: deadcode,unused
// No-op in the minimal build: CVE scanning is only part of the extended
// build, so there is no Trivy database to download.
func downloadTrivyDB(dbDir string, log log.Logger, updateInterval time.Duration) error {
	return nil
}
// EnableExtensions ...
// Minimal build: no extension is compiled in, so this only logs a warning.
// (The original text contained both the pre- and post-wrap variants of the
// same warning statement — diff residue — which logged the message twice;
// only the wrapped form is kept.)
func EnableExtensions(config *config.Config, log log.Logger, rootDir string) {
	log.Warn().Msg("skipping enabling extensions because given zot binary doesn't support " +
		"any extensions, please build zot full binary for this feature")
}
// SetupRoutes ...
// Minimal build: no extension routes exist, so this only logs a warning.
// (Removed the duplicated pre-wrap variant of the warning statement that the
// diff residue left in place; it logged the same message twice.)
func SetupRoutes(conf *config.Config, router *mux.Router, storeController storage.StoreController, log log.Logger) {
	log.Warn().Msg("skipping setting up extensions routes because given zot binary doesn't support " +
		"any extensions, please build zot full binary for this feature")
}
// SyncOneImage...
// Minimal build: on-demand sync is unavailable; log a warning and report
// that no sync took place. (Removed the duplicated pre-wrap variant of the
// warning statement left by diff residue.)
func SyncOneImage(config *config.Config, log log.Logger, repoName, reference string) (bool, error) {
	log.Warn().Msg("skipping syncing on demand because given zot binary doesn't support " +
		"any extensions, please build zot full binary for this feature")

	return false, nil
}

View file

@ -0,0 +1,35 @@
package monitoring
import (
"math"
"os"
"path/filepath"
)
// MetricServer abstracts over the two metrics implementations: the
// Prometheus-instrumented extended build and the in-process cache used by
// the minimal build (scraped by the node exporter).
type MetricServer interface {
	// SendMetric records a metric update when metrics are enabled.
	SendMetric(interface{})
	// works like SendMetric, but adds the metric regardless of the value of 'enabled' field for MetricServer
	ForceSendMetric(interface{})
	// ReceiveMetrics returns the current metrics snapshot.
	ReceiveMetrics() interface{}
	// IsEnabled reports whether metric collection is currently active.
	IsEnabled() bool
}
func GetDefaultBuckets() []float64 {
return []float64{.05, .5, 1, 5, 30, 60, 600, math.MaxFloat64}
}
func getDirSize(path string) (int64, error) {
var size int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
size += info.Size()
}
return err
})
return size, err
}

View file

@ -0,0 +1,162 @@
// +build extended
package monitoring
import (
"path"
"regexp"
"time"
"github.com/anuvu/zot/pkg/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// metricsNamespace prefixes every metric exposed by zot.
const metricsNamespace = "zot"

// Package-level Prometheus collectors, registered with the default registry
// at init time via promauto.
var (
	httpConnRequests = promauto.NewCounterVec( // nolint: gochecknoglobals
		prometheus.CounterOpts{
			Namespace: metricsNamespace,
			Name:      "http_requests_total",
			Help:      "Total number of http request in zot",
		},
		[]string{"method", "code"},
	)
	httpRepoLatency = promauto.NewSummaryVec( // nolint: gochecknoglobals
		prometheus.SummaryOpts{
			Namespace: metricsNamespace,
			Name:      "http_repo_latency_seconds",
			Help:      "Latency of serving HTTP requests",
		},
		[]string{"repo"},
	)
	httpMethodLatency = promauto.NewHistogramVec( // nolint: gochecknoglobals
		prometheus.HistogramOpts{
			Namespace: metricsNamespace,
			Name:      "http_method_latency_seconds",
			Help:      "Latency of serving HTTP requests",
			Buckets:   GetDefaultBuckets(),
		},
		[]string{"method"},
	)
	repoStorageBytes = promauto.NewGaugeVec( // nolint: gochecknoglobals
		prometheus.GaugeOpts{
			Namespace: metricsNamespace,
			Name:      "repo_storage_bytes",
			Help:      "Storage used per zot repo",
		},
		[]string{"repo"},
	)
	uploadCounter = promauto.NewCounterVec( // nolint: gochecknoglobals
		prometheus.CounterOpts{
			Namespace: metricsNamespace,
			Name:      "repo_uploads_total",
			Help:      "Total number times an image was uploaded",
		},
		[]string{"repo"},
	)
	downloadCounter = promauto.NewCounterVec( // nolint: gochecknoglobals
		prometheus.CounterOpts{
			Namespace: metricsNamespace,
			Name:      "repo_downloads_total",
			Help:      "Total number times an image was downloaded",
		},
		[]string{"repo"},
	)
	serverInfo = promauto.NewGaugeVec( // nolint: gochecknoglobals
		prometheus.GaugeOpts{
			Namespace: metricsNamespace,
			Name:      "info",
			Help:      "Server general information",
		},
		[]string{"commit", "binaryType", "goVersion", "version"},
	)
)
// metricServer is the extended-build MetricServer: metric updates go
// straight into the package-level Prometheus collectors.
type metricServer struct {
	enabled bool // when false, SendMetric is a no-op
	log     log.Logger
}
// NewMetricsServer returns the extended-build MetricServer implementation.
func NewMetricsServer(enabled bool, log log.Logger) MetricServer {
	server := metricServer{enabled: enabled, log: log}

	return &server
}
// implementing the MetricServer interface.

// SendMetric runs the metric-update closure mfunc (expected to be a func()
// touching the package-level Prometheus collectors), but only while metrics
// are enabled. A value of any other type is logged and ignored instead of
// panicking — instrumentation must never take the server down (previously
// an unchecked type assertion panicked on a bad argument).
func (ms *metricServer) SendMetric(mfunc interface{}) {
	if ms.enabled {
		fn, ok := mfunc.(func())
		if !ok {
			ms.log.Error().Msgf("unexpected type %T, expected func()", mfunc)

			return
		}
		fn()
	}
}
// ForceSendMetric runs the metric-update closure regardless of whether
// metrics are enabled (used e.g. for server info published at startup).
// A non-func() argument is logged and ignored instead of panicking, so a
// bad instrumentation call cannot crash the server.
func (ms *metricServer) ForceSendMetric(mfunc interface{}) {
	fn, ok := mfunc.(func())
	if !ok {
		ms.log.Error().Msgf("unexpected type %T, expected func()", mfunc)

		return
	}
	fn()
}
// ReceiveMetrics is a no-op in the extended build: metrics are scraped
// directly from the Prometheus registry, not from an in-process cache.
func (ms *metricServer) ReceiveMetrics() interface{} {
	return nil
}
// IsEnabled reports whether metric collection is active.
func (ms *metricServer) IsEnabled() bool {
	return ms.enabled
}
// IncHTTPConnRequests increments the total HTTP request counter; lvalues are
// the label values for ("method", "code").
func IncHTTPConnRequests(ms MetricServer, lvalues ...string) {
	increment := func() { httpConnRequests.WithLabelValues(lvalues...).Inc() }

	ms.SendMetric(increment)
}
// repoLatencyPathRe extracts the repository name from a dist-spec HTTP path;
// compiled once at package init instead of on every observed request (the
// original re-compiled it inside the closure for each call).
var repoLatencyPathRe = regexp.MustCompile(`\/v2\/(.*?)\/(blobs|tags|manifests)\/(.*)$`)

// ObserveHTTPRepoLatency records the request latency under the repository
// referenced by path; paths that do not match a repository route are
// recorded under the "N/A" label.
func ObserveHTTPRepoLatency(ms MetricServer, path string, latency time.Duration) {
	ms.SendMetric(func() {
		match := repoLatencyPathRe.FindStringSubmatch(path)
		if len(match) > 1 {
			httpRepoLatency.WithLabelValues(match[1]).Observe(latency.Seconds())
		} else {
			httpRepoLatency.WithLabelValues("N/A").Observe(latency.Seconds())
		}
	})
}
// ObserveHTTPMethodLatency records the request latency under the HTTP
// method label.
func ObserveHTTPMethodLatency(ms MetricServer, method string, latency time.Duration) {
	observe := func() { httpMethodLatency.WithLabelValues(method).Observe(latency.Seconds()) }

	ms.SendMetric(observe)
}
// IncDownloadCounter increments the image download counter for repo.
func IncDownloadCounter(ms MetricServer, repo string) {
	increment := func() { downloadCounter.WithLabelValues(repo).Inc() }

	ms.SendMetric(increment)
}
// SetStorageUsage refreshes the storage gauge for repo with the on-disk size
// of rootDir/repo; when the size cannot be determined the gauge is left
// unchanged.
func SetStorageUsage(ms MetricServer, rootDir string, repo string) {
	ms.SendMetric(func() {
		size, err := getDirSize(path.Join(rootDir, repo))
		if err == nil {
			repoStorageBytes.WithLabelValues(repo).Set(float64(size))
		}
	})
}
// IncUploadCounter increments the image upload counter for repo.
func IncUploadCounter(ms MetricServer, repo string) {
	increment := func() { uploadCounter.WithLabelValues(repo).Inc() }

	ms.SendMetric(increment)
}
// SetServerInfo publishes server build information through the labels of the
// zot_info gauge (the sample value itself is always 0); it is sent even when
// metrics are disabled.
func SetServerInfo(ms MetricServer, lvalues ...string) {
	record := func() { serverInfo.WithLabelValues(lvalues...).Set(0) }

	ms.ForceSendMetric(record)
}

View file

@ -0,0 +1,495 @@
// +build minimal
package monitoring
import (
"fmt"
"math"
"path"
"regexp"
"strconv"
"time"
"github.com/anuvu/zot/pkg/log"
)
const (
	metricsNamespace = "zot"
	// Counters
	httpConnRequests = metricsNamespace + ".http.requests"
	repoDownloads    = metricsNamespace + ".repo.downloads"
	repoUploads      = metricsNamespace + ".repo.uploads"
	// Gauges
	repoStorageBytes = metricsNamespace + ".repo.storage.bytes"
	serverInfo       = metricsNamespace + ".info"
	// Summaries
	httpRepoLatencySeconds = metricsNamespace + ".http.repo.latency.seconds"
	// Histograms
	httpMethodLatencySeconds = metricsNamespace + ".http.method.latency.seconds"
	// metrics are disabled when no scrape arrives within this interval
	metricsScrapeTimeout = 2 * time.Minute
	// how often the Run loop checks whether to disable metrics
	metricsScrapeCheckInterval = 30 * time.Second
)
// metricServer is the minimal-build MetricServer: all metric updates are
// serialized through reqChan into a single goroutine (Run) that owns the
// in-memory cache, which the node exporter scrapes via ReceiveMetrics.
type metricServer struct {
	enabled    bool               // toggled off when no scrape arrives within metricsScrapeTimeout
	lastCheck  time.Time          // time of the last scrape (cache read)
	reqChan    chan interface{}   // carries metric updates and control requests to Run
	cache      *MetricsInfo       // current metric values, owned by the Run goroutine
	cacheChan  chan *MetricsInfo  // request/response channel used by scrapes
	bucketsF2S map[float64]string // float64 to string conversion of buckets label
	log        log.Logger
}
// MetricsInfo is the snapshot of all cached metric values returned to the
// node exporter on each scrape.
type MetricsInfo struct {
	Counters   []*CounterValue
	Gauges     []*GaugeValue
	Summaries  []*SummaryValue
	Histograms []*HistogramValue
}

// CounterValue stores info about a metric that is incremented over time,
// such as the number of requests to an HTTP endpoint.
type CounterValue struct {
	Name        string
	Count       int
	LabelNames  []string
	LabelValues []string
}

// GaugeValue stores one value that is updated as time goes on, such as
// the amount of memory allocated.
type GaugeValue struct {
	Name        string
	Value       float64
	LabelNames  []string
	LabelValues []string
}

// SummaryValue stores info about a metric that is incremented over time,
// such as the number of requests to an HTTP endpoint.
type SummaryValue struct {
	Name        string
	Count       int
	Sum         float64
	LabelNames  []string
	LabelValues []string
}

// HistogramValue stores an observation count, a running sum and per-bucket
// counts keyed by the stringified bucket upper bound (see bucketsF2S).
type HistogramValue struct {
	Name        string
	Count       int
	Sum         float64
	Buckets     map[string]int
	LabelNames  []string
	LabelValues []string
}
// implements the MetricServer interface.

// SendMetric forwards a metric update to the cache goroutine; the update is
// dropped when metrics are disabled.
func (ms *metricServer) SendMetric(metric interface{}) {
	if !ms.enabled {
		return
	}

	ms.reqChan <- metric
}
// ForceSendMetric forwards a metric update to the cache goroutine even when
// metrics are disabled.
func (ms *metricServer) ForceSendMetric(metric interface{}) {
	ms.reqChan <- metric
}
// ReceiveMetrics returns the current metrics snapshot; it is called when the
// node exporter scrapes this server, and a scrape (re-)enables metrics.
func (ms *metricServer) ReceiveMetrics() interface{} {
	// NOTE(review): ms.enabled is written here from the scraper's goroutine
	// while the Run goroutine reads/writes it; unlike IsEnabled, this access
	// is not funneled through reqChan, which looks like a data race — confirm.
	if !ms.enabled {
		ms.enabled = true
	}

	// an empty request on cacheChan asks Run for the cache; the reply comes
	// back on the same channel
	ms.cacheChan <- &MetricsInfo{}

	return <-ms.cacheChan
}
// IsEnabled reports whether metrics collection is currently active.
func (ms *metricServer) IsEnabled() (b bool) {
	// send a bool value on the request channel to avoid data race: the Run
	// goroutine answers with the current value of ms.enabled
	ms.reqChan <- b

	return (<-ms.reqChan).(bool)
}
// Run is the cache goroutine's main loop: it owns ms.cache and ms.enabled
// and serializes all access to them. It answers scrape requests on
// cacheChan, applies metric updates arriving on reqChan, and periodically
// disables metrics when no scrape has happened within metricsScrapeTimeout.
// The only code change versus the original is idiomatic: the value bound by
// `switch v := m.(type)` is used directly instead of re-asserting the
// dynamic type in every branch.
func (ms *metricServer) Run() {
	sendAfter := make(chan time.Duration, 1)

	// periodically send a notification to the metric server to check if we can disable metrics
	go func() {
		for {
			t := metricsScrapeCheckInterval
			time.Sleep(t)
			sendAfter <- t
		}
	}()

	for {
		select {
		case <-ms.cacheChan:
			// a scrape: remember when it happened and hand back the cache
			ms.lastCheck = time.Now()
			ms.cacheChan <- ms.cache
		case m := <-ms.reqChan:
			switch v := m.(type) {
			case CounterValue:
				ms.CounterInc(&v)
			case GaugeValue:
				ms.GaugeSet(&v)
			case SummaryValue:
				ms.SummaryObserve(&v)
			case HistogramValue:
				ms.HistogramObserve(&v)
			case bool:
				// IsEnabled request: reply with the current flag
				ms.reqChan <- ms.enabled
			default:
				ms.log.Error().Msgf("unexpected type %T", v)
			}
		case <-sendAfter:
			// Check if we didn't receive a metrics scrape in a while and if so,
			// disable metrics (possible node exporter down/crashed)
			if ms.enabled && time.Since(ms.lastCheck) > metricsScrapeTimeout {
				ms.enabled = false
			}
		}
	}
}
// NewMetricsServer builds the minimal-build metric server with an empty
// cache, pre-computes the string form of each histogram bucket bound, and
// starts the cache goroutine before returning.
func NewMetricsServer(enabled bool, log log.Logger) MetricServer {
	// convert to a map for returning easily the string corresponding to a bucket
	float2String := make(map[float64]string)
	for _, bound := range GetDefaultBuckets() {
		if bound == math.MaxFloat64 {
			float2String[bound] = "+Inf"
		} else {
			float2String[bound] = strconv.FormatFloat(bound, 'f', -1, 64)
		}
	}

	server := &metricServer{
		enabled:   enabled,
		reqChan:   make(chan interface{}),
		cacheChan: make(chan *MetricsInfo),
		cache: &MetricsInfo{
			Counters:   make([]*CounterValue, 0),
			Gauges:     make([]*GaugeValue, 0),
			Summaries:  make([]*SummaryValue, 0),
			Histograms: make([]*HistogramValue, 0),
		},
		bucketsF2S: float2String,
		log:        log,
	}

	go server.Run()

	return server
}
// GetCounters contains a map with key=CounterName and value=CounterLabels.
func GetCounters() map[string][]string {
	counters := make(map[string][]string, 3)
	counters[httpConnRequests] = []string{"method", "code"}
	counters[repoDownloads] = []string{"repo"}
	counters[repoUploads] = []string{"repo"}

	return counters
}
// GetGauges contains a map with key=GaugeName and value=GaugeLabels.
func GetGauges() map[string][]string {
	gauges := make(map[string][]string, 2)
	gauges[repoStorageBytes] = []string{"repo"}
	gauges[serverInfo] = []string{"commit", "binaryType", "goVersion", "version"}

	return gauges
}
// GetSummaries contains a map with key=SummaryName and value=SummaryLabels.
func GetSummaries() map[string][]string {
	summaries := make(map[string][]string, 1)
	summaries[httpRepoLatencySeconds] = []string{"repo"}

	return summaries
}
// GetHistograms contains a map with key=HistogramName and value=HistogramLabels.
func GetHistograms() map[string][]string {
	histograms := make(map[string][]string, 1)
	histograms[httpMethodLatencySeconds] = []string{"method"}

	return histograms
}
// isMetricMatch reports whether the searched label values identify the
// cached metric: true when both label sets are empty, or when they have the
// same length and identical values element by element.
//
// Fix: the original fell through to `return true` whenever the two slices
// had DIFFERENT lengths, so a lookup could match a cached metric with an
// unrelated label set; a length mismatch is now an explicit non-match.
func isMetricMatch(lValues []string, metricValues []string) bool {
	if len(lValues) != len(metricValues) {
		return false
	}

	for i, v := range metricValues {
		if v != lValues[i] {
			return false
		}
	}

	return true
}
// findCounterValueIndex locates the counter with the given name and label
// values in metricSlice; returns (-1, false) when absent.
func findCounterValueIndex(metricSlice []*CounterValue, name string, labelValues []string) (int, bool) {
	for idx, cached := range metricSlice {
		if cached.Name != name {
			continue
		}

		if isMetricMatch(labelValues, cached.LabelValues) {
			return idx, true
		}
	}

	return -1, false
}
// findGaugeValueIndex locates the gauge with the given name and label
// values in metricSlice; returns (-1, false) when absent.
func findGaugeValueIndex(metricSlice []*GaugeValue, name string, labelValues []string) (int, bool) {
	for idx, cached := range metricSlice {
		if cached.Name != name {
			continue
		}

		if isMetricMatch(labelValues, cached.LabelValues) {
			return idx, true
		}
	}

	return -1, false
}
// findSummaryValueIndex locates the summary with the given name and label
// values in metricSlice; returns (-1, false) when absent.
func findSummaryValueIndex(metricSlice []*SummaryValue, name string, labelValues []string) (int, bool) {
	for idx, cached := range metricSlice {
		if cached.Name != name {
			continue
		}

		if isMetricMatch(labelValues, cached.LabelValues) {
			return idx, true
		}
	}

	return -1, false
}
// findHistogramValueIndex locates the histogram with the given name and
// label values in metricSlice; returns (-1, false) when absent.
func findHistogramValueIndex(metricSlice []*HistogramValue, name string, labelValues []string) (int, bool) {
	for idx, cached := range metricSlice {
		if cached.Name != name {
			continue
		}

		if isMetricMatch(labelValues, cached.LabelValues) {
			return idx, true
		}
	}

	return -1, false
}
// CounterInc increments the cached counter identified by cv's name and
// label values, adding it to the cache on first observation.
func (ms *metricServer) CounterInc(cv *CounterValue) {
	knownLabels, found := GetCounters()[cv.Name] // declared labels for this counter

	if err := sanityChecks(cv.Name, knownLabels, found, cv.LabelNames, cv.LabelValues); err != nil {
		// Never panic/stop the server because of instrumentation: log the
		// problem so it is caught during development of new metrics.
		ms.log.Error().Err(err).Msg("Instrumentation error")

		return
	}

	if idx, cached := findCounterValueIndex(ms.cache.Counters, cv.Name, cv.LabelValues); cached {
		ms.cache.Counters[idx].Count++
	} else {
		// first observation for this label combination: cache it
		cv.Count = 1
		ms.cache.Counters = append(ms.cache.Counters, cv)
	}
}
// GaugeSet stores gv's value for the gauge identified by its name and
// label values, adding the gauge to the cache on first observation.
func (ms *metricServer) GaugeSet(gv *GaugeValue) {
	knownLabels, found := GetGauges()[gv.Name] // declared labels for this gauge

	if err := sanityChecks(gv.Name, knownLabels, found, gv.LabelNames, gv.LabelValues); err != nil {
		ms.log.Error().Err(err).Msg("Instrumentation error")

		return
	}

	if idx, cached := findGaugeValueIndex(ms.cache.Gauges, gv.Name, gv.LabelValues); cached {
		ms.cache.Gauges[idx].Value = gv.Value
	} else {
		// first observation for this label combination: cache it
		ms.cache.Gauges = append(ms.cache.Gauges, gv)
	}
}
// SummaryObserve folds sv into the cached summary identified by its name
// and label values (count++, sum += sv.Sum), adding it on first sample.
func (ms *metricServer) SummaryObserve(sv *SummaryValue) {
	knownLabels, found := GetSummaries()[sv.Name] // declared labels for this summary

	if err := sanityChecks(sv.Name, knownLabels, found, sv.LabelNames, sv.LabelValues); err != nil {
		ms.log.Error().Err(err).Msg("Instrumentation error")

		return
	}

	idx, cached := findSummaryValueIndex(ms.cache.Summaries, sv.Name, sv.LabelValues)
	if cached {
		ms.cache.Summaries[idx].Count++
		ms.cache.Summaries[idx].Sum += sv.Sum
	} else {
		sv.Count = 1 // first sample, no need to increment
		ms.cache.Summaries = append(ms.cache.Summaries, sv)
	}
}
// HistogramObserve folds hv into the cached histogram identified by its
// name and label values: count/sum are accumulated and every default
// bucket whose upper bound fits hv.Sum is incremented. The histogram is
// added to the cache with freshly initialized buckets on first sample.
func (ms *metricServer) HistogramObserve(hv *HistogramValue) {
	knownLabels, found := GetHistograms()[hv.Name] // declared labels for this histogram

	if err := sanityChecks(hv.Name, knownLabels, found, hv.LabelNames, hv.LabelValues); err != nil {
		ms.log.Error().Err(err).Msg("Instrumentation error")

		return
	}

	idx, cached := findHistogramValueIndex(ms.cache.Histograms, hv.Name, hv.LabelValues)
	if !cached {
		// First sample for this label combination: create every default
		// bucket, counting the sample in those whose upper bound it fits.
		counts := make(map[string]int)

		for _, upperBound := range GetDefaultBuckets() {
			counts[ms.bucketsF2S[upperBound]] = 0
			if hv.Sum <= upperBound {
				counts[ms.bucketsF2S[upperBound]] = 1
			}
		}

		hv.Count = 1 // first sample, no need to increment
		hv.Buckets = counts
		ms.cache.Histograms = append(ms.cache.Histograms, hv)

		return
	}

	entry := ms.cache.Histograms[idx]
	entry.Count++
	entry.Sum += hv.Sum

	for _, upperBound := range GetDefaultBuckets() {
		if hv.Sum <= upperBound {
			entry.Buckets[ms.bucketsF2S[upperBound]]++
		}
	}
}
// nolint: goerr113
// sanityChecks validates an incoming metric sample against the metric's
// declaration: the metric name must be known (found == true), the label
// names must be as many as the label values and as many as the declared
// labels, and the label names must equal the declared ones in order.
// Returns nil when the sample is well-formed.
func sanityChecks(name string, knownLabels []string, found bool, labelNames []string, labelValues []string) error {
	if !found {
		return fmt.Errorf("metric %s: not found", name)
	}

	if len(labelNames) != len(labelValues) ||
		len(labelNames) != len(knownLabels) {
		return fmt.Errorf("metric %s: label size mismatch", name)
	}

	// The list of label names declared for the metric must match, in order,
	// what was provided in labelNames.
	// Fix: this branch previously reported the misleading "label size
	// mismatch" although the sizes are equal here and only a name differs.
	for i, label := range labelNames {
		if label != knownLabels[i] {
			return fmt.Errorf("metric %s: label name mismatch", name)
		}
	}

	return nil
}
// IncHTTPConnRequests bumps the HTTP request counter; lvs are the label
// values in declared order: method, code.
func IncHTTPConnRequests(ms MetricServer, lvs ...string) {
	ms.SendMetric(CounterValue{
		Name:        httpConnRequests,
		LabelNames:  []string{"method", "code"},
		LabelValues: lvs,
	})
}
// repoLatencyPathRE extracts the repository name from a dist-spec HTTP
// path such as /v2/<repo>/manifests/<ref>.
var repoLatencyPathRE = regexp.MustCompile(`\/v2\/(.*?)\/(blobs|tags|manifests)\/(.*)$`)

// ObserveHTTPRepoLatency records an HTTP request latency summary sample
// against the repository parsed from path ("N/A" when the path is not
// repo-scoped). No-op while metrics are disabled.
//
// Fix: the regular expression is now compiled once at package init instead
// of being re-compiled on every request.
func ObserveHTTPRepoLatency(ms MetricServer, path string, latency time.Duration) {
	if !ms.(*metricServer).enabled {
		return
	}

	lvs := []string{"N/A"}
	if match := repoLatencyPathRE.FindStringSubmatch(path); len(match) > 1 {
		lvs = []string{match[1]}
	}

	ms.SendMetric(SummaryValue{
		Name:        httpRepoLatencySeconds,
		Sum:         latency.Seconds(),
		LabelNames:  []string{"repo"},
		LabelValues: lvs,
	})
}
// ObserveHTTPMethodLatency records an HTTP request latency histogram
// sample, labelled by HTTP method.
func ObserveHTTPMethodLatency(ms MetricServer, method string, latency time.Duration) {
	sample := HistogramValue{
		Name:        httpMethodLatencySeconds,
		Sum:         latency.Seconds(), // convenient temporary store for the observed value
		LabelNames:  []string{"method"},
		LabelValues: []string{method},
	}

	ms.SendMetric(sample)
}
// IncDownloadCounter bumps the per-repository image download counter.
func IncDownloadCounter(ms MetricServer, repo string) {
	ms.SendMetric(CounterValue{
		Name:        repoDownloads,
		LabelNames:  []string{"repo"},
		LabelValues: []string{repo},
	})
}
// IncUploadCounter bumps the per-repository image upload counter.
func IncUploadCounter(ms MetricServer, repo string) {
	ms.SendMetric(CounterValue{
		Name:        repoUploads,
		LabelNames:  []string{"repo"},
		LabelValues: []string{repo},
	})
}
// SetStorageUsage measures the on-disk size of a repository and publishes
// it as a gauge. Force-sent, i.e. recorded even while metrics are disabled
// (same as SetServerInfo).
func SetStorageUsage(ms MetricServer, rootDir string, repo string) {
	repoSize, err := getDirSize(path.Join(rootDir, repo))
	if err != nil {
		ms.(*metricServer).log.Error().Err(err).Msg("failed to set storage usage")

		// Fix: previously a 0-valued gauge was still emitted on error,
		// clobbering the last known usage whenever sizing failed; keep the
		// previous reading instead.
		return
	}

	ms.ForceSendMetric(GaugeValue{
		Name:        repoStorageBytes,
		Value:       float64(repoSize),
		LabelNames:  []string{"repo"},
		LabelValues: []string{repo},
	})
}
// SetServerInfo publishes build/runtime details as a constant gauge; lvs
// are the label values in declared order: commit, binaryType, goVersion,
// version. Set once at zot startup and force-sent, so it is recorded
// regardless of whether metrics are enabled.
func SetServerInfo(ms MetricServer, lvs ...string) {
	ms.ForceSendMetric(GaugeValue{
		Name:        serverInfo,
		Value:       0,
		LabelNames:  []string{"commit", "binaryType", "goVersion", "version"},
		LabelValues: lvs,
	})
}
func GetMaxIdleScrapeInterval() time.Duration {
return metricsScrapeTimeout + metricsScrapeCheckInterval
}

View file

@ -0,0 +1,86 @@
// +build minimal
package monitoring
import (
"crypto/tls"
"encoding/json"
"net/http"
"time"
"github.com/anuvu/zot/pkg/log"
)
const (
httpTimeout = 1 * time.Minute
)
// MetricsConfig is used to configure the creation of a Node Exporter http client
// that will connect to a particular zot instance.
type MetricsConfig struct {
	// Address of the zot http server (scheme + host + port).
	Address string

	// Transport to use for the http client.
	Transport *http.Transport

	// HTTPClient is the client to use; when nil, NewMetricsClient installs
	// a default one (see newHTTPMetricsClient).
	HTTPClient *http.Client
}
// MetricsClient retrieves in-memory metrics from a zot instance over HTTP.
type MetricsClient struct {
	// headers to attach to outgoing requests
	// NOTE(review): makeGETRequest does not currently attach these — confirm intent.
	headers http.Header
	// config holds the target address and the HTTP client to use
	config MetricsConfig
	log    log.Logger
}
// newHTTPMetricsClient builds the default HTTP client used to scrape the
// zot metrics endpoint; TLS certificate verification is skipped.
func newHTTPMetricsClient() *http.Client {
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint: gosec

	return &http.Client{
		Transport: transport,
		Timeout:   httpTimeout,
	}
}
// NewMetricsClient creates a MetricsClient that can be used to retrieve in
// memory metrics. The returned client must be cached and reused by the
// Node Exporter in order to prevent concurrent memory leaks.
func NewMetricsClient(config *MetricsConfig, logger log.Logger) *MetricsClient {
	if config.HTTPClient == nil {
		// fall back to the default insecure client
		config.HTTPClient = newHTTPMetricsClient()
	}

	return &MetricsClient{
		config:  *config,
		headers: make(http.Header),
		log:     logger,
	}
}
// GetMetrics fetches the current in-memory metrics from the zot server's
// /v2/metrics endpoint.
func (mc *MetricsClient) GetMetrics() (*MetricsInfo, error) {
	var metrics MetricsInfo

	_, err := mc.makeGETRequest(mc.config.Address+"/v2/metrics", &metrics)
	if err != nil {
		return nil, err
	}

	return &metrics, nil
}
// makeGETRequest issues a GET to url, JSON-decodes the response body into
// resultsPtr, and returns the response headers.
//
// NOTE(review): the HTTP status code is never inspected — a non-2xx error
// body would be JSON-decoded as though it were a valid result; confirm
// whether that is intended.
func (mc *MetricsClient) makeGETRequest(url string, resultsPtr interface{}) (http.Header, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}

	resp, err := mc.config.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	// release the body back to the transport for connection reuse
	defer resp.Body.Close()

	if err := json.NewDecoder(resp.Body).Decode(resultsPtr); err != nil {
		return nil, err
	}

	return resp.Header, nil
}

View file

@ -1,3 +1,5 @@
// +build extended
package common_test
import (
@ -13,6 +15,7 @@ import (
"github.com/anuvu/zot/pkg/api"
"github.com/anuvu/zot/pkg/api/config"
extconf "github.com/anuvu/zot/pkg/extensions/config"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/extensions/search/common"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
@ -159,7 +162,8 @@ func TestImageFormat(t *testing.T) {
log := log.NewLogger("debug", "")
dbDir := "../../../../test/data"
defaultStore := storage.NewImageStore(dbDir, false, false, log)
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := storage.NewImageStore(dbDir, false, false, log, metrics)
storeController := storage.StoreController{DefaultStore: defaultStore}
olu := common.NewOciLayoutUtils(storeController, log)
@ -444,9 +448,10 @@ func TestUtilsMethod(t *testing.T) {
}
defer os.RemoveAll(subRootDir)
defaultStore := storage.NewImageStore(rootDir, false, false, log)
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := storage.NewImageStore(rootDir, false, false, log, metrics)
subStore := storage.NewImageStore(subRootDir, false, false, log)
subStore := storage.NewImageStore(subRootDir, false, false, log, metrics)
subStoreMap := make(map[string]storage.ImageStore)

View file

@ -1,3 +1,5 @@
// +build extended
// nolint: lll
package cveinfo_test
@ -15,6 +17,7 @@ import (
"github.com/anuvu/zot/pkg/api"
"github.com/anuvu/zot/pkg/api/config"
extconf "github.com/anuvu/zot/pkg/extensions/config"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/extensions/search/common"
cveinfo "github.com/anuvu/zot/pkg/extensions/search/cve"
"github.com/anuvu/zot/pkg/log"
@ -94,8 +97,9 @@ func testSetup() error {
}
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController := storage.StoreController{DefaultStore: storage.NewImageStore(dir, false, false, log)}
storeController := storage.StoreController{DefaultStore: storage.NewImageStore(dir, false, false, log, metrics)}
layoutUtils := common.NewOciLayoutUtils(storeController, log)
@ -410,13 +414,14 @@ func TestMultipleStoragePath(t *testing.T) {
defer os.RemoveAll(thirdRootDir)
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
// Create ImageStore
firstStore := storage.NewImageStore(firstRootDir, false, false, log)
firstStore := storage.NewImageStore(firstRootDir, false, false, log, metrics)
secondStore := storage.NewImageStore(secondRootDir, false, false, log)
secondStore := storage.NewImageStore(secondRootDir, false, false, log, metrics)
thirdStore := storage.NewImageStore(thirdRootDir, false, false, log)
thirdStore := storage.NewImageStore(thirdRootDir, false, false, log, metrics)
storeController := storage.StoreController{}
@ -675,7 +680,8 @@ func TestCVESearch(t *testing.T) {
func TestCVEConfig(t *testing.T) {
Convey("Verify CVE config", t, func() {
conf := config.New()
port := conf.HTTP.Port
port := getFreePort()
conf.HTTP.Port = port
baseURL := getBaseURL(port)
htpasswdPath := makeHtpasswdFile()
defer os.Remove(htpasswdPath)

View file

@ -1,3 +1,5 @@
// +build extended
// nolint: gochecknoinits
package digestinfo_test
@ -14,6 +16,7 @@ import (
"github.com/anuvu/zot/pkg/api"
"github.com/anuvu/zot/pkg/api/config"
extconf "github.com/anuvu/zot/pkg/extensions/config"
"github.com/anuvu/zot/pkg/extensions/monitoring"
digestinfo "github.com/anuvu/zot/pkg/extensions/search/digest"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
@ -97,8 +100,8 @@ func testSetup() error {
}
log := log.NewLogger("debug", "")
storeController := storage.StoreController{DefaultStore: storage.NewImageStore(rootDir, false, false, log)}
metrics := monitoring.NewMetricsServer(false, log)
storeController := storage.StoreController{DefaultStore: storage.NewImageStore(rootDir, false, false, log, metrics)}
digestInfo = digestinfo.NewDigestInfo(storeController, log)

View file

@ -1,3 +1,5 @@
// +build extended
package sync_test
import (

View file

@ -1,15 +1,12 @@
package log
import (
"encoding/base64"
"net/http"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
"github.com/rs/zerolog"
)
@ -69,143 +66,6 @@ func NewAuditLogger(level string, audit string) *Logger {
return &Logger{Logger: auditLog.With().Timestamp().Logger()}
}
type statusWriter struct {
http.ResponseWriter
status int
length int
}
func (w *statusWriter) WriteHeader(status int) {
w.status = status
w.ResponseWriter.WriteHeader(status)
}
func (w *statusWriter) Write(b []byte) (int, error) {
if w.status == 0 {
w.status = 200
}
n, err := w.ResponseWriter.Write(b)
w.length += n
return n, err
}
// SessionLogger logs session details.
func SessionLogger(log Logger) mux.MiddlewareFunc {
l := log.With().Str("module", "http").Logger()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Start timer
start := time.Now()
path := r.URL.Path
raw := r.URL.RawQuery
sw := statusWriter{ResponseWriter: w}
// Process request
next.ServeHTTP(&sw, r)
// Stop timer
end := time.Now()
latency := end.Sub(start)
if latency > time.Minute {
// Truncate in a golang < 1.8 safe way
latency -= latency % time.Second
}
clientIP := r.RemoteAddr
method := r.Method
headers := map[string][]string{}
username := ""
log := l.Info()
for key, value := range r.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
// nolint:gomnd
if len(pair) == 2 {
username = pair[0]
log = log.Str("username", username)
}
}
}
value = []string{"******"}
}
headers[key] = value
}
statusCode := sw.status
bodySize := sw.length
if raw != "" {
path = path + "?" + raw
}
log.Str("clientIP", clientIP).
Str("method", method).
Str("path", path).
Int("statusCode", statusCode).
Str("latency", latency.String()).
Int("bodySize", bodySize).
Interface("headers", headers).
Msg("HTTP API")
})
}
}
func SessionAuditLogger(audit *Logger) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
raw := r.URL.RawQuery
sw := statusWriter{ResponseWriter: w}
// Process request
next.ServeHTTP(&sw, r)
clientIP := r.RemoteAddr
method := r.Method
username := ""
for key, value := range r.Header {
if key == "Authorization" { // anonymize from logs
s := strings.SplitN(value[0], " ", 2)
if len(s) == 2 && strings.EqualFold(s[0], "basic") {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
pair := strings.SplitN(string(b), ":", 2)
// nolint:gomnd
if len(pair) == 2 {
username = pair[0]
}
}
}
}
}
statusCode := sw.status
if raw != "" {
path = path + "?" + raw
}
if (method == http.MethodPost || method == http.MethodPut ||
method == http.MethodPatch || method == http.MethodDelete) &&
(statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) {
audit.Info().
Str("clientIP", clientIP).
Str("subject", username).
Str("action", method).
Str("object", path).
Int("status", statusCode).
Msg("HTTP API Audit")
}
})
}
}
// goroutineID adds goroutine-id to logs to help debug concurrency issues.
func goroutineID() int {
var buf [64]byte

View file

@ -16,6 +16,7 @@ import (
"time"
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/extensions/monitoring"
zlog "github.com/anuvu/zot/pkg/log"
apexlog "github.com/apex/log"
guuid "github.com/gofrs/uuid"
@ -53,6 +54,7 @@ type ImageStoreFS struct {
gc bool
dedupe bool
log zerolog.Logger
metrics monitoring.MetricServer
}
func (is *ImageStoreFS) RootDir() string {
@ -102,7 +104,7 @@ func (sc StoreController) GetImageStore(name string) ImageStore {
}
// NewImageStore returns a new image store backed by a file storage.
func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger) ImageStore {
func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger, m monitoring.MetricServer) ImageStore {
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
if err := os.MkdirAll(rootDir, 0700); err != nil {
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
@ -117,6 +119,7 @@ func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger) ImageS
gc: gc,
dedupe: dedupe,
log: log.With().Caller().Logger(),
metrics: m,
}
if dedupe {
@ -430,6 +433,8 @@ func (is *ImageStoreFS) GetImageManifest(repo string, reference string) ([]byte,
return nil, "", "", err
}
monitoring.IncDownloadCounter(is.metrics, repo)
return buf, digest.String(), mediaType, nil
}
@ -592,6 +597,9 @@ func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaTyp
}
}
monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
monitoring.IncUploadCounter(is.metrics, repo)
return desc.Digest.String(), nil
}
@ -703,6 +711,8 @@ func (is *ImageStoreFS) DeleteImageManifest(repo string, reference string) error
_ = os.Remove(p)
}
monitoring.SetStorageUsage(is.metrics, is.rootDir, repo)
return nil
}

View file

@ -15,6 +15,7 @@ import (
"time"
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
godigest "github.com/opencontainers/go-digest"
@ -31,7 +32,9 @@ func TestAPIs(t *testing.T) {
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := storage.NewImageStore(dir, true, true, log, metrics)
Convey("Repo layout", t, func(c C) {
repoName := "test"
@ -694,7 +697,9 @@ func TestDedupe(t *testing.T) {
}
defer os.RemoveAll(dir)
is := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
is := storage.NewImageStore(dir, true, true, log, metrics)
So(is.DedupeBlob("", "", ""), ShouldNotBeNil)
})
@ -702,6 +707,9 @@ func TestDedupe(t *testing.T) {
}
func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
Convey("Invalid root dir", t, func(c C) {
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
@ -709,9 +717,10 @@ func TestNegativeCases(t *testing.T) {
}
os.RemoveAll(dir)
So(storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
So(storage.NewImageStore(dir, true, true, log, metrics), ShouldNotBeNil)
if os.Geteuid() != 0 {
So(storage.NewImageStore("/deadBEEF", true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
So(storage.NewImageStore("/deadBEEF", true, true, log, metrics), ShouldBeNil)
}
})
@ -721,7 +730,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
err = os.Chmod(dir, 0000) // remove all perms
So(err, ShouldBeNil)
if os.Geteuid() != 0 {
@ -751,7 +760,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
@ -829,7 +838,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
@ -852,7 +861,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Chmod(path.Join(dir, "test", "index.json"), 0000), ShouldBeNil)
@ -875,7 +884,7 @@ func TestNegativeCases(t *testing.T) {
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
@ -915,7 +924,7 @@ func TestNegativeCases(t *testing.T) {
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log, metrics)
v, err := il.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
@ -1065,13 +1074,14 @@ func TestStorageHandler(t *testing.T) {
defer os.RemoveAll(thirdRootDir)
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
// Create ImageStore
firstStore := storage.NewImageStore(firstRootDir, false, false, log)
firstStore := storage.NewImageStore(firstRootDir, false, false, log, metrics)
secondStore := storage.NewImageStore(secondRootDir, false, false, log)
secondStore := storage.NewImageStore(secondRootDir, false, false, log, metrics)
thirdStore := storage.NewImageStore(thirdRootDir, false, false, log)
thirdStore := storage.NewImageStore(thirdRootDir, false, false, log, metrics)
storeController := storage.StoreController{}