
Added clustering github workflow

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
Petu Eusebiu 2022-03-02 20:10:06 +02:00 committed by Ramkumar Chinchani
parent b8010e1ee4
commit fa27e22404
4 changed files with 244 additions and 0 deletions

.github/workflows/cluster.yaml (new file)

@@ -0,0 +1,162 @@
name: "Clustering test"
on:
push:
branches:
- main
pull_request:
branches: [main]
release:
types:
- published
jobs:
client-tools:
name: Stateless zot with shared reliable storage
runs-on: ubuntu-latest
# services:
# minio:
# image: minio/minio:edge-cicd
# env:
# MINIO_ROOT_USER: minioadmin
# MINIO_ROOT_PASSWORD: minioadmin
# ports:
# - 9000:9000
# volumes:
# - /tmp/data:/data
# options: --name=minio --health-cmd "curl http://localhost:9000/minio/health/live"
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: 1.17.x
- name: Install dependencies
run: |
cd $GITHUB_WORKSPACE
go get -u github.com/swaggo/swag/cmd/swag
go mod download
sudo apt-get update
sudo apt-get -y install rpm uidmap
# install skopeo
. /etc/os-release
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install skopeo
# install notation
curl -Lo notation.tar.gz https://github.com/notaryproject/notation/releases/download/v0.7.1-alpha.1/notation_0.7.1-alpha.1_linux_amd64.tar.gz
sudo tar xvzf notation.tar.gz -C /usr/bin notation
# install oras
curl -LO https://github.com/oras-project/oras/releases/download/v0.12.0/oras_0.12.0_linux_amd64.tar.gz
mkdir -p oras-install/
tar -zxf oras_0.12.0_*.tar.gz -C oras-install/
sudo mv oras-install/oras /usr/bin/
rm -rf oras_0.12.0_*.tar.gz oras-install/
# install haproxy
sudo apt-get install haproxy
- name: Setup minio service
run: |
docker run -d -p 9000:9000 --name minio \
-e "MINIO_ACCESS_KEY=minioadmin" \
-e "MINIO_SECRET_KEY=minioadmin" \
-v /tmp/data:/data \
-v /tmp/config:/root/.minio \
--health-cmd "curl http://localhost:9000/minio/health/live" \
minio/minio:edge-cicd server /data
- name: Install py minio
run: pip3 install minio
- name: Create minio bucket
run: |
python3 - <<'EOF'
from minio import Minio
try:
minio = Minio(
'localhost:9000',
access_key='minioadmin',
secret_key='minioadmin',
secure=False
)
except Exception as ex:
raise
minio.make_bucket('zot-storage')
print(f'{minio.list_buckets()}')
EOF
- name: Run haproxy
run: |
sudo haproxy -d -f examples/cluster/haproxy.cfg -D
sleep 10
- name: Prepare configuration files
run: |
cp test/cluster/config-minio.json test/cluster/config-minio1.json
sed -i 's/8081/8082/g' test/cluster/config-minio.json
cp test/cluster/config-minio.json test/cluster/config-minio2.json
sed -i 's/8082/8083/g' test/cluster/config-minio.json
cp test/cluster/config-minio.json test/cluster/config-minio3.json
- name: Run push-pull tests
run: |
make binary
./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
sleep 10
# run tests
skopeo --debug copy --format=oci --dest-tls-verify=false docker://ghcr.io/project-zot/golang:1.17 docker://localhost:8080/golang:1.17
skopeo --debug copy --src-tls-verify=false docker://localhost:8080/golang:1.17 oci:golang:1.17
echo "{\"name\":\"foo\",\"value\":\"bar\"}" > config.json
echo "hello world" > artifact.txt
oras push localhost:8080/hello-artifact:v2 \
--manifest-config config.json:application/vnd.acme.rocket.config.v1+json \
artifact.txt:text/plain -d -v
rm -f artifact.txt # first delete the file
oras pull localhost:8080/hello-artifact:v2 -d -v -a
grep -q "hello world" artifact.txt # should print "hello world"
if [ $? -ne 0 ]; then \
killall -r zot-*; \
exit 1; \
fi
killall -r zot-*
env:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Run benchmark
run: |
make bench
./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
sleep 10
# run zb
bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
env:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
# Download previous benchmark result from cache (if exists)
- name: Download previous benchmark data
uses: actions/cache@v1
with:
path: ./cache
key: ${{ runner.os }}-benchmark-stateless-cluster
# Run `github-action-benchmark` action
- name: Store benchmark result
uses: benchmark-action/github-action-benchmark@v1.11.3
with:
# What benchmark tool the output.txt came from
tool: 'customBiggerIsBetter'
# Where the output from the benchmark tool is stored
output-file-path: ci-cd.json
# Where the previous data file is stored
external-data-json-path: ./cache/benchmark-data.json
# Workflow will fail when an alert happens
fail-on-alert: true
# Upload the updated cache file for the next job by actions/cache
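Outside CI, the same smoke test condenses into a short local run. This is a rough sketch only, assuming Docker, haproxy and skopeo are installed and the commands are run from a checkout of the repository; every path, port and credential below is taken from the workflow above.

# start minio as the shared S3-compatible backend
docker run -d -p 9000:9000 --name minio \
  -e "MINIO_ACCESS_KEY=minioadmin" -e "MINIO_SECRET_KEY=minioadmin" \
  -v /tmp/data:/data minio/minio:edge-cicd server /data
# create the zot-storage bucket, e.g. with the Python/minio snippet from the workflow

# derive three per-instance configs listening on 8081, 8082 and 8083
cp test/cluster/config-minio.json test/cluster/config-minio1.json
sed -i 's/8081/8082/g' test/cluster/config-minio.json
cp test/cluster/config-minio.json test/cluster/config-minio2.json
sed -i 's/8082/8083/g' test/cluster/config-minio.json
cp test/cluster/config-minio.json test/cluster/config-minio3.json

# haproxy fronts the three instances on 127.0.0.1:8080
sudo haproxy -D -f examples/cluster/haproxy.cfg

# build and start the three stateless zot instances
make binary
export AWS_ACCESS_KEY_ID=minioadmin AWS_SECRET_ACCESS_KEY=minioadmin
./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &

# exercise the cluster through the load balancer
skopeo copy --format=oci --dest-tls-verify=false \
  docker://ghcr.io/project-zot/golang:1.17 docker://localhost:8080/golang:1.17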

README.md

@@ -15,6 +15,7 @@ https://zotregistry.io
## What's new?
* Supports container image signatures - [cosign](https://github.com/sigstore/cosign) and [notation](https://github.com/notaryproject/notation)
* Multi-arch support
* Clustering support
# Features
* Conforms to [OCI distribution spec](https://github.com/opencontainers/distribution-spec) APIs
@@ -368,6 +369,13 @@ bin/zxp config _config-file_
## Enable Metrics
In the zot with all extensions case see [configuration example](./examples/config-metrics.json) for enabling metrics
## Clustering
zot supports clustering by running multiple stateless zot instances over shared S3 storage, with haproxy (using sticky sessions) load balancing in front of them.
- haproxy [configuration example](./examples/cluster/haproxy.cfg)
- zot s3 [configuration example](./examples/config-s3.json)
# Contributing
We encourage and support an active, healthy community of contributors.
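The clustering section added above relies on every zot instance sharing the same S3 bucket, so content pushed through one instance is visible from any other. A quick way to check this, assuming the three-instance layout from the CI workflow (zot on ports 8081-8083, haproxy on 8080); the local OCI layout name below is arbitrary:

# push an image directly to the first zot instance
skopeo copy --format=oci --dest-tls-verify=false \
  docker://ghcr.io/project-zot/golang:1.17 docker://localhost:8081/golang:1.17

# pull the same image back from a different instance (or via haproxy on :8080)
skopeo copy --src-tls-verify=false \
  docker://localhost:8082/golang:1.17 oci:golang-from-zot:1.17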

examples/cluster/haproxy.cfg (new file)

@@ -0,0 +1,49 @@
global
    log /dev/log local0
    log /dev/log local1 notice
    chroot /var/lib/haproxy
    maxconn 2000
    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
    stats timeout 30s
    user haproxy
    group haproxy
    daemon

    # Default SSL material locations
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private

    # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
    ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  50000
    timeout server  50000
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

frontend local_zot
    bind 127.0.0.1:8080
    mode http
    default_backend my_zots

backend my_zots
    mode http
    balance leastconn
    stick-table type ip size 1m expire 1h
    stick on src
    server zot1 127.0.0.1:8081 check
    server zot2 127.0.0.1:8082 check
    server zot3 127.0.0.1:8083 check
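The backend combines "balance leastconn" with "stick on src", so requests are spread by connection count while a given client IP keeps hitting the backend it was first assigned to. Before starting the load balancer, the file can be syntax-checked; a small sketch using haproxy's configuration check mode and the daemon invocation from the workflow:

# validate the configuration without starting haproxy
haproxy -c -f examples/cluster/haproxy.cfg

# run it as a daemon, as the CI workflow does
sudo haproxy -d -f examples/cluster/haproxy.cfg -D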

test/cluster/config-minio.json (new file)

@@ -0,0 +1,25 @@
{
    "version": "0.1.0-dev",
    "storage": {
        "rootDirectory": "/zot",
        "gc": false,
        "dedupe": false,
        "storageDriver": {
            "name": "s3",
            "region": "us-east-2",
            "bucket": "zot-storage",
            "regionendpoint": "http://localhost:9000",
            "secure": false,
            "skipverify": false
        }
    },
    "http": {
        "address": "127.0.0.1",
        "port": "8081",
        "ReadOnly": false
    },
    "log": {
        "level": "debug",
        "output": "/dev/null"
    }
}
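The S3 credentials are deliberately not part of this file; as in the workflow above, the storage driver picks them up from the environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY). A minimal sketch of running a single instance against the minio endpoint configured here:

# credentials for the S3/minio backend, matching the CI workflow
export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin

# start one zot instance with this config (listens on 127.0.0.1:8081)
make binary
./bin/zot-linux-amd64 serve test/cluster/config-minio.json

For a real S3 bucket rather than minio, the regionendpoint field would typically be dropped and region/bucket pointed at the actual bucket, along the lines of the examples/config-s3.json referenced by the README.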