Mirror of https://github.com/project-zot/zot.git (synced 2024-12-30 22:34:13 -05:00)
8553712613
chore: upgrade trivy to v0.55.2, also update the logic of waiting for zot to start in some jobs

There seems to be an increase in the time zot requires to start before servicing requests. From my GitHub observations it is better to check with curl instead of relying on hardcoded 5s or 10s sleeps.

The logic in .github/workflows/cluster.yaml seems to be old and out of date. Even on main right now only 1 out of 3 zots is actually running. The other 2 are erroring:

Error: operation timeout: boltdb file is already in use, path '/tmp/zot/cache.db'

This is unrelated to this PR; I am seeing the same issue in older workflow runs that still have their logs available.

Signed-off-by: Andrei Aaron <aaaron@luxoft.com>
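The readiness check the message describes, as a sketch (the port, endpoint, and retry budget here are assumptions rather than the exact values used; zot answers on the OCI API root /v2/ once it is serving requests):

    curl --connect-timeout 3 \
         --max-time 120 \
         --retry 20 \
         --retry-delay 1 \
         --retry-all-errors \
         'http://localhost:8080/v2/'

--retry-all-errors makes curl retry while connections are still being refused during startup, instead of giving up on the first failure.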
179 lines
6.4 KiB
YAML
name: "Clustering test"
|
|
on:
|
|
push:
|
|
branches:
|
|
- main
|
|
pull_request:
|
|
branches: [main]
|
|
release:
|
|
types:
|
|
- published
|
|
|
|
permissions: read-all

jobs:
  client-tools:
    name: Stateless zot with shared reliable storage
    runs-on: ubuntu-latest-4-cores
    # services:
    #   minio:
    #     image: minio/minio:RELEASE.2024-07-16T23-46-41Z
    #     env:
    #       MINIO_ROOT_USER: minioadmin
    #       MINIO_ROOT_PASSWORD: minioadmin
    #     ports:
    #       - 9000:9000
    #     volumes:
    #       - /tmp/data:/data
    #     options: --name=minio --health-cmd "curl http://localhost:9000/minio/health/live"
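    # NOTE: minio is started manually in a step below instead of through the
    # services block above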
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          cache: false
          go-version: 1.22.x
      - name: Install dependencies
        run: |
          cd $GITHUB_WORKSPACE
          go install github.com/swaggo/swag/cmd/swag@v1.16.2
          go mod download
          sudo apt-get update
          sudo apt-get -y install rpm uidmap
          # install skopeo
          sudo apt-get -y install skopeo

          # install haproxy
          sudo apt-get -y install haproxy
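
      # minio stands in for the shared reliable storage: all three zot
      # instances point at the same S3-compatible backend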
      - name: Setup minio service
        run: |
          docker run -d -p 9000:9000 --name minio \
            -e "MINIO_ACCESS_KEY=minioadmin" \
            -e "MINIO_SECRET_KEY=minioadmin" \
            -v /tmp/data:/data \
            -v /tmp/config:/root/.minio \
            --health-cmd "curl http://localhost:9000/minio/health/live" \
            minio/minio:RELEASE.2024-07-16T23-46-41Z server /data
      - name: Install py minio
        run: pip3 install minio

      - name: Wait for minio to come up
        run: |
          curl --connect-timeout 5 \
            --max-time 120 \
            --retry 12 \
            --retry-max-time 120 \
            'http://localhost:9000/minio/health/live'
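
      # the zot configs used below store data in the 'zot-storage' bucket,
      # so create it before any zot comes up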
      - name: Create minio bucket
        run: |
          python3 - <<'EOF'
          from minio import Minio

          try:
              minio = Minio(
                  'localhost:9000',
                  access_key='minioadmin',
                  secret_key='minioadmin',
                  secure=False
              )
          except Exception as ex:
              raise

          minio.make_bucket('zot-storage')
          print(f'{minio.list_buckets()}')
          EOF
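
      # haproxy fronts the three zot instances: the tests below talk to
      # :8080, while the individual zots listen on :8081-:8083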
      - name: Run haproxy
        run: |
          sudo haproxy -d -f examples/cluster/haproxy.cfg -D
          sleep 10
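
      # config-minio.json listens on 8081; each sed bumps the port before the
      # next copy, yielding configs for ports 8081, 8082 and 8083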
      - name: Prepare configuration files
        run: |
          cp test/cluster/config-minio.json test/cluster/config-minio1.json
          sed -i 's/8081/8082/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio2.json
          sed -i 's/8082/8083/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio3.json

      - name: Run push-pull tests
        run: |
          make binary
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 20
          # run tests
          skopeo --debug copy --format=oci --dest-tls-verify=false docker://ghcr.io/project-zot/golang:1.20 docker://localhost:8080/golang:1.20
          skopeo --debug copy --src-tls-verify=false docker://localhost:8080/golang:1.20 oci:golang:1.20
          echo "{\"name\":\"foo\",\"value\":\"bar\"}" > config.json
          echo "hello world" > artifact.txt
          oras push --plain-http localhost:8080/hello-artifact:v2 \
            --config config.json:application/vnd.acme.rocket.config.v1+json \
            artifact.txt:text/plain -d -v
          rm -f artifact.txt # first delete the file
          oras pull --plain-http localhost:8080/hello-artifact:v2 -d -v
          grep -q "hello world" artifact.txt # should print "hello world"
          if [ $? -ne 0 ]; then \
            killall -r zot-*; \
            exit 1; \
          fi

          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
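
      # zb's --src-cidr/--src-ips options vary the benchmark clients' source
      # addresses, so haproxy's balancing sees distinct client IPs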
      - name: Run benchmark with --src-cidr arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 20
          # run zb with --src-cidr
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-cidr 127.0.0.0/8 http://localhost:8080

          killall -r zot-*

          # clean zot storage
          sudo rm -rf /tmp/data/zot-storage/zot
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
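
      # same benchmark, this time with an explicit list of client source IPs
      # instead of a CIDR range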
      - name: Run benchmark with --src-ips arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 20
          # run zb with --src-ips
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-ips 127.0.0.2,127.0.0.3,127.0.0.4,127.0.0.5,127.0.0.6,127.0.12.5,127.0.12.6 http://localhost:8080

          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      # Download previous benchmark result from cache (if exists)
      - name: Download previous benchmark data
        uses: actions/cache@v4
        with:
          path: ./cache
          key: ${{ runner.os }}-gen1-benchmark-stateless-cluster
      # Run `github-action-benchmark` action
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1.20.3
        with:
          # What benchmark tool the output came from
          tool: 'customBiggerIsBetter'
          # Where the output from the benchmark tool is stored
          output-file-path: ci-cd.json
          # Where the previous data file is stored
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Upload the updated cache file for the next job by actions/cache